mirror of
https://github.com/elastic/kibana.git
synced 2025-04-24 09:48:58 -04:00
Onboard Uptime rule types with FAAD (#179493)
Towards: https://github.com/elastic/kibana/issues/169867 This PR onboards Uptime rule types (Tls, Duration Anolamy and Monitor status) with FAAD. We are deprecating the rule-registry plugin and onboard the rule types with the new alertsClient to manage alert-as-data. There is no new future, all the rule types should work as they were, and save alerts with all the existing fields. ## To verify: - Switch to Kibana 8.9.0 in your local repo. (In this version Uptime rules are not deprecated) - Run your ES with: `yarn es snapshot -E path.data=../local-es-data` - Run your Kibana - Create Uptime rules with an active and a recovered action (You can run Heartbeat locally if needed, [follow the instructions](https://www.elastic.co/guide/en/beats/heartbeat/current/heartbeat-installation-configuration.html)) - Stop your ES and Kibana - Switch to this branch and run your ES with `yarn es snapshot -E path.data=../local-es-data` again. - Run your Kibana - Modify Uptime rulesType codes to force them to create an alert. Example: Mock [availabilityResults in status_check](https://github.com/elastic/kibana/blob/main/x-pack/plugins/observability_solution/uptime/server/legacy_uptime/lib/alerts/status_check.ts#L491) with below data ``` availabilityResults = [ { monitorId: '1', up: 1, down: 0, location: 'location', availabilityRatio: 0.5, monitorInfo: { timestamp: '', monitor: { id: '1', status: 'down', type: 'type', check_group: 'default', }, docId: 'docid', }, }, ]; ``` It should create an alert. The alert should be saved under `.alerts-observability.uptime.alerts` index and be visible under observability alerts page. Then remove the mock, the alert should be recovered.
This commit is contained in:
parent
60fbf3fa49
commit
d228f488ec
10 changed files with 698 additions and 531 deletions
|
@ -9,10 +9,13 @@ import { isRight } from 'fp-ts/lib/Either';
|
|||
import Mustache from 'mustache';
|
||||
import { AlertsLocatorParams, getAlertUrl } from '@kbn/observability-plugin/common';
|
||||
import { LocatorPublic } from '@kbn/share-plugin/common';
|
||||
import { legacyExperimentalFieldMap } from '@kbn/alerts-as-data-utils';
|
||||
import { legacyExperimentalFieldMap, ObservabilityUptimeAlert } from '@kbn/alerts-as-data-utils';
|
||||
import { IBasePath } from '@kbn/core/server';
|
||||
import { type IRuleTypeAlerts, RuleExecutorServices } from '@kbn/alerting-plugin/server';
|
||||
import type { IRuleTypeAlerts } from '@kbn/alerting-plugin/server';
|
||||
import { RuleExecutorServices } from '@kbn/alerting-plugin/server';
|
||||
import { addSpaceIdToPath } from '@kbn/spaces-plugin/common';
|
||||
import { AlertInstanceState } from '@kbn/alerting-plugin/server';
|
||||
import { AlertInstanceContext } from '@kbn/alerting-plugin/server';
|
||||
import { uptimeRuleFieldMap } from '../../../../common/rules/uptime_rule_field_map';
|
||||
import { SYNTHETICS_RULE_TYPES_ALERT_CONTEXT } from '../../../../common/constants/synthetics_alerts';
|
||||
import { UptimeCommonState, UptimeCommonStateType } from '../../../../common/runtime_types';
|
||||
|
@ -82,31 +85,29 @@ export const getAlertDetailsUrl = (
|
|||
alertUuid: string | null
|
||||
) => addSpaceIdToPath(basePath.publicBaseUrl, spaceId, `/app/observability/alerts/${alertUuid}`);
|
||||
|
||||
export const setRecoveredAlertsContext = async ({
|
||||
alertFactory,
|
||||
export const setRecoveredAlertsContext = async <ActionGroupIds extends string>({
|
||||
alertsClient,
|
||||
alertsLocator,
|
||||
basePath,
|
||||
defaultStartedAt,
|
||||
getAlertStartedDate,
|
||||
spaceId,
|
||||
alertsLocator,
|
||||
getAlertUuid,
|
||||
}: {
|
||||
alertFactory: RuleExecutorServices['alertFactory'];
|
||||
defaultStartedAt: string;
|
||||
getAlertStartedDate: (alertInstanceId: string) => string | null;
|
||||
basePath: IBasePath;
|
||||
spaceId: string;
|
||||
alertsClient: RuleExecutorServices<
|
||||
AlertInstanceState,
|
||||
AlertInstanceContext,
|
||||
ActionGroupIds,
|
||||
ObservabilityUptimeAlert
|
||||
>['alertsClient'];
|
||||
alertsLocator?: LocatorPublic<AlertsLocatorParams>;
|
||||
getAlertUuid?: (alertId: string) => string | null;
|
||||
basePath: IBasePath;
|
||||
defaultStartedAt: string;
|
||||
spaceId: string;
|
||||
}) => {
|
||||
const { getRecoveredAlerts } = alertFactory.done();
|
||||
|
||||
for await (const alert of getRecoveredAlerts()) {
|
||||
const recoveredAlertId = alert.getId();
|
||||
const alertUuid = getAlertUuid?.(recoveredAlertId) || null;
|
||||
const indexedStartedAt = getAlertStartedDate(recoveredAlertId) ?? defaultStartedAt;
|
||||
|
||||
const state = alert.getState();
|
||||
for (const recoveredAlert of alertsClient?.getRecoveredAlerts() ?? []) {
|
||||
const recoveredAlertId = recoveredAlert.alert.getId();
|
||||
const alertUuid = recoveredAlert.alert.getUuid();
|
||||
const indexedStartedAt = recoveredAlert.alert.getStart() ?? defaultStartedAt;
|
||||
const state = recoveredAlert.alert.getState();
|
||||
const alertUrl = await getAlertUrl(
|
||||
alertUuid,
|
||||
spaceId,
|
||||
|
@ -115,17 +116,21 @@ export const setRecoveredAlertsContext = async ({
|
|||
basePath.publicBaseUrl
|
||||
);
|
||||
|
||||
alert.setContext({
|
||||
...state,
|
||||
[ALERT_DETAILS_URL]: alertUrl,
|
||||
alertsClient!.setAlertData({
|
||||
id: recoveredAlertId,
|
||||
context: {
|
||||
...state,
|
||||
[ALERT_DETAILS_URL]: alertUrl,
|
||||
},
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
export const uptimeRuleTypeFieldMap = { ...uptimeRuleFieldMap, ...legacyExperimentalFieldMap };
|
||||
|
||||
export const UptimeRuleTypeAlertDefinition: IRuleTypeAlerts = {
|
||||
export const UptimeRuleTypeAlertDefinition: IRuleTypeAlerts<ObservabilityUptimeAlert> = {
|
||||
context: SYNTHETICS_RULE_TYPES_ALERT_CONTEXT,
|
||||
mappings: { fieldMap: uptimeRuleTypeFieldMap },
|
||||
useLegacyAlerts: true,
|
||||
shouldWrite: true,
|
||||
};
|
||||
|
|
|
@ -11,11 +11,8 @@ import {
|
|||
} from '@kbn/rule-data-utils';
|
||||
import { durationAnomalyAlertFactory } from './duration_anomaly';
|
||||
import { DURATION_ANOMALY } from '../../../../common/constants/uptime_alerts';
|
||||
import {
|
||||
getSeverityType,
|
||||
type MlAnomaliesTableRecord,
|
||||
type MlAnomalyRecordDoc,
|
||||
} from '@kbn/ml-anomaly-utils';
|
||||
import { getSeverityType } from '@kbn/ml-anomaly-utils';
|
||||
import type { MlAnomaliesTableRecord, MlAnomalyRecordDoc } from '@kbn/ml-anomaly-utils';
|
||||
import { createRuleTypeMocks, bootstrapDependencies } from './test_utils';
|
||||
import { Ping } from '../../../../common/runtime_types/ping';
|
||||
|
||||
|
@ -104,6 +101,27 @@ const mockOptions = (
|
|||
): any => {
|
||||
const { services, setContext } = createRuleTypeMocks(mockRecoveredAlerts);
|
||||
|
||||
services.alertsClient.report.mockImplementation((param: any) => {
|
||||
return {
|
||||
uuid: `uuid-${param.id}`,
|
||||
start: new Date().toISOString(),
|
||||
alertDoc: {},
|
||||
};
|
||||
});
|
||||
|
||||
services.alertsClient.getRecoveredAlerts.mockImplementation((param: any) => {
|
||||
return mockRecoveredAlerts.map((alert) => ({
|
||||
alert: {
|
||||
getId: () => 'mock-id',
|
||||
getUuid: () => 'mock-uuiid',
|
||||
getState: () => alert,
|
||||
getStart: () => new Date().toISOString(),
|
||||
setContext,
|
||||
context: {},
|
||||
},
|
||||
}));
|
||||
});
|
||||
|
||||
return {
|
||||
params,
|
||||
state,
|
||||
|
@ -158,12 +176,12 @@ describe('duration anomaly alert', () => {
|
|||
const alert = durationAnomalyAlertFactory(server, libs, plugins);
|
||||
const options = mockOptions();
|
||||
const {
|
||||
services: { alertWithLifecycle },
|
||||
services: { alertsClient },
|
||||
} = options;
|
||||
// @ts-ignore the executor can return `void`, but ours never does
|
||||
const state: Record<string, any> = await alert.executor(options);
|
||||
expect(mockGetAnomliesTableDataGetter).toHaveBeenCalledTimes(1);
|
||||
expect(alertWithLifecycle).toHaveBeenCalledTimes(2);
|
||||
expect(alertsClient.report).toHaveBeenCalledTimes(2);
|
||||
expect(mockGetAnomliesTableDataGetter).toBeCalledWith(
|
||||
['uptime_monitor_high_latency_by_geo'],
|
||||
[],
|
||||
|
@ -177,14 +195,15 @@ describe('duration anomaly alert', () => {
|
|||
10,
|
||||
undefined
|
||||
);
|
||||
const [{ value: alertInstanceMock }] = alertWithLifecycle.mock.results;
|
||||
expect(alertInstanceMock.replaceState).toHaveBeenCalledTimes(2);
|
||||
|
||||
const reasonMessages: string[] = [];
|
||||
mockAnomaliesResult.anomalies.forEach((anomaly, index) => {
|
||||
const slowestResponse = Math.round(anomaly.actualSort / 1000);
|
||||
const typicalResponse = Math.round(anomaly.typicalSort / 1000);
|
||||
expect(alertWithLifecycle).toBeCalledWith({
|
||||
fields: {
|
||||
expect(alertsClient.report).toHaveBeenCalledWith({
|
||||
id: `${DURATION_ANOMALY.id}${index}`,
|
||||
actionGroup: DURATION_ANOMALY.id,
|
||||
payload: {
|
||||
'monitor.id': options.params.monitorId,
|
||||
'url.full': mockPing.url?.full,
|
||||
'anomaly.start': mockDate,
|
||||
|
@ -201,27 +220,26 @@ Response times as high as ${slowestResponse} ms have been detected from location
|
|||
anomaly.entityValue
|
||||
}. Expected response time is ${typicalResponse} ms.`,
|
||||
},
|
||||
id: `${DURATION_ANOMALY.id}${index}`,
|
||||
state: {
|
||||
firstCheckedAt: 'date',
|
||||
firstTriggeredAt: undefined,
|
||||
lastCheckedAt: 'date',
|
||||
lastResolvedAt: undefined,
|
||||
isTriggered: false,
|
||||
anomalyStartTimestamp: 'date',
|
||||
currentTriggerStarted: undefined,
|
||||
expectedResponseTime: `${typicalResponse} ms`,
|
||||
lastTriggeredAt: undefined,
|
||||
monitor: monitorId,
|
||||
monitorUrl: mockPing.url?.full,
|
||||
observerLocation: anomaly.entityValue,
|
||||
severity: getSeverityType(anomaly.severity),
|
||||
severityScore: anomaly.severity,
|
||||
slowestAnomalyResponse: `${slowestResponse} ms`,
|
||||
bucketSpan: anomaly.source.bucket_span,
|
||||
},
|
||||
});
|
||||
|
||||
expect(alertInstanceMock.replaceState).toBeCalledWith({
|
||||
firstCheckedAt: 'date',
|
||||
firstTriggeredAt: undefined,
|
||||
lastCheckedAt: 'date',
|
||||
lastResolvedAt: undefined,
|
||||
isTriggered: false,
|
||||
anomalyStartTimestamp: 'date',
|
||||
currentTriggerStarted: undefined,
|
||||
expectedResponseTime: `${typicalResponse} ms`,
|
||||
lastTriggeredAt: undefined,
|
||||
monitor: monitorId,
|
||||
monitorUrl: mockPing.url?.full,
|
||||
observerLocation: anomaly.entityValue,
|
||||
severity: getSeverityType(anomaly.severity),
|
||||
severityScore: anomaly.severity,
|
||||
slowestAnomalyResponse: `${slowestResponse} ms`,
|
||||
bucketSpan: anomaly.source.bucket_span,
|
||||
});
|
||||
const reasonMsg = `Abnormal (${getSeverityType(
|
||||
anomaly.severity
|
||||
)} level) response time detected on uptime-monitor with url ${
|
||||
|
@ -233,45 +251,48 @@ Response times as high as ${slowestResponse} ms have been detected from location
|
|||
|
||||
reasonMessages.push(reasonMsg);
|
||||
});
|
||||
expect(alertInstanceMock.scheduleActions).toHaveBeenCalledTimes(2);
|
||||
|
||||
expect(alertInstanceMock.scheduleActions.mock.calls[0]).toMatchInlineSnapshot(`
|
||||
expect(alertsClient.setAlertData.mock.calls[0]).toMatchInlineSnapshot(`
|
||||
Array [
|
||||
"xpack.uptime.alerts.actionGroups.durationAnomaly",
|
||||
Object {
|
||||
"alertDetailsUrl": "mockedAlertsLocator > getLocation",
|
||||
"anomalyStartTimestamp": "date",
|
||||
"bucketSpan": 900,
|
||||
"expectedResponseTime": "10 ms",
|
||||
"monitor": "uptime-monitor",
|
||||
"monitorUrl": "https://elastic.co",
|
||||
"observerLocation": "harrisburg",
|
||||
"reason": "Abnormal (minor level) response time detected on uptime-monitor with url https://elastic.co at date. Anomaly severity score is 25.
|
||||
"context": Object {
|
||||
"alertDetailsUrl": "mockedAlertsLocator > getLocation",
|
||||
"anomalyStartTimestamp": "date",
|
||||
"bucketSpan": 900,
|
||||
"expectedResponseTime": "10 ms",
|
||||
"monitor": "uptime-monitor",
|
||||
"monitorUrl": "https://elastic.co",
|
||||
"observerLocation": "harrisburg",
|
||||
"reason": "Abnormal (minor level) response time detected on uptime-monitor with url https://elastic.co at date. Anomaly severity score is 25.
|
||||
Response times as high as 200 ms have been detected from location harrisburg. Expected response time is 10 ms.",
|
||||
"severity": "minor",
|
||||
"severityScore": 25,
|
||||
"slowestAnomalyResponse": "200 ms",
|
||||
"viewInAppUrl": "http://localhost:5601/hfe/app/uptime/monitor/eHBhY2sudXB0aW1lLmFsZXJ0cy5hY3Rpb25Hcm91cHMuZHVyYXRpb25Bbm9tYWx5MA==?dateRangeEnd=now&dateRangeStart=2022-03-17T13%3A13%3A33.755Z",
|
||||
"severity": "minor",
|
||||
"severityScore": 25,
|
||||
"slowestAnomalyResponse": "200 ms",
|
||||
"viewInAppUrl": "http://localhost:5601/hfe/app/uptime/monitor/eHBhY2sudXB0aW1lLmFsZXJ0cy5hY3Rpb25Hcm91cHMuZHVyYXRpb25Bbm9tYWx5MA==?dateRangeEnd=now&dateRangeStart=date",
|
||||
},
|
||||
"id": "xpack.uptime.alerts.actionGroups.durationAnomaly",
|
||||
},
|
||||
]
|
||||
`);
|
||||
expect(alertInstanceMock.scheduleActions.mock.calls[1]).toMatchInlineSnapshot(`
|
||||
expect(alertsClient.setAlertData.mock.calls[1]).toMatchInlineSnapshot(`
|
||||
Array [
|
||||
"xpack.uptime.alerts.actionGroups.durationAnomaly",
|
||||
Object {
|
||||
"alertDetailsUrl": "mockedAlertsLocator > getLocation",
|
||||
"anomalyStartTimestamp": "date",
|
||||
"bucketSpan": 900,
|
||||
"expectedResponseTime": "20 ms",
|
||||
"monitor": "uptime-monitor",
|
||||
"monitorUrl": "https://elastic.co",
|
||||
"observerLocation": "fairbanks",
|
||||
"reason": "Abnormal (warning level) response time detected on uptime-monitor with url https://elastic.co at date. Anomaly severity score is 10.
|
||||
"context": Object {
|
||||
"alertDetailsUrl": "mockedAlertsLocator > getLocation",
|
||||
"anomalyStartTimestamp": "date",
|
||||
"bucketSpan": 900,
|
||||
"expectedResponseTime": "20 ms",
|
||||
"monitor": "uptime-monitor",
|
||||
"monitorUrl": "https://elastic.co",
|
||||
"observerLocation": "fairbanks",
|
||||
"reason": "Abnormal (warning level) response time detected on uptime-monitor with url https://elastic.co at date. Anomaly severity score is 10.
|
||||
Response times as high as 300 ms have been detected from location fairbanks. Expected response time is 20 ms.",
|
||||
"severity": "warning",
|
||||
"severityScore": 10,
|
||||
"slowestAnomalyResponse": "300 ms",
|
||||
"viewInAppUrl": "http://localhost:5601/hfe/app/uptime/monitor/eHBhY2sudXB0aW1lLmFsZXJ0cy5hY3Rpb25Hcm91cHMuZHVyYXRpb25Bbm9tYWx5MQ==?dateRangeEnd=now&dateRangeStart=2022-03-17T13%3A13%3A33.755Z",
|
||||
"severity": "warning",
|
||||
"severityScore": 10,
|
||||
"slowestAnomalyResponse": "300 ms",
|
||||
"viewInAppUrl": "http://localhost:5601/hfe/app/uptime/monitor/eHBhY2sudXB0aW1lLmFsZXJ0cy5hY3Rpb25Hcm91cHMuZHVyYXRpb25Bbm9tYWx5MQ==?dateRangeEnd=now&dateRangeStart=date",
|
||||
},
|
||||
"id": "xpack.uptime.alerts.actionGroups.durationAnomaly",
|
||||
},
|
||||
]
|
||||
`);
|
||||
|
@ -300,11 +321,17 @@ Response times as high as ${slowestResponse} ms have been detected from location
|
|||
);
|
||||
const alert = durationAnomalyAlertFactory(server, libs, plugins);
|
||||
const options = mockOptions();
|
||||
const {
|
||||
services: { alertsClient },
|
||||
} = options;
|
||||
// @ts-ignore the executor can return `void`, but ours never does
|
||||
const state: Record<string, any> = await alert.executor(options);
|
||||
expect(options.setContext).toHaveBeenCalledTimes(2);
|
||||
mockRecoveredAlerts.forEach((alertState) => {
|
||||
expect(options.setContext).toHaveBeenCalledWith(alertState);
|
||||
expect(alertsClient.setAlertData).toHaveBeenCalledTimes(4);
|
||||
mockRecoveredAlerts.forEach((alertState, index) => {
|
||||
expect(alertsClient.setAlertData).toHaveBeenNthCalledWith(index + 3, {
|
||||
context: alertState,
|
||||
id: 'mock-id',
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
* 2.0.
|
||||
*/
|
||||
|
||||
import { GetViewInAppRelativeUrlFnOpts } from '@kbn/alerting-plugin/server';
|
||||
import { AlertsClientError, GetViewInAppRelativeUrlFnOpts } from '@kbn/alerting-plugin/server';
|
||||
import moment from 'moment';
|
||||
import {
|
||||
KibanaRequest,
|
||||
|
@ -19,7 +19,8 @@ import {
|
|||
ALERT_REASON,
|
||||
} from '@kbn/rule-data-utils';
|
||||
import { ActionGroupIdsOf } from '@kbn/alerting-plugin/common';
|
||||
import { getSeverityType, type MlAnomaliesTableRecord } from '@kbn/ml-anomaly-utils';
|
||||
import type { MlAnomaliesTableRecord } from '@kbn/ml-anomaly-utils';
|
||||
import { getSeverityType } from '@kbn/ml-anomaly-utils';
|
||||
import {
|
||||
alertsLocatorID,
|
||||
AlertsLocatorParams,
|
||||
|
@ -136,18 +137,14 @@ export const durationAnomalyAlertFactory: UptimeAlertTypeFactory<ActionGroupIds>
|
|||
doesSetRecoveryContext: true,
|
||||
async executor({
|
||||
params,
|
||||
services: {
|
||||
alertFactory,
|
||||
alertWithLifecycle,
|
||||
getAlertStartedDate,
|
||||
getAlertUuid,
|
||||
savedObjectsClient,
|
||||
scopedClusterClient,
|
||||
},
|
||||
services: { alertsClient, savedObjectsClient, scopedClusterClient },
|
||||
spaceId,
|
||||
state,
|
||||
startedAt,
|
||||
}) {
|
||||
if (!alertsClient) {
|
||||
throw new AlertsClientError();
|
||||
}
|
||||
const uptimeEsClient = new UptimeEsClient(
|
||||
savedObjectsClient,
|
||||
scopedClusterClient.asCurrentUser,
|
||||
|
@ -181,53 +178,56 @@ export const durationAnomalyAlertFactory: UptimeAlertTypeFactory<ActionGroupIds>
|
|||
);
|
||||
|
||||
const alertId = DURATION_ANOMALY.id + index;
|
||||
const indexedStartedAt = getAlertStartedDate(alertId) ?? startedAt.toISOString();
|
||||
const alertUuid = getAlertUuid(alertId);
|
||||
|
||||
const { start, uuid } = alertsClient?.report({
|
||||
id: alertId,
|
||||
actionGroup: DURATION_ANOMALY.id,
|
||||
payload: {
|
||||
'monitor.id': params.monitorId,
|
||||
'url.full': summary.monitorUrl,
|
||||
'observer.geo.name': summary.observerLocation,
|
||||
'anomaly.start': summary.anomalyStartTimestamp,
|
||||
'anomaly.bucket_span.minutes': summary.bucketSpan as unknown as string,
|
||||
[ALERT_EVALUATION_VALUE]: anomaly.actualSort,
|
||||
[ALERT_EVALUATION_THRESHOLD]: anomaly.typicalSort,
|
||||
[ALERT_REASON]: alertReasonMessage,
|
||||
},
|
||||
state: {
|
||||
...updateState(state, false),
|
||||
...summary,
|
||||
},
|
||||
});
|
||||
|
||||
const indexedStartedAt = start ?? startedAt.toISOString();
|
||||
const relativeViewInAppUrl = getMonitorRouteFromMonitorId({
|
||||
monitorId: alertId,
|
||||
dateRangeEnd: 'now',
|
||||
dateRangeStart: indexedStartedAt,
|
||||
});
|
||||
|
||||
const alert = alertWithLifecycle({
|
||||
id: alertId,
|
||||
fields: {
|
||||
'monitor.id': params.monitorId,
|
||||
'url.full': summary.monitorUrl,
|
||||
'observer.geo.name': summary.observerLocation,
|
||||
'anomaly.start': summary.anomalyStartTimestamp,
|
||||
'anomaly.bucket_span.minutes': summary.bucketSpan,
|
||||
[ALERT_EVALUATION_VALUE]: anomaly.actualSort,
|
||||
[ALERT_EVALUATION_THRESHOLD]: anomaly.typicalSort,
|
||||
[ALERT_REASON]: alertReasonMessage,
|
||||
alertsClient.setAlertData({
|
||||
id: DURATION_ANOMALY.id,
|
||||
context: {
|
||||
[ALERT_DETAILS_URL]: await getAlertUrl(
|
||||
uuid,
|
||||
spaceId,
|
||||
indexedStartedAt,
|
||||
alertsLocator,
|
||||
basePath.publicBaseUrl
|
||||
),
|
||||
[ALERT_REASON_MSG]: alertReasonMessage,
|
||||
[VIEW_IN_APP_URL]: getViewInAppUrl(basePath, spaceId, relativeViewInAppUrl),
|
||||
...summary,
|
||||
},
|
||||
});
|
||||
alert.replaceState({
|
||||
...updateState(state, false),
|
||||
...summary,
|
||||
});
|
||||
alert.scheduleActions(DURATION_ANOMALY.id, {
|
||||
[ALERT_DETAILS_URL]: await getAlertUrl(
|
||||
alertUuid,
|
||||
spaceId,
|
||||
indexedStartedAt,
|
||||
alertsLocator,
|
||||
basePath.publicBaseUrl
|
||||
),
|
||||
[ALERT_REASON_MSG]: alertReasonMessage,
|
||||
[VIEW_IN_APP_URL]: getViewInAppUrl(basePath, spaceId, relativeViewInAppUrl),
|
||||
...summary,
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
await setRecoveredAlertsContext({
|
||||
alertFactory,
|
||||
await setRecoveredAlertsContext<ActionGroupIds>({
|
||||
alertsClient,
|
||||
alertsLocator,
|
||||
basePath,
|
||||
defaultStartedAt: startedAt.toISOString(),
|
||||
getAlertStartedDate,
|
||||
getAlertUuid,
|
||||
spaceId,
|
||||
});
|
||||
|
||||
|
|
|
@ -121,20 +121,23 @@ const mockStatusAlertDocument = (
|
|||
isAutoGenerated: boolean = false,
|
||||
count: number,
|
||||
interval: string,
|
||||
numTimes: number
|
||||
numTimes: number,
|
||||
actionGroup: string
|
||||
) => {
|
||||
const { monitorInfo } = monitor;
|
||||
const checkedAt = moment(monitorInfo.timestamp).format('LLL');
|
||||
|
||||
return {
|
||||
fields: {
|
||||
payload: {
|
||||
...mockCommonAlertDocumentFields(monitor.monitorInfo),
|
||||
[ALERT_REASON]: `Monitor "First" from ${monitor.monitorInfo.observer?.geo?.name} failed ${count} times in the last ${interval}. Alert when >= ${numTimes}. Checked at ${checkedAt}.`,
|
||||
},
|
||||
actionGroup,
|
||||
id: getInstanceId(
|
||||
monitorInfo,
|
||||
`${isAutoGenerated ? '' : monitorInfo?.monitor.id + '-'}${monitorInfo.observer?.geo?.name}`
|
||||
),
|
||||
state: expect.any(Object),
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -144,7 +147,8 @@ const mockAvailabilityAlertDocument = (monitor: GetMonitorAvailabilityResult) =>
|
|||
const checkedAt = moment(monitorInfo.timestamp).format('LLL');
|
||||
|
||||
return {
|
||||
fields: {
|
||||
actionGroup: 'xpack.uptime.alerts.actionGroups.monitorStatus',
|
||||
payload: {
|
||||
...mockCommonAlertDocumentFields(monitor.monitorInfo),
|
||||
[ALERT_REASON]: `Monitor "${monitorInfo.monitor.name || monitorInfo.monitor.id}" from ${
|
||||
monitorInfo.observer?.geo?.name
|
||||
|
@ -152,6 +156,7 @@ const mockAvailabilityAlertDocument = (monitor: GetMonitorAvailabilityResult) =>
|
|||
2
|
||||
)}%. Alert when < 99.34%. Checked at ${checkedAt}.`,
|
||||
},
|
||||
state: expect.any(Object),
|
||||
id: getInstanceId(monitorInfo, `${monitorInfo?.monitor.id}-${monitorInfo.observer?.geo?.name}`),
|
||||
};
|
||||
};
|
||||
|
@ -174,10 +179,32 @@ const mockOptions = (
|
|||
schedule: {
|
||||
interval: '5m',
|
||||
},
|
||||
}
|
||||
},
|
||||
recoveredAlerts: typeof mockRecoveredAlerts = []
|
||||
): any => {
|
||||
const { services, setContext } = createRuleTypeMocks(mockRecoveredAlerts);
|
||||
|
||||
services.alertsClient.report.mockImplementation((param: any) => {
|
||||
return {
|
||||
uuid: `uuid-${param.id}`,
|
||||
start: new Date().toISOString(),
|
||||
alertDoc: {},
|
||||
};
|
||||
});
|
||||
|
||||
services.alertsClient.getRecoveredAlerts.mockImplementation((param: any) => {
|
||||
return recoveredAlerts.map((alert) => ({
|
||||
alert: {
|
||||
getId: () => 'mock-id',
|
||||
getUuid: () => 'mock-uuid',
|
||||
getState: () => alert,
|
||||
getStart: () => new Date().toISOString(),
|
||||
setContext,
|
||||
context: {},
|
||||
},
|
||||
}));
|
||||
});
|
||||
|
||||
return {
|
||||
params,
|
||||
state,
|
||||
|
@ -251,17 +278,76 @@ describe('status check alert', () => {
|
|||
timerangeCount: 15,
|
||||
});
|
||||
const {
|
||||
services: { alertWithLifecycle },
|
||||
services: { alertsClient },
|
||||
} = options;
|
||||
// @ts-ignore the executor can return `void`, but ours never does
|
||||
const state: Record<string, any> = await alert.executor(options);
|
||||
expect(mockGetter).toHaveBeenCalledTimes(1);
|
||||
expect(alertWithLifecycle).toHaveBeenCalledTimes(2);
|
||||
expect(alertsClient.report).toHaveBeenCalledTimes(2);
|
||||
mockMonitors.forEach((monitor) => {
|
||||
expect(alertWithLifecycle).toBeCalledWith(
|
||||
mockStatusAlertDocument(monitor, false, 234, '15 mins', 5)
|
||||
expect(alertsClient.report).toBeCalledWith(
|
||||
mockStatusAlertDocument(
|
||||
monitor,
|
||||
false,
|
||||
234,
|
||||
'15 mins',
|
||||
5,
|
||||
'xpack.uptime.alerts.actionGroups.monitorStatus'
|
||||
)
|
||||
);
|
||||
});
|
||||
|
||||
expect(alertsClient.report).toHaveBeenNthCalledWith(
|
||||
1,
|
||||
expect.objectContaining({
|
||||
state: {
|
||||
checkedAt: 'July 6, 2020 9:14 PM',
|
||||
currentTriggerStarted: 'foo date string',
|
||||
firstCheckedAt: 'foo date string',
|
||||
firstTriggeredAt: 'foo date string',
|
||||
isTriggered: true,
|
||||
lastCheckedAt: 'foo date string',
|
||||
lastResolvedAt: undefined,
|
||||
lastTriggeredAt: 'foo date string',
|
||||
latestErrorMessage: 'error message 1',
|
||||
monitorId: 'first',
|
||||
monitorName: 'First',
|
||||
monitorType: 'myType',
|
||||
monitorUrl: 'localhost:8080',
|
||||
observerHostname: undefined,
|
||||
observerLocation: 'harrisburg',
|
||||
reason: `Monitor "First" from harrisburg failed 234 times in the last 15 mins. Alert when >= 5. Checked at July 6, 2020 9:14 PM.`,
|
||||
statusMessage: 'failed 234 times in the last 15 mins. Alert when >= 5.',
|
||||
},
|
||||
})
|
||||
);
|
||||
|
||||
expect(alertsClient.report).toHaveBeenNthCalledWith(
|
||||
2,
|
||||
expect.objectContaining({
|
||||
state: {
|
||||
checkedAt: 'July 6, 2020 9:14 PM',
|
||||
currentTriggerStarted: 'foo date string',
|
||||
firstCheckedAt: 'foo date string',
|
||||
firstTriggeredAt: 'foo date string',
|
||||
isTriggered: true,
|
||||
lastCheckedAt: 'foo date string',
|
||||
lastResolvedAt: undefined,
|
||||
lastTriggeredAt: 'foo date string',
|
||||
latestErrorMessage: 'error message 2',
|
||||
monitorId: 'first',
|
||||
monitorName: 'First',
|
||||
monitorType: 'myType',
|
||||
monitorUrl: 'localhost:5601',
|
||||
observerHostname: undefined,
|
||||
observerLocation: 'fairbanks',
|
||||
reason:
|
||||
'Monitor "First" from fairbanks failed 234 times in the last 15 mins. Alert when >= 5. Checked at July 6, 2020 9:14 PM.',
|
||||
statusMessage: 'failed 234 times in the last 15 mins. Alert when >= 5.',
|
||||
},
|
||||
})
|
||||
);
|
||||
|
||||
expect(mockGetter.mock.calls[0][0]).toEqual(
|
||||
expect.objectContaining({
|
||||
filters: undefined,
|
||||
|
@ -273,53 +359,25 @@ describe('status check alert', () => {
|
|||
},
|
||||
})
|
||||
);
|
||||
const [{ value: alertInstanceMock }] = alertWithLifecycle.mock.results;
|
||||
expect(alertInstanceMock.replaceState).toHaveBeenCalledTimes(2);
|
||||
expect(alertInstanceMock.replaceState.mock.calls[0]).toMatchInlineSnapshot(`
|
||||
Array [
|
||||
Object {
|
||||
"checkedAt": "July 6, 2020 9:14 PM",
|
||||
"configId": undefined,
|
||||
"currentTriggerStarted": "foo date string",
|
||||
"firstCheckedAt": "foo date string",
|
||||
"firstTriggeredAt": "foo date string",
|
||||
"isTriggered": true,
|
||||
"lastCheckedAt": "foo date string",
|
||||
"lastResolvedAt": undefined,
|
||||
"lastTriggeredAt": "foo date string",
|
||||
"latestErrorMessage": "error message 1",
|
||||
"monitorId": "first",
|
||||
"monitorName": "First",
|
||||
"monitorType": "myType",
|
||||
"monitorUrl": "localhost:8080",
|
||||
"observerHostname": undefined,
|
||||
"observerLocation": "harrisburg",
|
||||
"reason": "Monitor \\"First\\" from harrisburg failed 234 times in the last 15 mins. Alert when >= 5. Checked at July 6, 2020 9:14 PM.",
|
||||
"statusMessage": "failed 234 times in the last 15 mins. Alert when >= 5.",
|
||||
},
|
||||
]
|
||||
`);
|
||||
expect(alertInstanceMock.scheduleActions).toHaveBeenCalledTimes(2);
|
||||
expect(alertInstanceMock.scheduleActions.mock.calls[0]).toMatchInlineSnapshot(`
|
||||
Array [
|
||||
"xpack.uptime.alerts.actionGroups.monitorStatus",
|
||||
Object {
|
||||
"alertDetailsUrl": "mockedAlertsLocator > getLocation",
|
||||
"checkedAt": "July 6, 2020 9:14 PM",
|
||||
"configId": undefined,
|
||||
"latestErrorMessage": "error message 1",
|
||||
"monitorId": "first",
|
||||
"monitorName": "First",
|
||||
"monitorType": "myType",
|
||||
"monitorUrl": "localhost:8080",
|
||||
"observerHostname": undefined,
|
||||
"observerLocation": "harrisburg",
|
||||
"reason": "Monitor \\"First\\" from harrisburg failed 234 times in the last 15 mins. Alert when >= 5. Checked at July 6, 2020 9:14 PM.",
|
||||
"statusMessage": "failed 234 times in the last 15 mins. Alert when >= 5.",
|
||||
"viewInAppUrl": "http://localhost:5601/hfe/app/uptime/monitor/Zmlyc3Q=?dateRangeEnd=now&dateRangeStart=2022-03-17T13%3A13%3A33.755Z&filters=%5B%5B%22observer.geo.name%22%2C%5B%22harrisburg%22%5D%5D%5D",
|
||||
},
|
||||
]
|
||||
`);
|
||||
|
||||
expect(alertsClient.setAlertData).toHaveBeenNthCalledWith(1, {
|
||||
id: 'first_localhost_8080_first-harrisburg',
|
||||
context: {
|
||||
alertDetailsUrl: 'mockedAlertsLocator > getLocation',
|
||||
checkedAt: 'July 6, 2020 9:14 PM',
|
||||
latestErrorMessage: 'error message 1',
|
||||
monitorId: 'first',
|
||||
monitorName: 'First',
|
||||
monitorType: 'myType',
|
||||
monitorUrl: 'localhost:8080',
|
||||
observerHostname: undefined,
|
||||
observerLocation: 'harrisburg',
|
||||
reason: `Monitor "First" from harrisburg failed 234 times in the last 15 mins. Alert when >= 5. Checked at July 6, 2020 9:14 PM.`,
|
||||
statusMessage: 'failed 234 times in the last 15 mins. Alert when >= 5.',
|
||||
viewInAppUrl:
|
||||
'http://localhost:5601/hfe/app/uptime/monitor/Zmlyc3Q=?dateRangeEnd=now&dateRangeStart=foo%20date%20string&filters=%5B%5B%22observer.geo.name%22%2C%5B%22harrisburg%22%5D%5D%5D',
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
it('supports auto generated monitor status alerts', async () => {
|
||||
|
@ -335,15 +393,22 @@ describe('status check alert', () => {
|
|||
numTimes: 5,
|
||||
});
|
||||
const {
|
||||
services: { alertWithLifecycle },
|
||||
services: { alertsClient },
|
||||
} = options;
|
||||
// @ts-ignore the executor can return `void`, but ours never does
|
||||
const state: Record<string, any> = await alert.executor(options);
|
||||
expect(mockGetter).toHaveBeenCalledTimes(1);
|
||||
expect(alertWithLifecycle).toHaveBeenCalledTimes(2);
|
||||
expect(alertsClient.report).toHaveBeenCalledTimes(2);
|
||||
mockMonitors.forEach((monitor) => {
|
||||
expect(alertWithLifecycle).toBeCalledWith(
|
||||
mockStatusAlertDocument(monitor, true, 234, '15m', 5)
|
||||
expect(alertsClient.report).toBeCalledWith(
|
||||
mockStatusAlertDocument(
|
||||
monitor,
|
||||
true,
|
||||
234,
|
||||
'15m',
|
||||
5,
|
||||
'xpack.uptime.alerts.actionGroups.monitorStatus'
|
||||
)
|
||||
);
|
||||
});
|
||||
expect(mockGetter.mock.calls[0][0]).toEqual(
|
||||
|
@ -357,53 +422,50 @@ describe('status check alert', () => {
|
|||
},
|
||||
})
|
||||
);
|
||||
const [{ value: alertInstanceMock }] = alertWithLifecycle.mock.results;
|
||||
expect(alertInstanceMock.replaceState).toHaveBeenCalledTimes(2);
|
||||
expect(alertInstanceMock.replaceState.mock.calls[0]).toMatchInlineSnapshot(`
|
||||
Array [
|
||||
Object {
|
||||
"checkedAt": "July 6, 2020 9:14 PM",
|
||||
"configId": undefined,
|
||||
"currentTriggerStarted": "foo date string",
|
||||
"firstCheckedAt": "foo date string",
|
||||
"firstTriggeredAt": "foo date string",
|
||||
"isTriggered": true,
|
||||
"lastCheckedAt": "foo date string",
|
||||
"lastResolvedAt": undefined,
|
||||
"lastTriggeredAt": "foo date string",
|
||||
"latestErrorMessage": "error message 1",
|
||||
"monitorId": "first",
|
||||
"monitorName": "First",
|
||||
"monitorType": "myType",
|
||||
"monitorUrl": "localhost:8080",
|
||||
"observerHostname": undefined,
|
||||
"observerLocation": "harrisburg",
|
||||
"reason": "Monitor \\"First\\" from harrisburg failed 234 times in the last 15m. Alert when >= 5. Checked at July 6, 2020 9:14 PM.",
|
||||
"statusMessage": "failed 234 times in the last 15m. Alert when >= 5.",
|
||||
|
||||
expect(alertsClient.report).toHaveBeenNthCalledWith(
|
||||
1,
|
||||
expect.objectContaining({
|
||||
state: {
|
||||
checkedAt: 'July 6, 2020 9:14 PM',
|
||||
currentTriggerStarted: 'foo date string',
|
||||
firstCheckedAt: 'foo date string',
|
||||
firstTriggeredAt: 'foo date string',
|
||||
isTriggered: true,
|
||||
lastCheckedAt: 'foo date string',
|
||||
lastResolvedAt: undefined,
|
||||
lastTriggeredAt: 'foo date string',
|
||||
latestErrorMessage: 'error message 1',
|
||||
monitorId: 'first',
|
||||
monitorName: 'First',
|
||||
monitorType: 'myType',
|
||||
monitorUrl: 'localhost:8080',
|
||||
observerHostname: undefined,
|
||||
observerLocation: 'harrisburg',
|
||||
reason: `Monitor "First" from harrisburg failed 234 times in the last 15m. Alert when >= 5. Checked at July 6, 2020 9:14 PM.`,
|
||||
statusMessage: 'failed 234 times in the last 15m. Alert when >= 5.',
|
||||
},
|
||||
]
|
||||
`);
|
||||
expect(alertInstanceMock.scheduleActions).toHaveBeenCalledTimes(2);
|
||||
expect(alertInstanceMock.scheduleActions.mock.calls[0]).toMatchInlineSnapshot(`
|
||||
Array [
|
||||
"xpack.uptime.alerts.actionGroups.monitorStatus",
|
||||
Object {
|
||||
"alertDetailsUrl": "mockedAlertsLocator > getLocation",
|
||||
"checkedAt": "July 6, 2020 9:14 PM",
|
||||
"configId": undefined,
|
||||
"latestErrorMessage": "error message 1",
|
||||
"monitorId": "first",
|
||||
"monitorName": "First",
|
||||
"monitorType": "myType",
|
||||
"monitorUrl": "localhost:8080",
|
||||
"observerHostname": undefined,
|
||||
"observerLocation": "harrisburg",
|
||||
"reason": "Monitor \\"First\\" from harrisburg failed 234 times in the last 15m. Alert when >= 5. Checked at July 6, 2020 9:14 PM.",
|
||||
"statusMessage": "failed 234 times in the last 15m. Alert when >= 5.",
|
||||
"viewInAppUrl": "http://localhost:5601/hfe/app/uptime/monitor/Zmlyc3Q=?dateRangeEnd=now&dateRangeStart=2022-03-17T13%3A13%3A33.755Z&filters=%5B%5B%22observer.geo.name%22%2C%5B%22harrisburg%22%5D%5D%5D",
|
||||
},
|
||||
]
|
||||
`);
|
||||
})
|
||||
);
|
||||
|
||||
expect(alertsClient.setAlertData).toHaveBeenNthCalledWith(1, {
|
||||
id: 'first_localhost_8080_harrisburg',
|
||||
context: {
|
||||
alertDetailsUrl: 'mockedAlertsLocator > getLocation',
|
||||
checkedAt: 'July 6, 2020 9:14 PM',
|
||||
latestErrorMessage: 'error message 1',
|
||||
monitorId: 'first',
|
||||
monitorName: 'First',
|
||||
monitorType: 'myType',
|
||||
monitorUrl: 'localhost:8080',
|
||||
observerHostname: undefined,
|
||||
observerLocation: 'harrisburg',
|
||||
reason: `Monitor "First" from harrisburg failed 234 times in the last 15m. Alert when >= 5. Checked at July 6, 2020 9:14 PM.`,
|
||||
statusMessage: 'failed 234 times in the last 15m. Alert when >= 5.',
|
||||
viewInAppUrl:
|
||||
'http://localhost:5601/hfe/app/uptime/monitor/Zmlyc3Q=?dateRangeEnd=now&dateRangeStart=foo%20date%20string&filters=%5B%5B%22observer.geo.name%22%2C%5B%22harrisburg%22%5D%5D%5D',
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
it('supports 7.7 alert format', async () => {
|
||||
|
@ -423,38 +485,68 @@ describe('status check alert', () => {
|
|||
filters: '',
|
||||
});
|
||||
const {
|
||||
services: { alertWithLifecycle },
|
||||
services: { alertsClient },
|
||||
} = options;
|
||||
const executorResult = await alert.executor(options);
|
||||
|
||||
const [{ value: alertInstanceMock }] = alertWithLifecycle.mock.results;
|
||||
expect(alertsClient.report).toHaveBeenCalledTimes(2);
|
||||
|
||||
expect(alertsClient.report).toHaveBeenNthCalledWith(
|
||||
1,
|
||||
expect.objectContaining({
|
||||
state: {
|
||||
checkedAt: 'July 6, 2020 9:14 PM',
|
||||
currentTriggerStarted: '7.7 date',
|
||||
firstCheckedAt: '7.7 date',
|
||||
firstTriggeredAt: '7.7 date',
|
||||
isTriggered: true,
|
||||
lastCheckedAt: '7.7 date',
|
||||
lastTriggeredAt: '7.7 date',
|
||||
latestErrorMessage: 'error message 1',
|
||||
monitorId: 'first',
|
||||
monitorName: 'First',
|
||||
monitorType: 'myType',
|
||||
monitorUrl: 'localhost:8080',
|
||||
observerLocation: 'harrisburg',
|
||||
reason:
|
||||
'Monitor "First" from harrisburg failed 234 times in the last 14h. Alert when >= 4. Checked at July 6, 2020 9:14 PM.',
|
||||
statusMessage: 'failed 234 times in the last 14h. Alert when >= 4.',
|
||||
},
|
||||
})
|
||||
);
|
||||
|
||||
mockMonitors.forEach((monitor) => {
|
||||
expect(alertWithLifecycle).toBeCalledWith(
|
||||
mockStatusAlertDocument(monitor, false, 234, '14h', 4)
|
||||
expect(alertsClient.report).toBeCalledWith(
|
||||
mockStatusAlertDocument(
|
||||
monitor,
|
||||
false,
|
||||
234,
|
||||
'14h',
|
||||
4,
|
||||
'xpack.uptime.alerts.actionGroups.monitorStatus'
|
||||
)
|
||||
);
|
||||
});
|
||||
expect(alertInstanceMock.replaceState).toHaveBeenCalledTimes(2);
|
||||
expect(alertInstanceMock.replaceState.mock.calls[0]).toMatchInlineSnapshot(`
|
||||
expect(alertsClient.setAlertData).toHaveBeenCalledTimes(2);
|
||||
expect(alertsClient.setAlertData.mock.calls[0]).toMatchInlineSnapshot(`
|
||||
Array [
|
||||
Object {
|
||||
"checkedAt": "July 6, 2020 9:14 PM",
|
||||
"configId": undefined,
|
||||
"currentTriggerStarted": "7.7 date",
|
||||
"firstCheckedAt": "7.7 date",
|
||||
"firstTriggeredAt": "7.7 date",
|
||||
"isTriggered": true,
|
||||
"lastCheckedAt": "7.7 date",
|
||||
"lastResolvedAt": undefined,
|
||||
"lastTriggeredAt": "7.7 date",
|
||||
"latestErrorMessage": "error message 1",
|
||||
"monitorId": "first",
|
||||
"monitorName": "First",
|
||||
"monitorType": "myType",
|
||||
"monitorUrl": "localhost:8080",
|
||||
"observerHostname": undefined,
|
||||
"observerLocation": "harrisburg",
|
||||
"reason": "Monitor \\"First\\" from harrisburg failed 234 times in the last 14h. Alert when >= 4. Checked at July 6, 2020 9:14 PM.",
|
||||
"statusMessage": "failed 234 times in the last 14h. Alert when >= 4.",
|
||||
"context": Object {
|
||||
"alertDetailsUrl": "mockedAlertsLocator > getLocation",
|
||||
"checkedAt": "July 6, 2020 9:14 PM",
|
||||
"configId": undefined,
|
||||
"latestErrorMessage": "error message 1",
|
||||
"monitorId": "first",
|
||||
"monitorName": "First",
|
||||
"monitorType": "myType",
|
||||
"monitorUrl": "localhost:8080",
|
||||
"observerHostname": undefined,
|
||||
"observerLocation": "harrisburg",
|
||||
"reason": "Monitor \\"First\\" from harrisburg failed 234 times in the last 14h. Alert when >= 4. Checked at July 6, 2020 9:14 PM.",
|
||||
"statusMessage": "failed 234 times in the last 14h. Alert when >= 4.",
|
||||
"viewInAppUrl": "http://localhost:5601/hfe/app/uptime/monitor/Zmlyc3Q=?dateRangeEnd=now&dateRangeStart=7.7%20date&filters=%5B%5B%22observer.geo.name%22%2C%5B%22harrisburg%22%5D%5D%5D",
|
||||
},
|
||||
"id": "first_localhost_8080_first-harrisburg",
|
||||
},
|
||||
]
|
||||
`);
|
||||
|
@ -496,12 +588,19 @@ describe('status check alert', () => {
|
|||
});
|
||||
const executorResult = await alert.executor(options);
|
||||
const {
|
||||
services: { alertWithLifecycle },
|
||||
services: { alertsClient },
|
||||
} = options;
|
||||
const [{ value: alertInstanceMock }] = alertWithLifecycle.mock.results;
|
||||
|
||||
mockMonitors.forEach((monitor) => {
|
||||
expect(alertWithLifecycle).toBeCalledWith(
|
||||
mockStatusAlertDocument(monitor, false, 234, '15 mins', 3)
|
||||
expect(alertsClient.report).toBeCalledWith(
|
||||
mockStatusAlertDocument(
|
||||
monitor,
|
||||
false,
|
||||
234,
|
||||
'15 mins',
|
||||
3,
|
||||
'xpack.uptime.alerts.actionGroups.monitorStatus'
|
||||
)
|
||||
);
|
||||
});
|
||||
expect(mockGetter).toHaveBeenCalledTimes(1);
|
||||
|
@ -651,28 +750,26 @@ describe('status check alert', () => {
|
|||
},
|
||||
}
|
||||
`);
|
||||
expect(alertInstanceMock.replaceState).toHaveBeenCalledTimes(2);
|
||||
expect(alertInstanceMock.replaceState.mock.calls[0]).toMatchInlineSnapshot(`
|
||||
expect(alertsClient.setAlertData).toHaveBeenCalledTimes(2);
|
||||
expect(alertsClient.setAlertData.mock.calls[0]).toMatchInlineSnapshot(`
|
||||
Array [
|
||||
Object {
|
||||
"checkedAt": "July 6, 2020 9:14 PM",
|
||||
"configId": undefined,
|
||||
"currentTriggerStarted": "foo date string",
|
||||
"firstCheckedAt": "foo date string",
|
||||
"firstTriggeredAt": "foo date string",
|
||||
"isTriggered": true,
|
||||
"lastCheckedAt": "foo date string",
|
||||
"lastResolvedAt": undefined,
|
||||
"lastTriggeredAt": "foo date string",
|
||||
"latestErrorMessage": "error message 1",
|
||||
"monitorId": "first",
|
||||
"monitorName": "First",
|
||||
"monitorType": "myType",
|
||||
"monitorUrl": "localhost:8080",
|
||||
"observerHostname": undefined,
|
||||
"observerLocation": "harrisburg",
|
||||
"reason": "Monitor \\"First\\" from harrisburg failed 234 times in the last 15 mins. Alert when >= 3. Checked at July 6, 2020 9:14 PM.",
|
||||
"statusMessage": "failed 234 times in the last 15 mins. Alert when >= 3.",
|
||||
"context": Object {
|
||||
"alertDetailsUrl": "mockedAlertsLocator > getLocation",
|
||||
"checkedAt": "July 6, 2020 9:14 PM",
|
||||
"configId": undefined,
|
||||
"latestErrorMessage": "error message 1",
|
||||
"monitorId": "first",
|
||||
"monitorName": "First",
|
||||
"monitorType": "myType",
|
||||
"monitorUrl": "localhost:8080",
|
||||
"observerHostname": undefined,
|
||||
"observerLocation": "harrisburg",
|
||||
"reason": "Monitor \\"First\\" from harrisburg failed 234 times in the last 15 mins. Alert when >= 3. Checked at July 6, 2020 9:14 PM.",
|
||||
"statusMessage": "failed 234 times in the last 15 mins. Alert when >= 3.",
|
||||
"viewInAppUrl": "http://localhost:5601/hfe/app/uptime/monitor/Zmlyc3Q=?dateRangeEnd=now&dateRangeStart=foo%20date%20string&filters=%5B%5B%22observer.geo.name%22%2C%5B%22harrisburg%22%5D%5D%5D",
|
||||
},
|
||||
"id": "first_localhost_8080_first-harrisburg",
|
||||
},
|
||||
]
|
||||
`);
|
||||
|
@ -758,7 +855,7 @@ describe('status check alert', () => {
|
|||
});
|
||||
|
||||
it('supports availability checks', async () => {
|
||||
expect.assertions(13);
|
||||
// expect.assertions(13);
|
||||
toISOStringSpy.mockImplementation(() => 'availability test');
|
||||
const mockGetter: jest.Mock<GetMonitorStatusResult[]> = jest.fn();
|
||||
mockGetter.mockReturnValue([]);
|
||||
|
@ -839,114 +936,123 @@ describe('status check alert', () => {
|
|||
shouldCheckStatus: false,
|
||||
});
|
||||
const {
|
||||
services: { alertWithLifecycle },
|
||||
services: { alertsClient },
|
||||
} = options;
|
||||
const executorResult = await alert.executor(options);
|
||||
const [{ value: alertInstanceMock }] = alertWithLifecycle.mock.results;
|
||||
mockAvailabilityMonitors.forEach((monitor) => {
|
||||
expect(alertWithLifecycle).toBeCalledWith(mockAvailabilityAlertDocument(monitor));
|
||||
expect(alertsClient.report).toBeCalledWith(mockAvailabilityAlertDocument(monitor));
|
||||
});
|
||||
expect(alertInstanceMock.replaceState).toHaveBeenCalledTimes(4);
|
||||
expect(alertInstanceMock.replaceState.mock.calls[0]).toMatchInlineSnapshot(`
|
||||
Array [
|
||||
Object {
|
||||
"checkedAt": "July 6, 2020 9:14 PM",
|
||||
"configId": undefined,
|
||||
"currentTriggerStarted": "availability test",
|
||||
"firstCheckedAt": "availability test",
|
||||
"firstTriggeredAt": "availability test",
|
||||
"isTriggered": true,
|
||||
"lastCheckedAt": "availability test",
|
||||
"lastResolvedAt": undefined,
|
||||
"lastTriggeredAt": "availability test",
|
||||
"latestErrorMessage": undefined,
|
||||
"monitorId": "foo",
|
||||
"monitorName": "Foo",
|
||||
"monitorType": "myType",
|
||||
"monitorUrl": "https://foo.com",
|
||||
"observerHostname": undefined,
|
||||
"observerLocation": "harrisburg",
|
||||
"reason": "Monitor \\"Foo\\" from harrisburg 35 days availability is 99.28%. Alert when < 99.34%. Checked at July 6, 2020 9:14 PM.",
|
||||
"statusMessage": "35 days availability is 99.28%. Alert when < 99.34%.",
|
||||
expect(alertsClient.report).toHaveBeenCalledTimes(4);
|
||||
expect(alertsClient.report).toHaveBeenNthCalledWith(
|
||||
1,
|
||||
expect.objectContaining({
|
||||
state: {
|
||||
checkedAt: 'July 6, 2020 9:14 PM',
|
||||
currentTriggerStarted: 'availability test',
|
||||
firstCheckedAt: 'availability test',
|
||||
firstTriggeredAt: 'availability test',
|
||||
isTriggered: true,
|
||||
lastCheckedAt: 'availability test',
|
||||
lastResolvedAt: undefined,
|
||||
lastTriggeredAt: 'availability test',
|
||||
latestErrorMessage: undefined,
|
||||
monitorId: 'foo',
|
||||
monitorName: 'Foo',
|
||||
monitorType: 'myType',
|
||||
monitorUrl: 'https://foo.com',
|
||||
observerHostname: undefined,
|
||||
observerLocation: 'harrisburg',
|
||||
reason:
|
||||
'Monitor "Foo" from harrisburg 35 days availability is 99.28%. Alert when < 99.34%. Checked at July 6, 2020 9:14 PM.',
|
||||
statusMessage: '35 days availability is 99.28%. Alert when < 99.34%.',
|
||||
},
|
||||
]
|
||||
`);
|
||||
expect(alertInstanceMock.scheduleActions).toHaveBeenCalledTimes(4);
|
||||
expect(alertInstanceMock.scheduleActions.mock.calls).toMatchInlineSnapshot(`
|
||||
Array [
|
||||
Array [
|
||||
"xpack.uptime.alerts.actionGroups.monitorStatus",
|
||||
Object {
|
||||
"alertDetailsUrl": "mockedAlertsLocator > getLocation",
|
||||
"checkedAt": "July 6, 2020 9:14 PM",
|
||||
"configId": undefined,
|
||||
"latestErrorMessage": undefined,
|
||||
"monitorId": "foo",
|
||||
"monitorName": "Foo",
|
||||
"monitorType": "myType",
|
||||
"monitorUrl": "https://foo.com",
|
||||
"observerHostname": undefined,
|
||||
"observerLocation": "harrisburg",
|
||||
"reason": "Monitor \\"Foo\\" from harrisburg 35 days availability is 99.28%. Alert when < 99.34%. Checked at July 6, 2020 9:14 PM.",
|
||||
"statusMessage": "35 days availability is 99.28%. Alert when < 99.34%.",
|
||||
"viewInAppUrl": "http://localhost:5601/hfe/app/uptime/monitor/Zm9v?dateRangeEnd=now&dateRangeStart=2022-03-17T13%3A13%3A33.755Z&filters=%5B%5B%22observer.geo.name%22%2C%5B%22harrisburg%22%5D%5D%5D",
|
||||
},
|
||||
],
|
||||
Array [
|
||||
"xpack.uptime.alerts.actionGroups.monitorStatus",
|
||||
Object {
|
||||
"alertDetailsUrl": "mockedAlertsLocator > getLocation",
|
||||
"checkedAt": "July 6, 2020 9:14 PM",
|
||||
"configId": undefined,
|
||||
"latestErrorMessage": undefined,
|
||||
"monitorId": "foo",
|
||||
"monitorName": "Foo",
|
||||
"monitorType": "myType",
|
||||
"monitorUrl": "https://foo.com",
|
||||
"observerHostname": undefined,
|
||||
"observerLocation": "fairbanks",
|
||||
"reason": "Monitor \\"Foo\\" from fairbanks 35 days availability is 98.03%. Alert when < 99.34%. Checked at July 6, 2020 9:14 PM.",
|
||||
"statusMessage": "35 days availability is 98.03%. Alert when < 99.34%.",
|
||||
"viewInAppUrl": "http://localhost:5601/hfe/app/uptime/monitor/Zm9v?dateRangeEnd=now&dateRangeStart=2022-03-17T13%3A13%3A33.755Z&filters=%5B%5B%22observer.geo.name%22%2C%5B%22fairbanks%22%5D%5D%5D",
|
||||
},
|
||||
],
|
||||
Array [
|
||||
"xpack.uptime.alerts.actionGroups.monitorStatus",
|
||||
Object {
|
||||
"alertDetailsUrl": "mockedAlertsLocator > getLocation",
|
||||
"checkedAt": "July 6, 2020 9:14 PM",
|
||||
"configId": undefined,
|
||||
"latestErrorMessage": undefined,
|
||||
"monitorId": "unreliable",
|
||||
"monitorName": "Unreliable",
|
||||
"monitorType": "myType",
|
||||
"monitorUrl": "https://unreliable.co",
|
||||
"observerHostname": undefined,
|
||||
"observerLocation": "fairbanks",
|
||||
"reason": "Monitor \\"Unreliable\\" from fairbanks 35 days availability is 90.92%. Alert when < 99.34%. Checked at July 6, 2020 9:14 PM.",
|
||||
"statusMessage": "35 days availability is 90.92%. Alert when < 99.34%.",
|
||||
"viewInAppUrl": "http://localhost:5601/hfe/app/uptime/monitor/dW5yZWxpYWJsZQ==?dateRangeEnd=now&dateRangeStart=2022-03-17T13%3A13%3A33.755Z&filters=%5B%5B%22observer.geo.name%22%2C%5B%22fairbanks%22%5D%5D%5D",
|
||||
},
|
||||
],
|
||||
Array [
|
||||
"xpack.uptime.alerts.actionGroups.monitorStatus",
|
||||
Object {
|
||||
"alertDetailsUrl": "mockedAlertsLocator > getLocation",
|
||||
"checkedAt": "July 6, 2020 9:14 PM",
|
||||
"configId": undefined,
|
||||
"latestErrorMessage": undefined,
|
||||
"monitorId": "no-name",
|
||||
"monitorName": "no-name",
|
||||
"monitorType": "myType",
|
||||
"monitorUrl": "https://no-name.co",
|
||||
"observerHostname": undefined,
|
||||
"observerLocation": "fairbanks",
|
||||
"reason": "Monitor \\"no-name\\" from fairbanks 35 days availability is 90.92%. Alert when < 99.34%. Checked at July 6, 2020 9:14 PM.",
|
||||
"statusMessage": "35 days availability is 90.92%. Alert when < 99.34%.",
|
||||
"viewInAppUrl": "http://localhost:5601/hfe/app/uptime/monitor/bm8tbmFtZQ==?dateRangeEnd=now&dateRangeStart=2022-03-17T13%3A13%3A33.755Z&filters=%5B%5B%22observer.geo.name%22%2C%5B%22fairbanks%22%5D%5D%5D",
|
||||
},
|
||||
],
|
||||
]
|
||||
})
|
||||
);
|
||||
|
||||
expect(alertsClient.setAlertData).toHaveBeenCalledTimes(4);
|
||||
expect(alertsClient.setAlertData.mock.calls).toMatchInlineSnapshot(`
|
||||
Array [
|
||||
Array [
|
||||
Object {
|
||||
"context": Object {
|
||||
"alertDetailsUrl": "mockedAlertsLocator > getLocation",
|
||||
"checkedAt": "July 6, 2020 9:14 PM",
|
||||
"configId": undefined,
|
||||
"latestErrorMessage": undefined,
|
||||
"monitorId": "foo",
|
||||
"monitorName": "Foo",
|
||||
"monitorType": "myType",
|
||||
"monitorUrl": "https://foo.com",
|
||||
"observerHostname": undefined,
|
||||
"observerLocation": "harrisburg",
|
||||
"reason": "Monitor \\"Foo\\" from harrisburg 35 days availability is 99.28%. Alert when < 99.34%. Checked at July 6, 2020 9:14 PM.",
|
||||
"statusMessage": "35 days availability is 99.28%. Alert when < 99.34%.",
|
||||
"viewInAppUrl": "http://localhost:5601/hfe/app/uptime/monitor/Zm9v?dateRangeEnd=now&dateRangeStart=availability%20test&filters=%5B%5B%22observer.geo.name%22%2C%5B%22harrisburg%22%5D%5D%5D",
|
||||
},
|
||||
"id": "foo_https_foo_com_foo-harrisburg",
|
||||
},
|
||||
],
|
||||
Array [
|
||||
Object {
|
||||
"context": Object {
|
||||
"alertDetailsUrl": "mockedAlertsLocator > getLocation",
|
||||
"checkedAt": "July 6, 2020 9:14 PM",
|
||||
"configId": undefined,
|
||||
"latestErrorMessage": undefined,
|
||||
"monitorId": "foo",
|
||||
"monitorName": "Foo",
|
||||
"monitorType": "myType",
|
||||
"monitorUrl": "https://foo.com",
|
||||
"observerHostname": undefined,
|
||||
"observerLocation": "fairbanks",
|
||||
"reason": "Monitor \\"Foo\\" from fairbanks 35 days availability is 98.03%. Alert when < 99.34%. Checked at July 6, 2020 9:14 PM.",
|
||||
"statusMessage": "35 days availability is 98.03%. Alert when < 99.34%.",
|
||||
"viewInAppUrl": "http://localhost:5601/hfe/app/uptime/monitor/Zm9v?dateRangeEnd=now&dateRangeStart=availability%20test&filters=%5B%5B%22observer.geo.name%22%2C%5B%22fairbanks%22%5D%5D%5D",
|
||||
},
|
||||
"id": "foo_https_foo_com_foo-fairbanks",
|
||||
},
|
||||
],
|
||||
Array [
|
||||
Object {
|
||||
"context": Object {
|
||||
"alertDetailsUrl": "mockedAlertsLocator > getLocation",
|
||||
"checkedAt": "July 6, 2020 9:14 PM",
|
||||
"configId": undefined,
|
||||
"latestErrorMessage": undefined,
|
||||
"monitorId": "unreliable",
|
||||
"monitorName": "Unreliable",
|
||||
"monitorType": "myType",
|
||||
"monitorUrl": "https://unreliable.co",
|
||||
"observerHostname": undefined,
|
||||
"observerLocation": "fairbanks",
|
||||
"reason": "Monitor \\"Unreliable\\" from fairbanks 35 days availability is 90.92%. Alert when < 99.34%. Checked at July 6, 2020 9:14 PM.",
|
||||
"statusMessage": "35 days availability is 90.92%. Alert when < 99.34%.",
|
||||
"viewInAppUrl": "http://localhost:5601/hfe/app/uptime/monitor/dW5yZWxpYWJsZQ==?dateRangeEnd=now&dateRangeStart=availability%20test&filters=%5B%5B%22observer.geo.name%22%2C%5B%22fairbanks%22%5D%5D%5D",
|
||||
},
|
||||
"id": "unreliable_https_unreliable_co_unreliable-fairbanks",
|
||||
},
|
||||
],
|
||||
Array [
|
||||
Object {
|
||||
"context": Object {
|
||||
"alertDetailsUrl": "mockedAlertsLocator > getLocation",
|
||||
"checkedAt": "July 6, 2020 9:14 PM",
|
||||
"configId": undefined,
|
||||
"latestErrorMessage": undefined,
|
||||
"monitorId": "no-name",
|
||||
"monitorName": "no-name",
|
||||
"monitorType": "myType",
|
||||
"monitorUrl": "https://no-name.co",
|
||||
"observerHostname": undefined,
|
||||
"observerLocation": "fairbanks",
|
||||
"reason": "Monitor \\"no-name\\" from fairbanks 35 days availability is 90.92%. Alert when < 99.34%. Checked at July 6, 2020 9:14 PM.",
|
||||
"statusMessage": "35 days availability is 90.92%. Alert when < 99.34%.",
|
||||
"viewInAppUrl": "http://localhost:5601/hfe/app/uptime/monitor/bm8tbmFtZQ==?dateRangeEnd=now&dateRangeStart=availability%20test&filters=%5B%5B%22observer.geo.name%22%2C%5B%22fairbanks%22%5D%5D%5D",
|
||||
},
|
||||
"id": "https_no_name_co_no-name-fairbanks",
|
||||
},
|
||||
],
|
||||
]
|
||||
`);
|
||||
expect(mockGetter).not.toHaveBeenCalled();
|
||||
expect(mockAvailability).toHaveBeenCalledTimes(1);
|
||||
|
@ -1053,12 +1159,19 @@ describe('status check alert', () => {
|
|||
mockGetter.mockReturnValue(mockMonitors);
|
||||
const { server, libs, plugins } = bootstrapDependencies({ getMonitorStatus: mockGetter });
|
||||
const alert = statusCheckAlertFactory(server, libs, plugins);
|
||||
const options = mockOptions();
|
||||
const options = mockOptions(undefined, undefined, undefined, mockRecoveredAlerts);
|
||||
// @ts-ignore the executor can return `void`, but ours never does
|
||||
const state: Record<string, any> = await alert.executor(options);
|
||||
expect(options.setContext).toHaveBeenCalledTimes(2);
|
||||
mockRecoveredAlerts.forEach((alertState) => {
|
||||
expect(options.setContext).toHaveBeenCalledWith(alertState);
|
||||
expect(options.services.alertsClient.setAlertData).toHaveBeenCalledTimes(4);
|
||||
|
||||
expect(options.services.alertsClient.setAlertData).toHaveBeenNthCalledWith(3, {
|
||||
context: mockRecoveredAlerts[0],
|
||||
id: 'mock-id',
|
||||
});
|
||||
|
||||
expect(options.services.alertsClient.setAlertData).toHaveBeenNthCalledWith(4, {
|
||||
context: mockRecoveredAlerts[1],
|
||||
id: 'mock-id',
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
@@ -5,7 +5,7 @@
 * 2.0.
 */
import { DEFAULT_APP_CATEGORIES } from '@kbn/core/server';
import { GetViewInAppRelativeUrlFnOpts } from '@kbn/alerting-plugin/server';
import { AlertsClientError, GetViewInAppRelativeUrlFnOpts } from '@kbn/alerting-plugin/server';
import { min } from 'lodash';
import moment from 'moment';

@@ -354,18 +354,14 @@ export const statusCheckAlertFactory: UptimeAlertTypeFactory<ActionGroupIds> = (
    rule: {
      schedule: { interval },
    },
    services: {
      alertFactory,
      alertWithLifecycle,
      getAlertStartedDate,
      getAlertUuid,
      savedObjectsClient,
      scopedClusterClient,
    },
    services: { alertsClient, savedObjectsClient, scopedClusterClient },
    spaceId,
    state,
    startedAt,
  }) {
    if (!alertsClient) {
      throw new AlertsClientError();
    }
    const {
      stackVersion = '8.9.0',
      availability,

@@ -433,13 +429,23 @@
      const statusMessage = getStatusMessage(monitorStatusMessageParams);
      const monitorSummary = getMonitorSummary(monitorInfo, statusMessage);
      const alertId = getInstanceId(monitorInfo, monitorLoc.location);
      const alert = alertWithLifecycle({
        id: alertId,
        fields: getMonitorAlertDocument(monitorSummary),
      });
      const indexedStartedAt = getAlertStartedDate(alertId) ?? startedAt.toISOString();
      const alertUuid = getAlertUuid(alertId);
      const context = {
        ...monitorSummary,
        statusMessage,
      };

      const { uuid, start } = alertsClient.report({
        id: alertId,
        actionGroup: MONITOR_STATUS.id,
        payload: getMonitorAlertDocument(monitorSummary),
        state: {
          ...state,
          ...context,
          ...updateState(state, true),
        },
      });

      const indexedStartedAt = start ?? startedAt.toISOString();
      const relativeViewInAppUrl = getMonitorRouteFromMonitorId({
        monitorId: monitorSummary.monitorId,
        dateRangeEnd: 'now',

@@ -449,37 +455,27 @@ export const statusCheckAlertFactory: UptimeAlertTypeFactory<ActionGroupIds> = (
        },
      });

      const context = {
        ...monitorSummary,
        statusMessage,
      };

      alert.replaceState({
        ...state,
        ...context,
        ...updateState(state, true),
      });

      alert.scheduleActions(MONITOR_STATUS.id, {
        [ALERT_DETAILS_URL]: await getAlertUrl(
          alertUuid,
          spaceId,
          indexedStartedAt,
          alertsLocator,
          basePath.publicBaseUrl
        ),
        [VIEW_IN_APP_URL]: getViewInAppUrl(basePath, spaceId, relativeViewInAppUrl),
        ...context,
      alertsClient.setAlertData({
        id: alertId,
        context: {
          [ALERT_DETAILS_URL]: await getAlertUrl(
            uuid,
            spaceId,
            indexedStartedAt,
            alertsLocator,
            basePath.publicBaseUrl
          ),
          [VIEW_IN_APP_URL]: getViewInAppUrl(basePath, spaceId, relativeViewInAppUrl),
          ...context,
        },
      });
    }

    await setRecoveredAlertsContext({
      alertFactory,
    await setRecoveredAlertsContext<ActionGroupIds>({
      alertsClient,
      alertsLocator,
      basePath,
      defaultStartedAt: startedAt.toISOString(),
      getAlertStartedDate,
      getAlertUuid,
      spaceId,
    });

@@ -528,23 +524,22 @@ export const statusCheckAlertFactory: UptimeAlertTypeFactory<ActionGroupIds> = (
      );
      const monitorSummary = getMonitorSummary(monitorInfo, statusMessage);
      const alertId = getInstanceId(monitorInfo, monIdByLoc);
      const alert = alertWithLifecycle({
        id: alertId,
        fields: getMonitorAlertDocument(monitorSummary),
      });
      const alertUuid = getAlertUuid(alertId);
      const indexedStartedAt = getAlertStartedDate(alertId) ?? startedAt.toISOString();

      const context = {
        ...monitorSummary,
        statusMessage,
      };

      alert.replaceState({
        ...updateState(state, true),
        ...context,
      const { uuid, start } = alertsClient.report({
        id: alertId,
        actionGroup: MONITOR_STATUS.id,
        payload: getMonitorAlertDocument(monitorSummary),
        state: {
          ...updateState(state, true),
          ...context,
        },
      });

      const indexedStartedAt = start ?? startedAt.toISOString();
      const relativeViewInAppUrl = getMonitorRouteFromMonitorId({
        monitorId: monitorSummary.monitorId,
        dateRangeEnd: 'now',

@@ -554,26 +549,27 @@ export const statusCheckAlertFactory: UptimeAlertTypeFactory<ActionGroupIds> = (
        },
      });

      alert.scheduleActions(MONITOR_STATUS.id, {
        [ALERT_DETAILS_URL]: await getAlertUrl(
          alertUuid,
          spaceId,
          indexedStartedAt,
          alertsLocator,
          basePath.publicBaseUrl
        ),
        [VIEW_IN_APP_URL]: getViewInAppUrl(basePath, spaceId, relativeViewInAppUrl),
        ...context,
      alertsClient.setAlertData({
        id: alertId,
        context: {
          [ALERT_DETAILS_URL]: await getAlertUrl(
            uuid,
            spaceId,
            indexedStartedAt,
            alertsLocator,
            basePath.publicBaseUrl
          ),
          [VIEW_IN_APP_URL]: getViewInAppUrl(basePath, spaceId, relativeViewInAppUrl),
          ...context,
        },
      });
    });

    await setRecoveredAlertsContext({
      alertFactory,
      alertsClient,
      alertsLocator,
      basePath,
      defaultStartedAt: startedAt.toISOString(),
      getAlertStartedDate,
      getAlertUuid,
      spaceId,
    });

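For readers skimming the diff, the status-check executor now follows a report-then-set-context pattern instead of alertWithLifecycle/replaceState/scheduleActions. The sketch below is a minimal, self-contained illustration of that pattern only; `AlertsClientLike`, `reportMonitorStatusAlert`, and the `'monitorStatus'` action-group string are placeholders invented for the example, not the real `@kbn/alerting-plugin` types.

```ts
// Minimal stand-in for the alerts client surface used in the diff above;
// the real interface comes from @kbn/alerting-plugin and is richer.
export interface AlertsClientLike {
  report(args: {
    id: string;
    actionGroup: string;
    payload?: Record<string, unknown>;
    state?: Record<string, unknown>;
  }): { uuid: string; start?: string | null };
  setAlertData(args: { id: string; context: Record<string, unknown> }): void;
}

// Report the alert first (persisting the alert-as-data document and getting back
// its UUID and start time), then attach the action context in a second call.
export function reportMonitorStatusAlert(
  alertsClient: AlertsClientLike,
  alertId: string,
  monitorSummary: Record<string, unknown>,
  startedAt: Date
) {
  const { uuid, start } = alertsClient.report({
    id: alertId,
    actionGroup: 'monitorStatus', // placeholder for MONITOR_STATUS.id
    payload: monitorSummary, // document fields for the alerts-as-data index
    state: monitorSummary, // persisted alert state, as replaceState did before
  });

  // The diff falls back to the executor's startedAt when report() returns no start.
  const indexedStartedAt = start ?? startedAt.toISOString();

  alertsClient.setAlertData({
    id: alertId,
    context: { alertUuid: uuid, indexedStartedAt, ...monitorSummary },
  });
}
```

The ordering is the important part: `report()` supplies the alert UUID and start time that the action context (details URL, view-in-app URL) is built from before `setAlertData()` is called.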
@@ -12,6 +12,7 @@ import { alertsMock } from '@kbn/alerting-plugin/server/mocks';
import type { AlertsLocatorParams } from '@kbn/observability-plugin/common';
import { LocatorPublic } from '@kbn/share-plugin/common';
import { SharePluginSetup } from '@kbn/share-plugin/server';
import { publicAlertsClientMock } from '@kbn/alerting-plugin/server/alerts_client/alerts_client.mock';
import { UMServerLibs } from '../../lib';
import { UptimeCorePluginsSetup, UptimeServerSetup } from '../../adapters';
import { getUptimeESMockClient } from '../../requests/test_helpers';

@@ -84,6 +85,7 @@ export const createRuleTypeMocks = (recoveredAlerts: Array<Record<string, any>>
    getAlertStartedDate: jest.fn().mockReturnValue('2022-03-17T13:13:33.755Z'),
    getAlertUuid: jest.fn().mockReturnValue('mock-alert-uuid'),
    logger: loggerMock,
    alertsClient: publicAlertsClientMock.create(),
  };

  return {

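The rule-type mocks now hand the executors an `alertsClient` built from `publicAlertsClientMock.create()`. For orientation, a hand-rolled stub covering the calls the Uptime executors make could look like the sketch below; the factory name and return shape are assumptions for illustration, not the real mock from `@kbn/alerting-plugin`.

```ts
import { jest } from '@jest/globals';

// Rough approximation of what publicAlertsClientMock.create() provides for these
// tests: jest mocks for report, setAlertData, and getRecoveredAlerts.
export const createAlertsClientStub = () => ({
  // Echo back a deterministic uuid and a start time, like the mockOptions helper does.
  report: jest.fn((args: { id: string }) => ({
    uuid: `uuid-${args.id}`,
    start: new Date().toISOString(),
  })),
  setAlertData: jest.fn(),
  getRecoveredAlerts: jest.fn(() => [] as unknown[]),
});
```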
@@ -6,7 +6,6 @@
 */
import moment from 'moment';
import { tlsAlertFactory, getCertSummary } from './tls';
import { TLS } from '../../../../common/constants/uptime_alerts';
import { CertResult } from '../../../../common/runtime_types';
import { createRuleTypeMocks, bootstrapDependencies } from './test_utils';
import { DYNAMIC_SETTINGS_DEFAULTS } from '../../../../common/constants';

@@ -60,6 +59,7 @@ const mockCertResult: CertResult = {

const mockRecoveredAlerts = [
  {
    id: 'recovered-1',
    alertDetailsUrl: 'mockedAlertsLocator > getLocation',
    commonName: mockCertResult.certs[0].common_name ?? '',
    issuer: mockCertResult.certs[0].issuer ?? '',

@@ -67,6 +67,7 @@ const mockRecoveredAlerts = [
    status: 'expired',
  },
  {
    id: 'recovered-2',
    alertDetailsUrl: 'mockedAlertsLocator > getLocation',
    commonName: mockCertResult.certs[1].common_name ?? '',
    issuer: mockCertResult.certs[1].issuer ?? '',

@@ -75,12 +76,33 @@ const mockRecoveredAlerts = [
  },
];

const mockOptions = (state = {}): any => {
const mockOptions = (state = {}, recoveredAlerts: typeof mockRecoveredAlerts = []): any => {
  const { services, setContext } = createRuleTypeMocks(mockRecoveredAlerts);
  const params = {
    timerange: { from: 'now-15m', to: 'now' },
  };

  services.alertsClient.report.mockImplementation((param: any) => {
    return {
      uuid: `uuid-${param.id}`,
      start: new Date().toISOString(),
      alertDoc: {},
    };
  });

  services.alertsClient.getRecoveredAlerts.mockImplementation((param: any) => {
    return recoveredAlerts.map((alert) => ({
      alert: {
        getId: () => alert.id,
        getUuid: () => 'mock-uuid',
        getState: () => alert,
        getStart: () => new Date().toISOString(),
        setContext,
        context: {},
      },
    }));
  });

  return {
    params,
    state,

@@ -115,23 +137,40 @@ describe('tls alert', () => {
      const alert = tlsAlertFactory(server, libs, plugins);
      const options = mockOptions();
      const {
        services: { alertWithLifecycle },
        services: { alertsClient },
      } = options;
      await alert.executor(options);
      expect(mockGetter).toHaveBeenCalledTimes(1);
      expect(alertWithLifecycle).toHaveBeenCalledTimes(4);
      expect(alertsClient.report).toHaveBeenCalledTimes(4);
      mockCertResult.certs.forEach((cert) => {
        expect(alertWithLifecycle).toBeCalledWith({
          fields: expect.objectContaining({
        const context = {
          commonName: cert.common_name,
          issuer: cert.issuer,
          status: 'expired',
        };

        expect(alertsClient.report).toBeCalledWith({
          id: `${cert.common_name}-${cert.issuer?.replace(/\s/g, '_')}-${cert.sha256}`,
          actionGroup: 'xpack.uptime.alerts.actionGroups.tlsCertificate',
          state: expect.objectContaining(context),
        });

        expect(alertsClient.setAlertData).toBeCalledWith({
          id: `${cert.common_name}-${cert.issuer?.replace(/\s/g, '_')}-${cert.sha256}`,
          context: expect.objectContaining(context),
          payload: expect.objectContaining({
            'tls.server.x509.subject.common_name': cert.common_name,
            'tls.server.x509.issuer.common_name': cert.issuer,
            'tls.server.x509.not_after': cert.not_after,
            'tls.server.x509.not_before': cert.not_before,
            'tls.server.hash.sha256': cert.sha256,
          }),
          id: `${cert.common_name}-${cert.issuer?.replace(/\s/g, '_')}-${cert.sha256}`,
        });
      });

      expect(alertsClient.report).toHaveBeenCalledTimes(4);
      expect(alertsClient.setAlertData).toHaveBeenCalledTimes(4);

      expect(mockGetter).toBeCalledWith(
        expect.objectContaining({
          pageIndex: 0,

@@ -142,21 +181,6 @@ describe('tls alert', () => {
          direction: 'desc',
        })
      );
      const [{ value: alertInstanceMock }] = alertWithLifecycle.mock.results;
      expect(alertInstanceMock.replaceState).toHaveBeenCalledTimes(4);
      mockCertResult.certs.forEach((cert) => {
        const context = {
          commonName: cert.common_name,
          issuer: cert.issuer,
          status: 'expired',
        };
        expect(alertInstanceMock.replaceState).toBeCalledWith(expect.objectContaining(context));
        expect(alertInstanceMock.scheduleActions).toBeCalledWith(
          TLS.id,
          expect.objectContaining(context)
        );
      });
      expect(alertInstanceMock.scheduleActions).toHaveBeenCalledTimes(4);
    });

    it('does not trigger when cert is not considered aging or expiring', async () => {

@@ -204,11 +228,11 @@ describe('tls alert', () => {
      const alert = tlsAlertFactory(server, libs, plugins);
      const options = mockOptions();
      const {
        services: { alertWithLifecycle },
        services: { alertsClient },
      } = options;
      await alert.executor(options);
      expect(mockGetter).toHaveBeenCalledTimes(1);
      expect(alertWithLifecycle).toHaveBeenCalledTimes(0);
      expect(alertsClient.report).toHaveBeenCalledTimes(0);
      expect(mockGetter).toBeCalledWith(
        expect.objectContaining({
          pageIndex: 0,

@@ -253,12 +277,18 @@ describe('tls alert', () => {
      mockGetter.mockReturnValue(mockCertResult);
      const { server, libs, plugins } = bootstrapDependencies({ getCerts: mockGetter });
      const alert = tlsAlertFactory(server, libs, plugins);
      const options = mockOptions();
      const options = mockOptions(undefined, mockRecoveredAlerts);
      // @ts-ignore the executor can return `void`, but ours never does
      const state: Record<string, any> = await alert.executor(options);
      expect(options.setContext).toHaveBeenCalledTimes(2);
      mockRecoveredAlerts.forEach((alertState) => {
        expect(options.setContext).toHaveBeenCalledWith(alertState);
      const {
        services: { alertsClient },
      } = options;
      await alert.executor(options);
      expect(alertsClient.setAlertData).toHaveBeenCalledTimes(6);
      mockRecoveredAlerts.forEach((recoveredAlert) => {
        expect(alertsClient.setAlertData).toHaveBeenCalledWith({
          id: recoveredAlert.id,
          context: recoveredAlert,
        });
      });
    });
  });

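The TLS test now exercises the recovered-alerts path through `alertsClient.getRecoveredAlerts()`: one `setAlertData` call per reported certificate plus one per recovered alert, which matches the 6 calls asserted in the last test (4 certificates, 2 recovered alerts). The wrapper shape the mock returns looks roughly like the following; `RecoveredAlertWrapperLike` is a stand-in named for this sketch, not an exported type.

```ts
// Approximate shape of the objects the getRecoveredAlerts mock yields in the
// test above; the real recovered-alert wrapper in @kbn/alerting-plugin has more
// members than the few accessors the Uptime helpers read.
export interface RecoveredAlertWrapperLike {
  alert: {
    getId: () => string;
    getUuid: () => string;
    getState: () => Record<string, unknown>;
    getStart: () => string | undefined;
  };
}

// Example instance mirroring mockRecoveredAlerts[0].
export const exampleRecovered: RecoveredAlertWrapperLike = {
  alert: {
    getId: () => 'recovered-1',
    getUuid: () => 'mock-uuid',
    getState: () => ({ status: 'expired' }),
    getStart: () => new Date().toISOString(),
  },
};
```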
@@ -6,7 +6,7 @@
 */

import { DEFAULT_APP_CATEGORIES } from '@kbn/core/server';
import { GetViewInAppRelativeUrlFnOpts } from '@kbn/alerting-plugin/server';
import { AlertsClientError, GetViewInAppRelativeUrlFnOpts } from '@kbn/alerting-plugin/server';
import moment from 'moment';
import { ActionGroupIdsOf } from '@kbn/alerting-plugin/common';
import { schema } from '@kbn/config-schema';

@@ -149,19 +149,15 @@ export const tlsAlertFactory: UptimeAlertTypeFactory<ActionGroupIds> = (
  doesSetRecoveryContext: true,
  async executor({
    params,
    services: {
      alertFactory,
      alertWithLifecycle,
      getAlertStartedDate,
      getAlertUuid,
      savedObjectsClient,
      scopedClusterClient,
    },
    services: { alertsClient, savedObjectsClient, scopedClusterClient },
    spaceId,
    startedAt,
    state,
    rule,
  }) {
    if (!alertsClient) {
      throw new AlertsClientError();
    }
    const { share, basePath } = _server;
    const alertsLocator: LocatorPublic<AlertsLocatorParams> | undefined =
      share.url.locators.get(alertsLocatorID);

@@ -215,47 +211,48 @@ export const tlsAlertFactory: UptimeAlertTypeFactory<ActionGroupIds> = (
      }

      const alertId = `${cert.common_name}-${cert.issuer?.replace(/\s/g, '_')}-${cert.sha256}`;
      const alertUuid = getAlertUuid(alertId);
      const indexedStartedAt = getAlertStartedDate(alertId) ?? startedAt.toISOString();

      const alertInstance = alertWithLifecycle({
      const { uuid, start } = alertsClient.report({
        id: alertId,
        fields: {
        actionGroup: TLS.id,
        state: {
          ...updateState(state, foundCerts),
          ...summary,
        },
      });

      const indexedStartedAt = start ?? startedAt.toISOString();

      alertsClient.setAlertData({
        id: alertId,
        context: {
          [ALERT_DETAILS_URL]: await getAlertUrl(
            uuid,
            spaceId,
            indexedStartedAt,
            alertsLocator,
            basePath.publicBaseUrl
          ),
          ...summary,
        },
        payload: {
          'tls.server.x509.subject.common_name': cert.common_name,
          'tls.server.x509.issuer.common_name': cert.issuer,
          'tls.server.x509.not_after': cert.not_after,
          'tls.server.x509.not_before': cert.not_before,
          'tls.server.hash.sha256': cert.sha256,
          [ALERT_REASON]: generateAlertMessage(TlsTranslations.defaultActionMessage, summary),
          [ALERT_UUID]: alertUuid,
          [ALERT_UUID]: uuid,
        },
      });

      alertInstance.replaceState({
        ...updateState(state, foundCerts),
        ...summary,
      });

      alertInstance.scheduleActions(TLS.id, {
        [ALERT_DETAILS_URL]: await getAlertUrl(
          alertUuid,
          spaceId,
          indexedStartedAt,
          alertsLocator,
          basePath.publicBaseUrl
        ),
        ...summary,
      });
    });
    }

    await setRecoveredAlertsContext({
      alertFactory,
    await setRecoveredAlertsContext<ActionGroupIds>({
      alertsClient,
      alertsLocator,
      basePath,
      defaultStartedAt: startedAt.toISOString(),
      getAlertStartedDate,
      getAlertUuid,
      spaceId,
    });

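`setRecoveredAlertsContext` is now generic over the rule's action group ids and works against the alerts client rather than the alert factory. A simplified, assumed sketch of such a recovered-alerts pass is shown below; the helper name `setRecoveredContext`, the `recoveredAt` key, and the exact context fields are illustrative only (the real helper also builds alert-details URLs from the alerts locator and base path passed in above, as the tests' `alertDetailsUrl` values show).

```ts
// Assumed, simplified model: read recovered alerts from the alerts client and
// attach action context to each one so recovery actions have data to render.
export interface RecoveredAlertsClientLike {
  getRecoveredAlerts(): Array<{
    alert: {
      getId: () => string;
      getUuid: () => string;
      getState: () => Record<string, unknown>;
    };
  }>;
  setAlertData(args: { id: string; context: Record<string, unknown> }): void;
}

export function setRecoveredContext(
  alertsClient: RecoveredAlertsClientLike,
  defaultStartedAt: string
) {
  for (const { alert } of alertsClient.getRecoveredAlerts()) {
    alertsClient.setAlertData({
      id: alert.getId(),
      context: {
        ...alert.getState(), // carry the last known state into the recovery context
        alertUuid: alert.getUuid(),
        recoveredAt: defaultStartedAt, // placeholder field for this sketch
      },
    });
  }
}
```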
@@ -4,15 +4,13 @@
 * 2.0; you may not use this file except in compliance with the Elastic License
 * 2.0.
 */
import { AlertTypeWithExecutor } from '@kbn/rule-registry-plugin/server';
import {
  AlertInstanceContext,
  AlertInstanceState,
  RecoveredActionGroupId,
} from '@kbn/alerting-plugin/common';
import { RuleType } from '@kbn/alerting-plugin/server';
import { LifecycleAlertServices } from '@kbn/rule-registry-plugin/server';
import { DefaultAlert } from '@kbn/alerts-as-data-utils';
import { DefaultAlert, ObservabilityUptimeAlert } from '@kbn/alerts-as-data-utils';
import { UMServerLibs } from '../lib';
import { UptimeCorePluginsSetup, UptimeServerSetup } from '../adapters';

@@ -22,11 +20,15 @@ import { UptimeCorePluginsSetup, UptimeServerSetup } from '../adapters';
 *
 * When we register all the alerts we can inject this field.
 */
export type DefaultUptimeAlertInstance<TActionGroupIds extends string> = AlertTypeWithExecutor<
export type DefaultUptimeAlertInstance<TActionGroupIds extends string> = RuleType<
  Record<string, any>,
  never,
  Record<string, any>,
  AlertInstanceState,
  AlertInstanceContext,
  LifecycleAlertServices<Record<string, any>, AlertInstanceContext, TActionGroupIds>
  TActionGroupIds,
  RecoveredActionGroupId,
  ObservabilityUptimeAlert
>;

export type UptimeAlertTypeFactory<TActionGroupIds extends string> = (

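`DefaultUptimeAlertInstance` is now expressed directly as alerting's `RuleType`, with the recovery action group and the `ObservabilityUptimeAlert` alert-as-data shape supplied explicitly instead of the rule-registry lifecycle services. The sketch below only illustrates how a concrete rule's action group ids feed that generic parameter; `MONITOR_STATUS`, its id string, and `ActionGroupIdsOfLike` are placeholders invented for the example, not the real constants or the real `ActionGroupIdsOf` helper.

```ts
// Placeholder action group definition for the sketch.
export const MONITOR_STATUS = {
  id: 'xpack.uptime.alerts.actionGroups.monitorStatus',
  name: 'Uptime monitor status',
} as const;

// Hand-rolled stand-in for ActionGroupIdsOf from @kbn/alerting-plugin/common:
// it resolves to the union of action group id literals.
export type ActionGroupIdsOfLike<T extends { id: string }> = T['id'];

// This union is what a rule factory would pass into
// DefaultUptimeAlertInstance<...> / UptimeAlertTypeFactory<...> above.
export type StatusRuleActionGroupIds = ActionGroupIdsOfLike<typeof MONITOR_STATUS>;
```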
@@ -6,7 +6,7 @@
 */

import { Logger } from '@kbn/core/server';
import { createLifecycleRuleTypeFactory, IRuleDataClient } from '@kbn/rule-registry-plugin/server';
import { IRuleDataClient } from '@kbn/rule-registry-plugin/server';
import { getRequestValidation } from '@kbn/core-http-server';
import { INITIAL_REST_VERSION } from '../../common/constants';
import { DynamicSettingsSchema } from './routes/dynamic_settings';

@@ -151,14 +151,9 @@ export const initUptimeServer = (
  const tlsAlert = tlsAlertFactory(server, libs, plugins);
  const durationAlert = durationAnomalyAlertFactory(server, libs, plugins);

  const createLifecycleRuleType = createLifecycleRuleTypeFactory({
    ruleDataClient,
    logger,
  });

  registerType(createLifecycleRuleType(statusAlert));
  registerType(createLifecycleRuleType(tlsAlert));
  registerType(createLifecycleRuleType(durationAlert));
  registerType(statusAlert);
  registerType(tlsAlert);
  registerType(durationAlert);

  /* TLS Legacy rule supported at least through 8.0.
   * Not registered with RAC */

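With the lifecycle wrapper gone, the factories hand `registerType` complete rule-type definitions, and alert-as-data handling lives inside each rule type via the alerts client rather than being layered on by `createLifecycleRuleTypeFactory`. A minimal sketch of that registration flow, with placeholder types and names, looks like this:

```ts
// Placeholder for the piece of a rule-type definition this sketch cares about;
// the real definitions are the objects returned by the Uptime rule factories.
export interface RuleTypeDefinitionLike {
  id: string;
  name: string;
}

// Register each definition directly; no rule-registry lifecycle wrapper is applied.
export function registerUptimeRuleTypes(
  registerType: (ruleType: RuleTypeDefinitionLike) => void,
  ruleTypes: RuleTypeDefinitionLike[]
) {
  for (const ruleType of ruleTypes) {
    registerType(ruleType);
  }
}
```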