[Response Ops][Alerting] Update FAAD AlertsClient to support AAD payload (#158404)

Resolves https://github.com/elastic/kibana/issues/156443,
https://github.com/elastic/kibana/issues/156445

## Summary

- Updates `AlertsClient` with a `create` API that allows rule executors to
report alerts with an AAD payload, along with values for `actionGroup`,
`context`, and `state`. This proxies the `LegacyAlertsClient` to create
alerts via the alert factory while also saving the reported payload (see
the sketch below).
- When alerts are bulk written at the end of rule execution, the AAD
payload (if specified) is included in the alert document.
- Deprecates the alert factory that is passed into the rule executors;
this PR does not remove or replace existing usages of the alert factory.
- Exposes `AlertsClient` services to the rule executors. Note that this
PR does not migrate any rule type to use this service.
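
For illustration, here is a minimal sketch of reporting an alert from a
rule executor through the new client. The interface mirrors the
`ReportedAlert`/`PublicAlertsClient` types added in this PR; the executor
wiring, field names, and values are assumptions:

```typescript
// Shapes mirror the ReportedAlert/PublicAlertsClient types added in this PR;
// the executor wiring and field values are simplified assumptions.
interface MinimalPublicAlertsClient {
  create(alert: {
    id: string; // alert instance id
    actionGroup: string;
    state?: Record<string, unknown>; // proxied to the legacy alert's replaceState()
    context?: Record<string, unknown>; // passed through to scheduleActions()
    payload?: Record<string, unknown>; // AAD payload saved for the alert document
  }): void;
}

async function executor({
  alertsClient,
}: {
  alertsClient: MinimalPublicAlertsClient | null;
}) {
  // Report one alert with an AAD payload; the payload is merged into the
  // alert document when alerts are bulk written at the end of execution.
  alertsClient?.create({
    id: 'host-1',
    actionGroup: 'default',
    state: { count: 3 },
    context: { host: 'host-1' },
    payload: { count: 3, url: 'https://example.com/hosts/host-1' },
  });
}
```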

This PR does not opt any rule types into writing the AAD payload or
using the `AlertsClient` API, but it updates the AAD functional test to do so.
To test it out with the ES query rule type, use the following commit:
1b1e139f80

## Followup issues
- This PR does not add a recovery API to the FAAD `AlertsClient`, so
alerts reported via the new alerts client currently have no way of
specifying a recovered payload.

## To Verify
- Verify that rule-registry rule types still work as expected
- Verify that non-rule-registry rule types still work as expected
- Check out [this
commit](1b1e139f80)
which onboards the ES query rule type onto FAAD. Create an ES query rule
that alerts and then recovers, and verify that the alert documents look
as expected. Alternatively, you can modify your own rule type to
register with FAAD (a registration sketch follows) and write alerts,
then verify that the alert documents look as expected.
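
The sketch below shows roughly how a rule type might declare FAAD
support. Only `alerts.context` and `alerts.isSpaceAware` are confirmed by
this PR's diff; `registerType` and the `mappings.fieldMap` shape are
assumptions for illustration:

```typescript
// Hypothetical wiring: only `alerts.context` and `alerts.isSpaceAware` are
// confirmed by this PR's diff; `registerType` and the `mappings.fieldMap`
// shape are assumptions for illustration.
declare const alerting: { registerType(ruleType: unknown): void };

alerting.registerType({
  id: 'test.rule-type',
  // ...other required rule type fields (name, actionGroups, executor, etc.)
  alerts: {
    context: 'test', // alert resources installed as .alerts-test.alerts-*
    isSpaceAware: true, // namespace indices by space rather than 'default'
    mappings: {
      fieldMap: {
        count: { type: 'long', required: false },
        url: { type: 'keyword', required: false },
      },
    },
  },
});
```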

### Checklist 

- [x] [Unit or functional
tests](https://www.elastic.co/guide/en/kibana/master/development-tests.html)
were updated or added to match the most common scenarios
Ying Mao committed 2023-06-07 12:08:43 -04:00 (committed by GitHub)
parent b5adaecb6a · commit 3aa3f04abf
39 changed files with 2303 additions and 351 deletions

View file

@ -7,13 +7,16 @@
const createAlertsClientMock = () => {
return jest.fn().mockImplementation(() => {
return {
initializeExecution: jest.fn(),
processAndLogAlerts: jest.fn(),
getTrackedAlerts: jest.fn(),
getProcessedAlerts: jest.fn(),
getAlertsToSerialize: jest.fn(),
hasReachedAlertLimit: jest.fn(),
checkLimitUsage: jest.fn(),
getExecutorServices: jest.fn(),
persistAlerts: jest.fn(),
factory: jest.fn(),
client: jest.fn(),
};
});
};
@ -21,3 +24,18 @@ const createAlertsClientMock = () => {
export const alertsClientMock = {
create: createAlertsClientMock(),
};
const createPublicAlertsClientMock = () => {
return jest.fn().mockImplementation(() => {
return {
create: jest.fn(),
getAlertLimitValue: jest.fn(),
setAlertLimitReached: jest.fn(),
getRecoveredAlerts: jest.fn(),
};
});
};
export const publicAlertsClientMock = {
create: createPublicAlertsClientMock(),
};

View file

@ -12,7 +12,7 @@ import { Alert } from '../alert/alert';
import { AlertsClient } from './alerts_client';
import { AlertRuleData } from './types';
import { legacyAlertsClientMock } from './legacy_alerts_client.mock';
import { range } from 'lodash';
import { keys, range } from 'lodash';
import { alertingEventLoggerMock } from '../lib/alerting_event_logger/alerting_event_logger.mock';
import { ruleRunMetricsStoreMock } from '../lib/rule_run_metrics_store.mock';
@ -47,7 +47,11 @@ const ruleType: jest.Mocked<UntypedNormalizedRuleType> = {
};
const mockLegacyAlertsClient = legacyAlertsClientMock.create();
const mockReplaceState = jest.fn();
const mockScheduleActions = jest
.fn()
.mockImplementation(() => ({ replaceState: mockReplaceState }));
const mockCreate = jest.fn().mockImplementation(() => ({ scheduleActions: mockScheduleActions }));
const alertRuleData: AlertRuleData = {
consumer: 'bar',
executionId: '5f6aa57d-3e22-484e-bae8-cbed868f4d28',
@ -68,7 +72,7 @@ describe('Alerts Client', () => {
});
beforeEach(() => {
jest.resetAllMocks();
jest.clearAllMocks();
logger = loggingSystemMock.createLogger();
});
@ -290,7 +294,7 @@ describe('Alerts Client', () => {
});
});
describe('test getAlertsToSerialize()', () => {
describe('persistAlerts()', () => {
test('should index new alerts', async () => {
const alertsClient = new AlertsClient<{}, {}, {}, 'default', 'recovered'>({
logger,
@ -309,7 +313,7 @@ describe('Alerts Client', () => {
});
// Report 2 new alerts
const alertExecutorService = alertsClient.getExecutorServices();
const alertExecutorService = alertsClient.factory();
alertExecutorService.create('1').scheduleActions('default');
alertExecutorService.create('2').scheduleActions('default');
@ -322,8 +326,9 @@ describe('Alerts Client', () => {
maintenanceWindowIds: [],
});
const { alertsToReturn } = await alertsClient.getAlertsToSerialize();
await alertsClient.persistAlerts();
const { alertsToReturn } = alertsClient.getAlertsToSerialize();
const uuid1 = alertsToReturn['1'].meta?.uuid;
const uuid2 = alertsToReturn['2'].meta?.uuid;
@ -496,7 +501,7 @@ describe('Alerts Client', () => {
});
// Report 1 new alert and 1 active alert
const alertExecutorService = alertsClient.getExecutorServices();
const alertExecutorService = alertsClient.factory();
alertExecutorService.create('1').scheduleActions('default');
alertExecutorService.create('2').scheduleActions('default');
@ -509,8 +514,9 @@ describe('Alerts Client', () => {
maintenanceWindowIds: [],
});
const { alertsToReturn } = await alertsClient.getAlertsToSerialize();
await alertsClient.persistAlerts();
const { alertsToReturn } = alertsClient.getAlertsToSerialize();
const uuid2 = alertsToReturn['2'].meta?.uuid;
expect(clusterClient.bulk).toHaveBeenCalledWith({
@ -738,7 +744,7 @@ describe('Alerts Client', () => {
});
// Report 1 new alert and 1 active alert, recover 1 alert
const alertExecutorService = alertsClient.getExecutorServices();
const alertExecutorService = alertsClient.factory();
alertExecutorService.create('2').scheduleActions('default');
alertExecutorService.create('3').scheduleActions('default');
@ -751,8 +757,9 @@ describe('Alerts Client', () => {
maintenanceWindowIds: [],
});
const { alertsToReturn } = await alertsClient.getAlertsToSerialize();
await alertsClient.persistAlerts();
const { alertsToReturn } = alertsClient.getAlertsToSerialize();
const uuid3 = alertsToReturn['3'].meta?.uuid;
expect(clusterClient.bulk).toHaveBeenCalledWith({
@ -893,5 +900,441 @@ describe('Alerts Client', () => {
],
});
});
test('should not try to index if no alerts', async () => {
const alertsClient = new AlertsClient<{}, {}, {}, 'default', 'recovered'>({
logger,
elasticsearchClientPromise: Promise.resolve(clusterClient),
ruleType,
namespace: 'default',
rule: alertRuleData,
});
await alertsClient.initializeExecution({
maxAlerts,
ruleLabel: `test: rule-name`,
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
activeAlertsFromState: {},
recoveredAlertsFromState: {},
});
// Report no alerts
alertsClient.processAndLogAlerts({
eventLogger: alertingEventLogger,
ruleRunMetricsStore,
shouldLogAlerts: false,
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
notifyWhen: RuleNotifyWhen.CHANGE,
maintenanceWindowIds: [],
});
await alertsClient.persistAlerts();
expect(clusterClient.bulk).not.toHaveBeenCalled();
});
test('should log if bulk indexing fails for some alerts', async () => {
clusterClient.bulk.mockResponseOnce({
took: 1,
errors: true,
items: [
{
index: {
_index: '.internal.alerts-test.alerts-default-000001',
status: 400,
error: {
type: 'action_request_validation_exception',
reason: 'Validation Failed: 1: index is missing;2: type is missing;',
},
},
},
{
index: {
_index: '.internal.alerts-test.alerts-default-000002',
_id: '1',
_version: 1,
result: 'created',
_shards: {
total: 2,
successful: 1,
failed: 0,
},
status: 201,
_seq_no: 0,
_primary_term: 1,
},
},
],
});
const alertsClient = new AlertsClient<{}, {}, {}, 'default', 'recovered'>({
logger,
elasticsearchClientPromise: Promise.resolve(clusterClient),
ruleType,
namespace: 'default',
rule: alertRuleData,
});
await alertsClient.initializeExecution({
maxAlerts,
ruleLabel: `test: rule-name`,
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
activeAlertsFromState: {},
recoveredAlertsFromState: {},
});
// Report 2 new alerts
const alertExecutorService = alertsClient.factory();
alertExecutorService.create('1').scheduleActions('default');
alertExecutorService.create('2').scheduleActions('default');
alertsClient.processAndLogAlerts({
eventLogger: alertingEventLogger,
ruleRunMetricsStore,
shouldLogAlerts: false,
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
notifyWhen: RuleNotifyWhen.CHANGE,
maintenanceWindowIds: [],
});
await alertsClient.persistAlerts();
expect(clusterClient.bulk).toHaveBeenCalled();
expect(logger.error).toHaveBeenCalledWith(
`Error writing 1 out of 2 alerts - [{\"type\":\"action_request_validation_exception\",\"reason\":\"Validation Failed: 1: index is missing;2: type is missing;\"}]`
);
});
test('should log and swallow error if bulk indexing throws error', async () => {
clusterClient.bulk.mockImplementation(() => {
throw new Error('fail');
});
const alertsClient = new AlertsClient<{}, {}, {}, 'default', 'recovered'>({
logger,
elasticsearchClientPromise: Promise.resolve(clusterClient),
ruleType,
namespace: 'default',
rule: alertRuleData,
});
await alertsClient.initializeExecution({
maxAlerts,
ruleLabel: `test: rule-name`,
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
activeAlertsFromState: {},
recoveredAlertsFromState: {},
});
// Report 2 new alerts
const alertExecutorService = alertsClient.factory();
alertExecutorService.create('1').scheduleActions('default');
alertExecutorService.create('2').scheduleActions('default');
alertsClient.processAndLogAlerts({
eventLogger: alertingEventLogger,
ruleRunMetricsStore,
shouldLogAlerts: false,
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
notifyWhen: RuleNotifyWhen.CHANGE,
maintenanceWindowIds: [],
});
await alertsClient.persistAlerts();
expect(clusterClient.bulk).toHaveBeenCalled();
expect(logger.error).toHaveBeenCalledWith(
`Error writing 2 alerts to .alerts-test.alerts-default - fail`
);
});
});
describe('create()', () => {
test('should create legacy alert with id, action group', async () => {
mockLegacyAlertsClient.factory.mockImplementation(() => ({ create: mockCreate }));
const spy = jest
.spyOn(LegacyAlertsClientModule, 'LegacyAlertsClient')
.mockImplementation(() => mockLegacyAlertsClient);
const alertsClient = new AlertsClient<{}, {}, {}, 'default', 'recovered'>({
logger,
elasticsearchClientPromise: Promise.resolve(clusterClient),
ruleType,
namespace: 'default',
rule: alertRuleData,
});
await alertsClient.initializeExecution({
maxAlerts,
ruleLabel: `test: rule-name`,
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
activeAlertsFromState: {},
recoveredAlertsFromState: {},
});
// Report 2 new alerts
alertsClient.create({ id: '1', actionGroup: 'default', state: {}, context: {} });
alertsClient.create({ id: '2', actionGroup: 'default', state: {}, context: {} });
expect(mockCreate).toHaveBeenCalledTimes(2);
expect(mockCreate).toHaveBeenNthCalledWith(1, '1');
expect(mockCreate).toHaveBeenNthCalledWith(2, '2');
expect(mockScheduleActions).toHaveBeenCalledTimes(2);
expect(mockScheduleActions).toHaveBeenNthCalledWith(1, 'default', {});
expect(mockScheduleActions).toHaveBeenNthCalledWith(2, 'default', {});
expect(mockReplaceState).not.toHaveBeenCalled();
spy.mockRestore();
});
test('should set context if defined', async () => {
mockLegacyAlertsClient.factory.mockImplementation(() => ({ create: mockCreate }));
const spy = jest
.spyOn(LegacyAlertsClientModule, 'LegacyAlertsClient')
.mockImplementation(() => mockLegacyAlertsClient);
const alertsClient = new AlertsClient<{}, {}, { foo?: string }, 'default', 'recovered'>({
logger,
elasticsearchClientPromise: Promise.resolve(clusterClient),
ruleType,
namespace: 'default',
rule: alertRuleData,
});
await alertsClient.initializeExecution({
maxAlerts,
ruleLabel: `test: rule-name`,
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
activeAlertsFromState: {},
recoveredAlertsFromState: {},
});
// Report 2 new alerts
alertsClient.create({
id: '1',
actionGroup: 'default',
state: {},
context: { foo: 'cheese' },
});
alertsClient.create({ id: '2', actionGroup: 'default', state: {}, context: {} });
expect(mockCreate).toHaveBeenCalledTimes(2);
expect(mockCreate).toHaveBeenNthCalledWith(1, '1');
expect(mockCreate).toHaveBeenNthCalledWith(2, '2');
expect(mockScheduleActions).toHaveBeenCalledTimes(2);
expect(mockScheduleActions).toHaveBeenNthCalledWith(1, 'default', { foo: 'cheese' });
expect(mockScheduleActions).toHaveBeenNthCalledWith(2, 'default', {});
expect(mockReplaceState).not.toHaveBeenCalled();
spy.mockRestore();
});
test('should set state if defined', async () => {
mockLegacyAlertsClient.factory.mockImplementation(() => ({ create: mockCreate }));
const spy = jest
.spyOn(LegacyAlertsClientModule, 'LegacyAlertsClient')
.mockImplementation(() => mockLegacyAlertsClient);
const alertsClient = new AlertsClient<{}, { count: number }, {}, 'default', 'recovered'>({
logger,
elasticsearchClientPromise: Promise.resolve(clusterClient),
ruleType,
namespace: 'default',
rule: alertRuleData,
});
await alertsClient.initializeExecution({
maxAlerts,
ruleLabel: `test: rule-name`,
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
activeAlertsFromState: {},
recoveredAlertsFromState: {},
});
// Report 2 new alerts
alertsClient.create({ id: '1', actionGroup: 'default', state: { count: 1 }, context: {} });
alertsClient.create({ id: '2', actionGroup: 'default', state: { count: 2 }, context: {} });
expect(mockCreate).toHaveBeenCalledTimes(2);
expect(mockCreate).toHaveBeenNthCalledWith(1, '1');
expect(mockCreate).toHaveBeenNthCalledWith(2, '2');
expect(mockScheduleActions).toHaveBeenCalledTimes(2);
expect(mockScheduleActions).toHaveBeenNthCalledWith(1, 'default', {});
expect(mockScheduleActions).toHaveBeenNthCalledWith(2, 'default', {});
expect(mockReplaceState).toHaveBeenCalledTimes(2);
expect(mockReplaceState).toHaveBeenNthCalledWith(1, { count: 1 });
expect(mockReplaceState).toHaveBeenNthCalledWith(2, { count: 2 });
spy.mockRestore();
});
test('should set payload if defined and write out to alert doc', async () => {
const alertsClient = new AlertsClient<
{ count: number; url: string },
{},
{},
'default',
'recovered'
>({
logger,
elasticsearchClientPromise: Promise.resolve(clusterClient),
ruleType,
namespace: 'default',
rule: alertRuleData,
});
await alertsClient.initializeExecution({
maxAlerts,
ruleLabel: `test: rule-name`,
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
activeAlertsFromState: {},
recoveredAlertsFromState: {},
});
// Report 2 new alerts
alertsClient.create({
id: '1',
actionGroup: 'default',
state: {},
context: {},
payload: { count: 1, url: `https://url1` },
});
alertsClient.create({
id: '2',
actionGroup: 'default',
state: {},
context: {},
payload: { count: 2, url: `https://url2` },
});
alertsClient.processAndLogAlerts({
eventLogger: alertingEventLogger,
ruleRunMetricsStore,
shouldLogAlerts: false,
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
notifyWhen: RuleNotifyWhen.CHANGE,
maintenanceWindowIds: [],
});
await alertsClient.persistAlerts();
const { alertsToReturn } = alertsClient.getAlertsToSerialize();
const uuid1 = alertsToReturn['1'].meta?.uuid;
const uuid2 = alertsToReturn['2'].meta?.uuid;
expect(clusterClient.bulk).toHaveBeenCalledWith({
index: '.alerts-test.alerts-default',
refresh: 'wait_for',
require_alias: true,
body: [
{ index: { _id: uuid1 } },
// new alert doc
{
'@timestamp': date,
count: 1,
kibana: {
alert: {
action_group: 'default',
duration: {
us: '0',
},
flapping: false,
flapping_history: [true],
instance: {
id: '1',
},
maintenance_window_ids: [],
rule: {
category: 'My test rule',
consumer: 'bar',
execution: {
uuid: '5f6aa57d-3e22-484e-bae8-cbed868f4d28',
},
name: 'rule-name',
parameters: {
bar: true,
},
producer: 'alerts',
revision: 0,
rule_type_id: 'test.rule-type',
tags: ['rule-', '-tags'],
uuid: '1',
},
start: date,
status: 'active',
uuid: uuid1,
},
space_ids: ['default'],
},
url: `https://url1`,
},
{ index: { _id: uuid2 } },
// new alert doc
{
'@timestamp': date,
count: 2,
kibana: {
alert: {
action_group: 'default',
duration: {
us: '0',
},
flapping: false,
flapping_history: [true],
instance: {
id: '2',
},
maintenance_window_ids: [],
rule: {
category: 'My test rule',
consumer: 'bar',
execution: {
uuid: '5f6aa57d-3e22-484e-bae8-cbed868f4d28',
},
name: 'rule-name',
parameters: {
bar: true,
},
producer: 'alerts',
revision: 0,
rule_type_id: 'test.rule-type',
tags: ['rule-', '-tags'],
uuid: '1',
},
start: date,
status: 'active',
uuid: uuid2,
},
space_ids: ['default'],
},
url: `https://url2`,
},
],
});
});
});
describe('client()', () => {
test('only returns subset of functionality', async () => {
const alertsClient = new AlertsClient<{}, {}, {}, 'default', 'recovered'>({
logger,
elasticsearchClientPromise: Promise.resolve(clusterClient),
ruleType,
namespace: 'default',
rule: alertRuleData,
});
await alertsClient.initializeExecution({
maxAlerts,
ruleLabel: `test: rule-name`,
flappingSettings: DEFAULT_FLAPPING_SETTINGS,
activeAlertsFromState: {},
recoveredAlertsFromState: {},
});
const publicAlertsClient = alertsClient.client();
expect(keys(publicAlertsClient)).toEqual([
'create',
'getAlertLimitValue',
'setAlertLimitReached',
'getRecoveredAlerts',
]);
});
});
});

View file

@ -7,13 +7,21 @@
import { ElasticsearchClient } from '@kbn/core/server';
import { ALERT_RULE_UUID, ALERT_UUID } from '@kbn/rule-data-utils';
import { chunk, flatMap, keys } from 'lodash';
import { chunk, flatMap, isEmpty, keys } from 'lodash';
import { SearchRequest } from '@elastic/elasticsearch/lib/api/typesWithBodyKey';
import type { Alert } from '@kbn/alerts-as-data-utils';
import { DEFAULT_NAMESPACE_STRING } from '@kbn/core-saved-objects-utils-server';
import { AlertInstanceContext, AlertInstanceState, RuleAlertData } from '../types';
import {
AlertInstanceContext,
AlertInstanceState,
RuleAlertData,
WithoutReservedActionGroups,
} from '../types';
import { LegacyAlertsClient } from './legacy_alerts_client';
import { getIndexTemplateAndPattern } from '../alerts_service/resource_installer_utils';
import {
getIndexTemplateAndPattern,
IIndexPatternString,
} from '../alerts_service/resource_installer_utils';
import { CreateAlertsClientParams } from '../alerts_service/alerts_service';
import {
type AlertRule,
@ -21,8 +29,15 @@ import {
InitializeExecutionOpts,
ProcessAndLogAlertsOpts,
TrackedAlerts,
ReportedAlert,
} from './types';
import { buildNewAlert, buildOngoingAlert, buildRecoveredAlert, formatRule } from './lib';
import {
buildNewAlert,
buildOngoingAlert,
buildUpdatedRecoveredAlert,
buildRecoveredAlert,
formatRule,
} from './lib';
// Term queries can take up to 10,000 terms
const CHUNK_SIZE = 10000;
@ -37,7 +52,8 @@ export class AlertsClient<
LegacyContext extends AlertInstanceContext,
ActionGroupIds extends string,
RecoveryActionGroupId extends string
> implements IAlertsClient<LegacyState, LegacyContext, ActionGroupIds, RecoveryActionGroupId>
> implements
IAlertsClient<AlertData, LegacyState, LegacyContext, ActionGroupIds, RecoveryActionGroupId>
{
private legacyAlertsClient: LegacyAlertsClient<
LegacyState,
@ -56,6 +72,10 @@ export class AlertsClient<
private rule: AlertRule = {};
private indexTemplateAndPattern: IIndexPatternString;
private reportedAlerts: Record<string, AlertData> = {};
constructor(private readonly options: AlertsClientParams) {
this.legacyAlertsClient = new LegacyAlertsClient<
LegacyState,
@ -63,6 +83,12 @@ export class AlertsClient<
ActionGroupIds,
RecoveryActionGroupId
>({ logger: this.options.logger, ruleType: this.options.ruleType });
this.indexTemplateAndPattern = getIndexTemplateAndPattern({
context: this.options.ruleType.alerts?.context!,
namespace: this.options.ruleType.alerts?.isSpaceAware
? this.options.namespace
: DEFAULT_NAMESPACE_STRING,
});
this.fetchedAlerts = { indices: {}, data: {} };
this.rule = formatRule({ rule: this.options.rule, ruleType: this.options.ruleType });
}
@ -131,26 +157,45 @@ export class AlertsClient<
}
public async search(queryBody: SearchRequest['body']) {
const context = this.options.ruleType.alerts?.context;
const esClient = await this.options.elasticsearchClientPromise;
const indexTemplateAndPattern = getIndexTemplateAndPattern({
context: context!,
namespace: this.options.ruleType.alerts?.isSpaceAware
? this.options.namespace
: DEFAULT_NAMESPACE_STRING,
});
const {
hits: { hits },
} = await esClient.search<Alert & AlertData>({
index: indexTemplateAndPattern.pattern,
index: this.indexTemplateAndPattern.pattern,
body: queryBody,
});
return hits;
}
public create(
alert: ReportedAlert<
AlertData,
LegacyState,
LegacyContext,
WithoutReservedActionGroups<ActionGroupIds, RecoveryActionGroupId>
>
) {
const context = alert.context ? alert.context : ({} as LegacyContext);
const state = !isEmpty(alert.state) ? alert.state : null;
// Create a legacy alert
const legacyAlert = this.legacyAlertsClient
.factory()
.create(alert.id)
.scheduleActions(alert.actionGroup, context);
if (state) {
legacyAlert.replaceState(state);
}
// Save the reported alert data
if (alert.payload) {
this.reportedAlerts[alert.id] = alert.payload;
}
}
public hasReachedAlertLimit(): boolean {
return this.legacyAlertsClient.hasReachedAlertLimit();
}
@ -169,23 +214,15 @@ export class AlertsClient<
return this.legacyAlertsClient.getProcessedAlerts(type);
}
public async getAlertsToSerialize() {
public async persistAlerts() {
const currentTime = new Date().toISOString();
const context = this.options.ruleType.alerts?.context;
const esClient = await this.options.elasticsearchClientPromise;
const indexTemplateAndPattern = getIndexTemplateAndPattern({
context: context!,
namespace: this.options.ruleType.alerts?.isSpaceAware
? this.options.namespace
: DEFAULT_NAMESPACE_STRING,
});
const { alertsToReturn, recoveredAlertsToReturn } =
await this.legacyAlertsClient.getAlertsToSerialize(false);
this.legacyAlertsClient.getAlertsToSerialize(false);
const activeAlerts = this.legacyAlertsClient.getProcessedAlerts('active');
const recoveredAlerts = this.legacyAlertsClient.getProcessedAlerts('recovered');
const activeAlerts = this.legacyAlertsClient.getProcessedAlerts('activeCurrent');
const recoveredAlerts = this.legacyAlertsClient.getProcessedAlerts('recoveredCurrent');
// TODO - Lifecycle alerts set some other fields based on alert status
// Example: workflow status - default to 'open' if not set
@ -210,6 +247,7 @@ export class AlertsClient<
legacyAlert: activeAlerts[id],
rule: this.rule,
timestamp: currentTime,
payload: this.reportedAlerts[id],
})
);
} else {
@ -220,7 +258,12 @@ export class AlertsClient<
LegacyContext,
ActionGroupIds,
RecoveryActionGroupId
>({ legacyAlert: activeAlerts[id], rule: this.rule, timestamp: currentTime })
>({
legacyAlert: activeAlerts[id],
rule: this.rule,
timestamp: currentTime,
payload: this.reportedAlerts[id],
})
);
}
}
@ -231,19 +274,26 @@ export class AlertsClient<
// If there is not, log an error because there should be
if (this.fetchedAlerts.data.hasOwnProperty(id)) {
recoveredAlertsToIndex.push(
buildRecoveredAlert<
AlertData,
LegacyState,
LegacyContext,
ActionGroupIds,
RecoveryActionGroupId
>({
alert: this.fetchedAlerts.data[id],
legacyAlert: recoveredAlerts[id],
rule: this.rule,
timestamp: currentTime,
recoveryActionGroup: this.options.ruleType.recoveryActionGroup.id,
})
recoveredAlerts[id]
? buildRecoveredAlert<
AlertData,
LegacyState,
LegacyContext,
ActionGroupIds,
RecoveryActionGroupId
>({
alert: this.fetchedAlerts.data[id],
legacyAlert: recoveredAlerts[id],
rule: this.rule,
timestamp: currentTime,
recoveryActionGroup: this.options.ruleType.recoveryActionGroup.id,
})
: buildUpdatedRecoveredAlert<AlertData>({
alert: this.fetchedAlerts.data[id],
legacyRawAlert: recoveredAlertsToReturn[id],
timestamp: currentTime,
rule: this.rule,
})
);
} else {
this.options.logger.warn(
@ -256,30 +306,51 @@ export class AlertsClient<
const alertsToIndex = [...activeAlertsToIndex, ...recoveredAlertsToIndex];
if (alertsToIndex.length > 0) {
await esClient.bulk({
refresh: 'wait_for',
index: indexTemplateAndPattern.alias,
require_alias: true,
body: flatMap(
[...activeAlertsToIndex, ...recoveredAlertsToIndex].map((alert: Alert & AlertData) => [
{
index: {
_id: alert.kibana.alert.uuid,
// If we know the concrete index for this alert, specify it
...(this.fetchedAlerts.indices[alert.kibana.alert.uuid]
? {
_index: this.fetchedAlerts.indices[alert.kibana.alert.uuid],
require_alias: false,
}
: {}),
try {
const response = await esClient.bulk({
refresh: 'wait_for',
index: this.indexTemplateAndPattern.alias,
require_alias: true,
body: flatMap(
[...activeAlertsToIndex, ...recoveredAlertsToIndex].map((alert: Alert & AlertData) => [
{
index: {
_id: alert.kibana.alert.uuid,
// If we know the concrete index for this alert, specify it
...(this.fetchedAlerts.indices[alert.kibana.alert.uuid]
? {
_index: this.fetchedAlerts.indices[alert.kibana.alert.uuid],
require_alias: false,
}
: {}),
},
},
},
alert,
])
),
});
}
alert,
])
),
});
// If there were individual indexing errors, they will be returned in the success response
if (response && response.errors) {
const errorsInResponse = (response.items ?? [])
.map((item) => (item && item.index && item.index.error ? item.index.error : null))
.filter((item) => item != null);
this.options.logger.error(
`Error writing ${errorsInResponse.length} out of ${
alertsToIndex.length
} alerts - ${JSON.stringify(errorsInResponse)}`
);
}
} catch (err) {
this.options.logger.error(
`Error writing ${alertsToIndex.length} alerts to ${this.indexTemplateAndPattern.alias} - ${err.message}`
);
}
}
}
public getAlertsToSerialize() {
// The flapping value that is persisted inside the task manager state (and used in the next execution)
// is different than the value that should be written to the alert document. For this reason, we call
// getAlertsToSerialize() twice, once before building and bulk indexing alert docs and once after to return
@ -287,10 +358,30 @@ export class AlertsClient<
// This will be a blocker if ever we want to stop serializing alert data inside the task state and just use
// the fetched alert document.
return await this.legacyAlertsClient.getAlertsToSerialize();
return this.legacyAlertsClient.getAlertsToSerialize();
}
public getExecutorServices() {
return this.legacyAlertsClient.getExecutorServices();
public factory() {
return this.legacyAlertsClient.factory();
}
public client() {
return {
create: (
alert: ReportedAlert<
AlertData,
LegacyState,
LegacyContext,
WithoutReservedActionGroups<ActionGroupIds, RecoveryActionGroupId>
>
) => this.create(alert),
getAlertLimitValue: (): number => this.factory().alertLimit.getValue(),
setAlertLimitReached: (reached: boolean) =>
this.factory().alertLimit.setLimitReached(reached),
getRecoveredAlerts: () => {
const { getRecoveredAlerts } = this.factory().done();
return getRecoveredAlerts();
},
};
}
}

View file

@ -14,7 +14,9 @@ const createLegacyAlertsClientMock = () => {
getAlertsToSerialize: jest.fn(),
hasReachedAlertLimit: jest.fn(),
checkLimitUsage: jest.fn(),
getExecutorServices: jest.fn(),
persistAlerts: jest.fn(),
factory: jest.fn(),
client: jest.fn(),
};
});
};

View file

@ -149,7 +149,7 @@ describe('Legacy Alerts Client', () => {
});
});
test('getExecutorServices() should call getPublicAlertFactory on alert factory', async () => {
test('factory() should call getPublicAlertFactory on alert factory', async () => {
const alertsClient = new LegacyAlertsClient({
logger,
ruleType,
@ -166,7 +166,7 @@ describe('Legacy Alerts Client', () => {
recoveredAlertsFromState: {},
});
alertsClient.getExecutorServices();
alertsClient.factory();
expect(getPublicAlertFactory).toHaveBeenCalledWith(mockCreateAlertFactory);
});

View file

@ -44,7 +44,7 @@ export class LegacyAlertsClient<
Context extends AlertInstanceContext,
ActionGroupIds extends string,
RecoveryActionGroupId extends string
> implements IAlertsClient<State, Context, ActionGroupIds, RecoveryActionGroupId>
> implements IAlertsClient<{}, State, Context, ActionGroupIds, RecoveryActionGroupId>
{
private maxAlerts: number = DEFAULT_MAX_ALERTS;
private flappingSettings: RulesSettingsFlappingProperties = DEFAULT_FLAPPING_SETTINGS;
@ -197,9 +197,13 @@ export class LegacyAlertsClient<
return {};
}
public async getAlertsToSerialize(shouldSetFlapping: boolean = true) {
public getAlertsToSerialize(shouldSetFlapping: boolean = true) {
if (shouldSetFlapping) {
this.setFlapping();
setFlapping<State, Context, ActionGroupIds, RecoveryActionGroupId>(
this.flappingSettings,
this.processedAlerts.active,
this.processedAlerts.recovered
);
}
return determineAlertsToReturn<State, Context, ActionGroupIds, RecoveryActionGroupId>(
this.processedAlerts.active,
@ -215,15 +219,13 @@ export class LegacyAlertsClient<
return this.alertFactory!.alertLimit.checkLimitUsage();
}
public getExecutorServices() {
public factory() {
return getPublicAlertFactory(this.alertFactory!);
}
public setFlapping() {
setFlapping<State, Context, ActionGroupIds, RecoveryActionGroupId>(
this.flappingSettings,
this.processedAlerts.active,
this.processedAlerts.recovered
);
public client() {
return null;
}
public async persistAlerts() {}
}

View file

@ -50,6 +50,7 @@ describe('buildNewAlert', () => {
alert: {
action_group: 'default',
flapping: false,
flapping_history: [],
instance: {
id: 'alert-A',
},
@ -83,6 +84,7 @@ describe('buildNewAlert', () => {
us: '0',
},
flapping: false,
flapping_history: [],
instance: {
id: 'alert-A',
},
@ -128,4 +130,92 @@ describe('buildNewAlert', () => {
},
});
});
test('should include alert payload if specified', () => {
const legacyAlert = new LegacyAlert<{}, {}, 'default'>('alert-A');
legacyAlert.scheduleActions('default');
expect(
buildNewAlert<
{ count: number; url: string; kibana: { alert: { nested_field: number } } },
{},
{},
'default',
'recovered'
>({
legacyAlert,
rule: alertRule,
timestamp: '2023-03-28T12:27:28.159Z',
payload: { count: 1, url: `https://url1`, kibana: { alert: { nested_field: 2 } } },
})
).toEqual({
'@timestamp': '2023-03-28T12:27:28.159Z',
count: 1,
kibana: {
alert: {
action_group: 'default',
flapping: false,
flapping_history: [],
instance: {
id: 'alert-A',
},
maintenance_window_ids: [],
nested_field: 2,
rule,
status: 'active',
uuid: legacyAlert.getUuid(),
},
space_ids: ['default'],
},
url: `https://url1`,
});
});
test('should overwrite any framework fields included in payload', () => {
const legacyAlert = new LegacyAlert<{}, {}, 'default'>('alert-A');
legacyAlert.scheduleActions('default');
expect(
buildNewAlert<
{
count: number;
url: string;
kibana: { alert: { action_group: string; nested_field: number } };
},
{},
{},
'default',
'recovered'
>({
legacyAlert,
rule: alertRule,
timestamp: '2023-03-28T12:27:28.159Z',
payload: {
count: 1,
url: `https://url1`,
kibana: { alert: { action_group: 'bad action group', nested_field: 2 } },
},
})
).toEqual({
'@timestamp': '2023-03-28T12:27:28.159Z',
count: 1,
kibana: {
alert: {
action_group: 'default',
flapping: false,
flapping_history: [],
instance: {
id: 'alert-A',
},
maintenance_window_ids: [],
nested_field: 2,
rule,
status: 'active',
uuid: legacyAlert.getUuid(),
},
space_ids: ['default'],
},
url: `https://url1`,
});
});
});

View file

@ -4,13 +4,15 @@
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { isEmpty } from 'lodash';
import deepmerge from 'deepmerge';
import type { Alert } from '@kbn/alerts-as-data-utils';
import { Alert as LegacyAlert } from '../../alert/alert';
import { AlertInstanceContext, AlertInstanceState, RuleAlertData } from '../../types';
import type { AlertRule } from '../types';
import { stripFrameworkFields } from './strip_framework_fields';
interface BuildNewAlertOpts<
AlertData extends RuleAlertData,
LegacyState extends AlertInstanceState,
LegacyContext extends AlertInstanceContext,
ActionGroupIds extends string,
@ -18,6 +20,7 @@ interface BuildNewAlertOpts<
> {
legacyAlert: LegacyAlert<LegacyState, LegacyContext, ActionGroupIds | RecoveryActionGroupId>;
rule: AlertRule;
payload?: AlertData;
timestamp: string;
}
@ -36,30 +39,41 @@ export const buildNewAlert = <
legacyAlert,
rule,
timestamp,
}: BuildNewAlertOpts<LegacyState, LegacyContext, ActionGroupIds, RecoveryActionGroupId>): Alert &
AlertData => {
return {
'@timestamp': timestamp,
kibana: {
alert: {
action_group: legacyAlert.getScheduledActionOptions()?.actionGroup,
flapping: legacyAlert.getFlapping(),
instance: {
id: legacyAlert.getId(),
payload,
}: BuildNewAlertOpts<
AlertData,
LegacyState,
LegacyContext,
ActionGroupIds,
RecoveryActionGroupId
>): Alert & AlertData => {
const cleanedPayload = payload ? stripFrameworkFields(payload) : {};
return deepmerge.all(
[
cleanedPayload,
{
'@timestamp': timestamp,
kibana: {
alert: {
action_group: legacyAlert.getScheduledActionOptions()?.actionGroup,
flapping: legacyAlert.getFlapping(),
flapping_history: legacyAlert.getFlappingHistory(),
instance: {
id: legacyAlert.getId(),
},
maintenance_window_ids: legacyAlert.getMaintenanceWindowIds(),
rule: rule.kibana?.alert.rule,
status: 'active',
uuid: legacyAlert.getUuid(),
...(legacyAlert.getState().duration
? { duration: { us: legacyAlert.getState().duration } }
: {}),
...(legacyAlert.getState().start ? { start: legacyAlert.getState().start } : {}),
},
space_ids: rule.kibana?.space_ids,
},
maintenance_window_ids: legacyAlert.getMaintenanceWindowIds(),
rule: rule.kibana?.alert.rule,
status: 'active',
uuid: legacyAlert.getUuid(),
...(legacyAlert.getState().duration
? { duration: { us: legacyAlert.getState().duration } }
: {}),
...(!isEmpty(legacyAlert.getFlappingHistory())
? { flapping_history: legacyAlert.getFlappingHistory() }
: {}),
...(legacyAlert.getState().start ? { start: legacyAlert.getState().start } : {}),
},
space_ids: rule.kibana?.space_ids,
},
} as Alert & AlertData;
],
{ arrayMerge: (_, sourceArray) => sourceArray }
) as Alert & AlertData;
};

View file

@ -41,6 +41,7 @@ const existingAlert = {
us: '0',
},
flapping: false,
flapping_history: [true],
instance: {
id: 'alert-A',
},
@ -76,6 +77,7 @@ describe('buildOngoingAlert', () => {
us: '36000000',
},
flapping: false,
flapping_history: [],
instance: {
id: 'alert-A',
},
@ -125,6 +127,7 @@ describe('buildOngoingAlert', () => {
us: '36000000',
},
flapping: false,
flapping_history: [],
instance: {
id: 'alert-A',
},
@ -192,23 +195,132 @@ describe('buildOngoingAlert', () => {
});
});
test('should update alert document with latest maintenance window ids', () => {
const legacyAlert = new LegacyAlert<{}, {}, 'error' | 'warning'>('1');
legacyAlert.scheduleActions('error');
legacyAlert.setFlappingHistory([false, false, true, true]);
test('should update alert document with updated payload if specified', () => {
const legacyAlert = new LegacyAlert<{}, {}, 'error' | 'warning'>('alert-A');
legacyAlert
.scheduleActions('warning')
.replaceState({ start: '0000-00-00T00:00:00.000Z', duration: '36000000' });
expect(
buildOngoingAlert<{}, {}, {}, 'error' | 'warning', 'recovered'>({
buildOngoingAlert<
{ count: number; url: string; kibana?: { alert?: { nested_field?: number } } },
{},
{},
'error' | 'warning',
'recovered'
>({
alert: {
...existingAlert,
kibana: {
...existingAlert.kibana,
alert: {
...existingAlert.kibana.alert,
flapping_history: [true, false, false, false, true, true],
maintenance_window_ids: ['maint-1', 'maint-321'],
},
count: 1,
url: `https://url1`,
},
legacyAlert,
rule: alertRule,
timestamp: '2023-03-29T12:27:28.159Z',
payload: {
count: 2,
url: `https://url2`,
kibana: { alert: { nested_field: 2 } },
},
})
).toEqual({
'@timestamp': '2023-03-29T12:27:28.159Z',
count: 2,
kibana: {
alert: {
action_group: 'warning',
duration: {
us: '36000000',
},
flapping: false,
flapping_history: [],
instance: {
id: 'alert-A',
},
maintenance_window_ids: [],
nested_field: 2,
start: '2023-03-28T12:27:28.159Z',
rule,
status: 'active',
uuid: 'abcdefg',
},
space_ids: ['default'],
},
url: `https://url2`,
});
});
test('should update alert document with updated payload if specified but not overwrite any framework fields', () => {
const legacyAlert = new LegacyAlert<{}, {}, 'error' | 'warning'>('alert-A');
legacyAlert
.scheduleActions('warning')
.replaceState({ start: '0000-00-00T00:00:00.000Z', duration: '36000000' });
expect(
buildOngoingAlert<
{
count: number;
url: string;
kibana?: { alert?: { action_group: string; nested_field?: number } };
},
{},
{},
'error' | 'warning',
'recovered'
>({
alert: {
...existingAlert,
count: 1,
url: `https://url1`,
},
legacyAlert,
rule: alertRule,
timestamp: '2023-03-29T12:27:28.159Z',
payload: {
count: 2,
url: `https://url2`,
kibana: { alert: { action_group: 'bad action group', nested_field: 2 } },
},
})
).toEqual({
'@timestamp': '2023-03-29T12:27:28.159Z',
count: 2,
kibana: {
alert: {
action_group: 'warning',
duration: {
us: '36000000',
},
flapping: false,
flapping_history: [],
instance: {
id: 'alert-A',
},
maintenance_window_ids: [],
nested_field: 2,
start: '2023-03-28T12:27:28.159Z',
rule,
status: 'active',
uuid: 'abcdefg',
},
space_ids: ['default'],
},
url: `https://url2`,
});
});
test('should not update alert document if no payload is specified', () => {
const legacyAlert = new LegacyAlert<{}, {}, 'error' | 'warning'>('alert-A');
legacyAlert
.scheduleActions('warning')
.replaceState({ start: '0000-00-00T00:00:00.000Z', duration: '36000000' });
expect(
buildOngoingAlert<{ count: number; url: string }, {}, {}, 'error' | 'warning', 'recovered'>({
alert: {
...existingAlert,
count: 1,
url: `https://url1`,
},
legacyAlert,
rule: alertRule,
@ -216,14 +328,15 @@ describe('buildOngoingAlert', () => {
})
).toEqual({
'@timestamp': '2023-03-29T12:27:28.159Z',
count: 1,
kibana: {
alert: {
action_group: 'error',
action_group: 'warning',
duration: {
us: '0',
us: '36000000',
},
flapping: false,
flapping_history: [false, false, true, true],
flapping_history: [],
instance: {
id: 'alert-A',
},
@ -235,6 +348,7 @@ describe('buildOngoingAlert', () => {
},
space_ids: ['default'],
},
url: `https://url1`,
});
});
});

View file

@ -5,11 +5,12 @@
* 2.0.
*/
import { isEmpty } from 'lodash';
import deepmerge from 'deepmerge';
import type { Alert } from '@kbn/alerts-as-data-utils';
import { Alert as LegacyAlert } from '../../alert/alert';
import { AlertInstanceContext, AlertInstanceState, RuleAlertData } from '../../types';
import type { AlertRule } from '../types';
import { stripFrameworkFields } from './strip_framework_fields';
interface BuildOngoingAlertOpts<
AlertData extends RuleAlertData,
@ -21,6 +22,7 @@ interface BuildOngoingAlertOpts<
alert: Alert & AlertData;
legacyAlert: LegacyAlert<LegacyState, LegacyContext, ActionGroupIds | RecoveryActionGroupId>;
rule: AlertRule;
payload?: AlertData;
timestamp: string;
}
@ -38,6 +40,7 @@ export const buildOngoingAlert = <
>({
alert,
legacyAlert,
payload,
rule,
timestamp,
}: BuildOngoingAlertOpts<
@ -47,38 +50,45 @@ export const buildOngoingAlert = <
ActionGroupIds,
RecoveryActionGroupId
>): Alert & AlertData => {
return {
...alert,
// Update the timestamp to reflect latest update time
'@timestamp': timestamp,
kibana: {
...alert.kibana,
alert: {
...alert.kibana.alert,
// Set latest action group as this may have changed during execution (ex: error -> warning)
action_group: legacyAlert.getScheduledActionOptions()?.actionGroup,
// Set latest flapping state
flapping: legacyAlert.getFlapping(),
// Set latest rule configuration
rule: rule.kibana?.alert.rule,
// Set latest maintenance window IDs
maintenance_window_ids: legacyAlert.getMaintenanceWindowIds(),
// Set latest duration as ongoing alerts should have updated duration
...(legacyAlert.getState().duration
? { duration: { us: legacyAlert.getState().duration } }
: {}),
// Set latest flapping history
...(!isEmpty(legacyAlert.getFlappingHistory())
? { flapping_history: legacyAlert.getFlappingHistory() }
: {}),
// Fields that are explicitly not updated:
// instance.id
// status - ongoing alerts should maintain 'active' status
// uuid - ongoing alerts should carry over previous UUID
// start - ongoing alerts should keep the initial start time
const cleanedPayload = payload ? stripFrameworkFields(payload) : {};
return deepmerge.all(
[
alert,
cleanedPayload,
{
// Update the timestamp to reflect latest update time
'@timestamp': timestamp,
kibana: {
alert: {
// Because we're building this alert after the action execution handler has been
// run, the scheduledExecutionOptions for the alert has been cleared and
// the lastScheduledActions has been set. If we ever change the order of operations
// to build and persist the alert before action execution handler, we will need to
// update where we pull the action group from.
// Set latest action group as this may have changed during execution (ex: error -> warning)
action_group: legacyAlert.getScheduledActionOptions()?.actionGroup,
// Set latest flapping state
flapping: legacyAlert.getFlapping(),
// Set latest flapping_history
flapping_history: legacyAlert.getFlappingHistory(),
// Set latest maintenance window IDs
maintenance_window_ids: legacyAlert.getMaintenanceWindowIds(),
// Set latest rule configuration
rule: rule.kibana?.alert.rule,
// Set latest duration as ongoing alerts should have updated duration
...(legacyAlert.getState().duration
? { duration: { us: legacyAlert.getState().duration } }
: {}),
// Fields that are explicitly not updated:
// instance.id
// status - ongoing alerts should maintain 'active' status
// uuid - ongoing alerts should carry over previous UUID
// start - ongoing alerts should keep the initial start time
},
space_ids: rule.kibana?.space_ids,
},
},
space_ids: rule.kibana?.space_ids,
},
};
],
{ arrayMerge: (_, sourceArray) => sourceArray }
) as Alert & AlertData;
};

View file

@ -41,6 +41,7 @@ const existingActiveAlert = {
us: '0',
},
flapping: false,
flapping_history: [true, false],
instance: {
id: 'alert-A',
},
@ -54,30 +55,6 @@ const existingActiveAlert = {
},
};
const existingRecoveredAlert = {
'@timestamp': '2023-03-28T12:27:28.159Z',
kibana: {
alert: {
action_group: 'default',
duration: {
us: '0',
},
end: '2023-03-28T12:27:28.159Z',
flapping: false,
flapping_history: [true, false, false],
instance: {
id: 'alert-A',
},
maintenance_window_ids: ['maint-x'],
start: '2023-03-27T12:27:28.159Z',
rule,
status: 'recovered',
uuid: 'abcdefg',
},
space_ids: ['default'],
},
};
describe('buildRecoveredAlert', () => {
test('should update active alert document with recovered status and info from legacy alert', () => {
const legacyAlert = new LegacyAlert<{}, {}, 'default'>('alert-A');
@ -103,6 +80,7 @@ describe('buildRecoveredAlert', () => {
},
end: '2023-03-30T12:27:28.159Z',
flapping: false,
flapping_history: [],
instance: {
id: 'alert-A',
},
@ -155,6 +133,7 @@ describe('buildRecoveredAlert', () => {
},
end: '2023-03-30T12:27:28.159Z',
flapping: false,
flapping_history: [],
instance: {
id: 'alert-A',
},
@ -174,43 +153,4 @@ describe('buildRecoveredAlert', () => {
},
});
});
test('should update already recovered alert document with updated flapping history but not maintenance window ids', () => {
const legacyAlert = new LegacyAlert<{}, {}, 'default'>('alert-A');
legacyAlert.scheduleActions('default');
legacyAlert.setFlappingHistory([false, false, true, true]);
legacyAlert.setMaintenanceWindowIds(['maint-1', 'maint-321']);
expect(
buildRecoveredAlert<{}, {}, {}, 'default', 'recovered'>({
alert: existingRecoveredAlert,
legacyAlert,
rule: alertRule,
recoveryActionGroup: 'recovered',
timestamp: '2023-03-29T12:27:28.159Z',
})
).toEqual({
'@timestamp': '2023-03-29T12:27:28.159Z',
kibana: {
alert: {
action_group: 'recovered',
duration: {
us: '0',
},
end: '2023-03-28T12:27:28.159Z',
flapping: false,
flapping_history: [false, false, true, true],
instance: {
id: 'alert-A',
},
maintenance_window_ids: ['maint-x'],
start: '2023-03-27T12:27:28.159Z',
rule,
status: 'recovered',
uuid: 'abcdefg',
},
space_ids: ['default'],
},
});
});
});

View file

@ -4,8 +4,7 @@
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { isEmpty } from 'lodash';
import deepmerge from 'deepmerge';
import type { Alert } from '@kbn/alerts-as-data-utils';
import { Alert as LegacyAlert } from '../../alert/alert';
import { AlertInstanceContext, AlertInstanceState, RuleAlertData } from '../../types';
@ -26,9 +25,7 @@ interface BuildRecoveredAlertOpts<
}
/**
* Updates an existing alert document with data from the LegacyAlert class
* This could be a currently active alert that is now recovered or a previously
* recovered alert that has updates to its flapping history
* Updates an active alert document to recovered
* Currently only populates framework fields and not any rule type specific fields
*/
@ -51,47 +48,43 @@ export const buildRecoveredAlert = <
ActionGroupIds,
RecoveryActionGroupId
>): Alert & AlertData => {
// If we're updating an active alert to be recovered,
// persist any maintenance window IDs on the alert, otherwise
// we should only be changing fields related to flapping
const maintenanceWindowIds =
alert.kibana.alert.status === 'active' ? legacyAlert.getMaintenanceWindowIds() : null;
return {
...alert,
// Update the timestamp to reflect latest update time
'@timestamp': timestamp,
kibana: {
...alert.kibana,
alert: {
...alert.kibana.alert,
// Set the recovery action group
action_group: recoveryActionGroup,
// Set latest flapping state
flapping: legacyAlert.getFlapping(),
// Set latest rule configuration
rule: rule.kibana?.alert.rule,
// Set status to 'recovered'
status: 'recovered',
// Set latest duration as recovered alerts should have updated duration
...(legacyAlert.getState().duration
? { duration: { us: legacyAlert.getState().duration } }
: {}),
// Set end time
...(legacyAlert.getState().end ? { end: legacyAlert.getState().end } : {}),
// Set latest flapping history
...(!isEmpty(legacyAlert.getFlappingHistory())
? { flapping_history: legacyAlert.getFlappingHistory() }
: {}),
// Set maintenance window IDs if defined
...(maintenanceWindowIds ? { maintenance_window_ids: maintenanceWindowIds } : {}),
return deepmerge.all(
[
alert,
{
// Update the timestamp to reflect latest update time
'@timestamp': timestamp,
kibana: {
alert: {
// Set the recovery action group
action_group: recoveryActionGroup,
// Set latest flapping state
flapping: legacyAlert.getFlapping(),
// Set latest flapping_history
flapping_history: legacyAlert.getFlappingHistory(),
// Set latest maintenance window IDs
maintenance_window_ids: legacyAlert.getMaintenanceWindowIds(),
// Set latest rule configuration
rule: rule.kibana?.alert.rule,
// Set status to 'recovered'
status: 'recovered',
// Set latest duration as recovered alerts should have updated duration
...(legacyAlert.getState().duration
? { duration: { us: legacyAlert.getState().duration } }
: {}),
// Set end time
...(legacyAlert.getState().end ? { end: legacyAlert.getState().end } : {}),
// Fields that are explicitly not updated:
// instance.id
// action_group
// uuid - recovered alerts should carry over previous UUID
// start - recovered alerts should keep the initial start time
// Fields that are explicitly not updated:
// instance.id
// action_group
// uuid - recovered alerts should carry over previous UUID
// start - recovered alerts should keep the initial start time
},
space_ids: rule.kibana?.space_ids,
},
},
space_ids: rule.kibana?.space_ids,
},
};
],
{ arrayMerge: (_, sourceArray) => sourceArray }
) as Alert & AlertData;
};

View file

@ -0,0 +1,108 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { Alert as LegacyAlert } from '../../alert/alert';
import { AlertRule } from '../types';
import { buildUpdatedRecoveredAlert } from './build_updated_recovered_alert';
const rule = {
category: 'My test rule',
consumer: 'bar',
execution: {
uuid: '5f6aa57d-3e22-484e-bae8-cbed868f4d28',
},
name: 'rule-name',
parameters: {
bar: true,
},
producer: 'alerts',
revision: 0,
rule_type_id: 'test.rule-type',
tags: ['rule-', '-tags'],
uuid: '1',
};
const alertRule: AlertRule = {
kibana: {
alert: {
rule,
},
space_ids: ['default'],
},
};
const existingRecoveredAlert = {
'@timestamp': '2023-03-28T12:27:28.159Z',
kibana: {
alert: {
action_group: 'recovered',
duration: {
us: '0',
},
end: '2023-03-28T12:27:28.159Z',
flapping: false,
flapping_history: [true, false, false],
instance: {
id: 'alert-A',
},
maintenance_window_ids: ['maint-x'],
start: '2023-03-27T12:27:28.159Z',
rule,
status: 'recovered',
uuid: 'abcdefg',
},
space_ids: ['default'],
},
};
describe('buildUpdatedRecoveredAlert', () => {
test('should update already recovered alert document with updated flapping values and timestamp only', () => {
const legacyAlert = new LegacyAlert<{}, {}, 'default'>('alert-A');
legacyAlert.scheduleActions('default');
legacyAlert.setFlappingHistory([false, false, true, true]);
legacyAlert.setMaintenanceWindowIds(['maint-1', 'maint-321']);
expect(
buildUpdatedRecoveredAlert<{}>({
alert: existingRecoveredAlert,
legacyRawAlert: {
meta: {
flapping: true,
flappingHistory: [false, false, true, true],
maintenanceWindowIds: ['maint-1', 'maint-321'],
},
state: {
start: '3023-03-27T12:27:28.159Z',
},
},
rule: alertRule,
timestamp: '2023-03-29T12:27:28.159Z',
})
).toEqual({
'@timestamp': '2023-03-29T12:27:28.159Z',
kibana: {
alert: {
action_group: 'recovered',
duration: {
us: '0',
},
end: '2023-03-28T12:27:28.159Z',
flapping: true,
flapping_history: [false, false, true, true],
instance: {
id: 'alert-A',
},
maintenance_window_ids: ['maint-x'],
start: '2023-03-27T12:27:28.159Z',
rule,
status: 'recovered',
uuid: 'abcdefg',
},
space_ids: ['default'],
},
});
});
});

View file

@ -0,0 +1,52 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import deepmerge from 'deepmerge';
import type { Alert } from '@kbn/alerts-as-data-utils';
import { RawAlertInstance } from '@kbn/alerting-state-types';
import { RuleAlertData } from '../../types';
import { AlertRule } from '../types';
interface BuildUpdatedRecoveredAlertOpts<AlertData extends RuleAlertData> {
alert: Alert & AlertData;
legacyRawAlert: RawAlertInstance;
timestamp: string;
rule: AlertRule;
}
/**
* Updates an existing recovered alert document with latest flapping
* information
*/
export const buildUpdatedRecoveredAlert = <AlertData extends RuleAlertData>({
alert,
legacyRawAlert,
rule,
timestamp,
}: BuildUpdatedRecoveredAlertOpts<AlertData>): Alert & AlertData => {
return deepmerge.all(
[
alert,
{
// Update the timestamp to reflect latest update time
'@timestamp': timestamp,
kibana: {
alert: {
// Set latest flapping state
flapping: legacyRawAlert.meta?.flapping,
// Set latest flapping history
flapping_history: legacyRawAlert.meta?.flappingHistory,
// Set latest rule configuration
rule: rule.kibana?.alert.rule,
},
},
},
],
{ arrayMerge: (_, sourceArray) => sourceArray }
) as Alert & AlertData;
};

View file

@ -8,4 +8,5 @@
export { buildNewAlert } from './build_new_alert';
export { buildOngoingAlert } from './build_ongoing_alert';
export { buildRecoveredAlert } from './build_recovered_alert';
export { buildUpdatedRecoveredAlert } from './build_updated_recovered_alert';
export { formatRule } from './format_rule';

View file

@ -0,0 +1,91 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { stripFrameworkFields } from './strip_framework_fields';
describe('stripFrameworkFields', () => {
test('should do nothing if payload has no framework fields', () => {
const payload = { field1: 'test', kibana: { alert: { not_a_framework_field: 2 } } };
expect(stripFrameworkFields(payload)).toEqual(payload);
});
test(`should allow allowed fields like "kibana.alert.reason"`, () => {
const payload = {
field1: 'test',
kibana: { alert: { not_a_framework_field: 2, reason: 'because i said so' } },
};
expect(stripFrameworkFields(payload)).toEqual(payload);
});
test(`should strip fields that the framework controls`, () => {
const payload = {
field1: 'test',
field2: [],
kibana: {
alert: {
action_group: 'invalid action group',
not_a_framework_field1: 2,
not_a_framework_field2: [],
not_a_framework_field3: {
abc: 'xyz',
},
instance: { id: 'A' },
duration: {
us: '23543543534',
},
case_ids: ['abcdefg'],
start: 'datestring',
status: 'bad',
end: 'datestring',
flapping: true,
flapping_history: [true],
maintenance_window_ids: ['xyz'],
rule: {
category: 'My test rule',
consumer: 'bar',
execution: {
uuid: '5f6aa57d-3e22-484e-bae8-cbed868f4d28',
},
name: 'rule-name',
parameters: {
bar: true,
},
producer: 'alerts',
revision: 0,
rule_type_id: 'test.rule-type',
tags: ['rule-', '-tags'],
uuid: '1',
},
uuid: 'uuid',
},
},
};
expect(stripFrameworkFields(payload)).toEqual({
field1: 'test',
field2: [],
kibana: {
alert: {
// lodash omit will remove the value for kibana.alert.duration.us but
// keep the empty duration object. this doesn't affect the final alert document
duration: {},
// lodash omit will remove the value for kibana.alert.instance.id but
// keep the empty instance object. this doesn't affect the final alert document
instance: {},
not_a_framework_field1: 2,
not_a_framework_field2: [],
not_a_framework_field3: {
abc: 'xyz',
},
// lodash omit will remove the value for kibana.alert.rule.execution.uuid but
// keep the empty rule.execution object. this doesn't affect the final alert document
rule: {
execution: {},
},
},
},
});
});
});

View file

@ -0,0 +1,28 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { omit } from 'lodash';
import { ALERT_REASON } from '@kbn/rule-data-utils';
import { alertFieldMap } from '@kbn/alerts-as-data-utils';
import { RuleAlertData } from '../../types';
const allowedFrameworkFields = new Set<string>([ALERT_REASON]);
/**
* Remove framework fields from the alert payload reported by
* the rule type. Fields are considered framework fields if they are
* defined in the "alertFieldMap". Framework fields should only be
* set by the alerting framework during rule execution.
*/
export const stripFrameworkFields = <AlertData extends RuleAlertData>(
payload: AlertData
): AlertData => {
const keysToStrip = Object.keys(alertFieldMap).filter(
(key: string) => !allowedFrameworkFields.has(key)
);
return omit(payload, keysToStrip) as AlertData;
};

View file

@ -11,12 +11,14 @@ import {
AlertInstanceContext,
AlertInstanceState,
RawAlertInstance,
RuleAlertData,
RuleNotifyWhenType,
WithoutReservedActionGroups,
} from '../types';
import { AlertingEventLogger } from '../lib/alerting_event_logger/alerting_event_logger';
import { RuleRunMetricsStore } from '../lib/rule_run_metrics_store';
import { RulesSettingsFlappingProperties } from '../../common/rules_settings';
import type { PublicAlertFactory } from '../alert/create_alert_factory';
export interface AlertRuleData {
consumer: string;
executionId: string;
@ -38,6 +40,7 @@ export interface AlertRule {
}
export interface IAlertsClient<
AlertData extends RuleAlertData,
State extends AlertInstanceState,
Context extends AlertInstanceContext,
ActionGroupIds extends string,
@ -50,10 +53,22 @@ export interface IAlertsClient<
getProcessedAlerts(
type: 'new' | 'active' | 'activeCurrent' | 'recovered' | 'recoveredCurrent'
): Record<string, LegacyAlert<State, Context, ActionGroupIds | RecoveryActionGroupId>>;
getAlertsToSerialize(): Promise<{
persistAlerts(): Promise<void>;
getAlertsToSerialize(): {
alertsToReturn: Record<string, RawAlertInstance>;
recoveredAlertsToReturn: Record<string, RawAlertInstance>;
}>;
};
factory(): PublicAlertFactory<
State,
Context,
WithoutReservedActionGroups<ActionGroupIds, RecoveryActionGroupId>
>;
client(): PublicAlertsClient<
AlertData,
State,
Context,
WithoutReservedActionGroups<ActionGroupIds, RecoveryActionGroupId>
> | null;
}
export interface ProcessAndLogAlertsOpts {
@ -80,3 +95,28 @@ export interface TrackedAlerts<
active: Record<string, LegacyAlert<State, Context>>;
recovered: Record<string, LegacyAlert<State, Context>>;
}
export interface PublicAlertsClient<
AlertData extends RuleAlertData,
State extends AlertInstanceState,
Context extends AlertInstanceContext,
ActionGroupIds extends string
> {
create(alert: ReportedAlert<AlertData, State, Context, ActionGroupIds>): void;
getAlertLimitValue: () => number;
setAlertLimitReached: (reached: boolean) => void;
getRecoveredAlerts: () => Array<LegacyAlert<State, Context, ActionGroupIds>>;
}
export interface ReportedAlert<
AlertData extends RuleAlertData,
State extends AlertInstanceState,
Context extends AlertInstanceContext,
ActionGroupIds extends string
> {
id: string; // alert instance id
actionGroup: ActionGroupIds;
state?: State;
context?: Context;
payload?: AlertData;
}
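Putting the two interfaces together, a rule executor would report an alert roughly like this; the instance id, action group, and payload fields here are hypothetical, and `alertsClient` is null for rule types that have not registered alerts with the framework:

```ts
const { alertsClient } = services;
if (alertsClient) {
  alertsClient.create({
    id: 'host-1',                // alert instance id
    actionGroup: 'default',
    state: { lastValue: 0.92 },
    context: { host: 'host-1' },
    payload: { cpuUsage: 0.92 }, // AAD payload written into the alert doc
  });

  // recovered alerts from this execution can also be inspected
  const recovered = alertsClient.getRecoveredAlerts();
}
```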

View file

@ -36,6 +36,7 @@ import {
installWithTimeout,
} from './lib';
import { type LegacyAlertsClientParams, type AlertRuleData, AlertsClient } from '../alerts_client';
import { IAlertsClient } from '../alerts_client/types';
export const TOTAL_FIELDS_LIMIT = 2500;
const LEGACY_ALERT_CONTEXT = 'legacy-alert';
@ -92,7 +93,7 @@ interface IAlertsService {
RecoveryActionGroupId extends string
>(
opts: CreateAlertsClientParams
): Promise<AlertsClient<
): Promise<IAlertsClient<
AlertData,
LegacyState,
LegacyContext,
@ -136,7 +137,15 @@ export class AlertsService implements IAlertsService {
LegacyContext extends AlertInstanceContext,
ActionGroupIds extends string,
RecoveryActionGroupId extends string
>(opts: CreateAlertsClientParams) {
>(
opts: CreateAlertsClientParams
): Promise<IAlertsClient<
AlertData,
LegacyState,
LegacyContext,
ActionGroupIds,
RecoveryActionGroupId
> | null> {
if (!opts.ruleType.alerts) {
return null;
}
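For `createAlertsClient` to return a client instead of null, the rule type must register an `alerts` definition; a sketch based on the `ruleTypeWithAlerts` fixture used in the task runner tests below:

```ts
const myRuleType = {
  ...baseRuleType, // id, name, executor, etc. (hypothetical base definition)
  alerts: {
    context: 'test',   // alerts-as-data context
    mappings: {
      fieldMap: {
        textField: { type: 'keyword', required: false },
      },
    },
    shouldWrite: true, // opt in to writing alert documents
  },
};
```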

View file

@ -771,8 +771,9 @@ describe('AlertingEventLogger', () => {
[TaskRunnerTimerSpan.PrepareRule]: 30,
[TaskRunnerTimerSpan.RuleTypeRun]: 40,
[TaskRunnerTimerSpan.ProcessAlerts]: 50,
[TaskRunnerTimerSpan.TriggerActions]: 60,
[TaskRunnerTimerSpan.ProcessRuleRun]: 70,
[TaskRunnerTimerSpan.PersistAlerts]: 60,
[TaskRunnerTimerSpan.TriggerActions]: 70,
[TaskRunnerTimerSpan.ProcessRuleRun]: 80,
},
});
@ -793,8 +794,9 @@ describe('AlertingEventLogger', () => {
prepare_rule_duration_ms: 30,
rule_type_run_duration_ms: 40,
process_alerts_duration_ms: 50,
trigger_actions_duration_ms: 60,
process_rule_duration_ms: 70,
persist_alerts_duration_ms: 60,
trigger_actions_duration_ms: 70,
process_rule_duration_ms: 80,
},
},
},
@ -828,8 +830,9 @@ describe('AlertingEventLogger', () => {
[TaskRunnerTimerSpan.PrepareRule]: 30,
[TaskRunnerTimerSpan.RuleTypeRun]: 40,
[TaskRunnerTimerSpan.ProcessAlerts]: 50,
[TaskRunnerTimerSpan.TriggerActions]: 60,
[TaskRunnerTimerSpan.ProcessRuleRun]: 70,
[TaskRunnerTimerSpan.PersistAlerts]: 60,
[TaskRunnerTimerSpan.TriggerActions]: 70,
[TaskRunnerTimerSpan.ProcessRuleRun]: 80,
},
});
@ -860,8 +863,9 @@ describe('AlertingEventLogger', () => {
prepare_rule_duration_ms: 30,
rule_type_run_duration_ms: 40,
process_alerts_duration_ms: 50,
trigger_actions_duration_ms: 60,
process_rule_duration_ms: 70,
persist_alerts_duration_ms: 60,
trigger_actions_duration_ms: 70,
process_rule_duration_ms: 80,
},
},
},

View file

@ -358,6 +358,72 @@ describe('processAlerts', () => {
expect(activeAlert2State.end).not.toBeDefined();
});
test('preserves changes to other state fields', () => {
const newAlert = new Alert<AlertInstanceState, AlertInstanceContext>('1');
const existingAlert1 = new Alert<AlertInstanceState, AlertInstanceContext>('2');
const existingAlert2 = new Alert<AlertInstanceState, AlertInstanceContext>('3');
const existingAlerts = {
'2': existingAlert1,
'3': existingAlert2,
};
existingAlerts['2'].replaceState({
stateField1: 'xyz',
start: '1969-12-30T00:00:00.000Z',
duration: 33000,
});
existingAlerts['3'].replaceState({
anotherState: true,
start: '1969-12-31T07:34:00.000Z',
duration: 23532,
});
const updatedAlerts = {
...cloneDeep(existingAlerts),
'1': newAlert,
};
updatedAlerts['1'].scheduleActions('default' as never, { foo: '1' });
updatedAlerts['2']
.scheduleActions('default' as never, { foo: '1' })
.replaceState({ stateField1: 'abc' });
updatedAlerts['3']
.scheduleActions('default' as never, { foo: '2' })
.replaceState({ anotherState: false });
const { activeAlerts } = processAlerts({
alerts: updatedAlerts,
existingAlerts,
previouslyRecoveredAlerts: {},
hasReachedAlertLimit: false,
alertLimit: 10,
autoRecoverAlerts: true,
flappingSettings: DISABLE_FLAPPING_SETTINGS,
maintenanceWindowIds: [],
});
expect(activeAlerts).toEqual({
'1': updatedAlerts['1'],
'2': updatedAlerts['2'],
'3': updatedAlerts['3'],
});
const activeAlert1State = activeAlerts['2'].getState();
const activeAlert2State = activeAlerts['3'].getState();
expect(activeAlert1State.start).toEqual('1969-12-30T00:00:00.000Z');
expect(activeAlert2State.start).toEqual('1969-12-31T07:34:00.000Z');
expect(activeAlert1State.stateField1).toEqual('abc');
expect(activeAlert2State.anotherState).toEqual(false);
expect(activeAlert1State.duration).toEqual('172800000000000');
expect(activeAlert2State.duration).toEqual('59160000000000');
expect(activeAlert1State.end).not.toBeDefined();
expect(activeAlert2State.end).not.toBeDefined();
});
test('sets start time in active alert state if alert was previously recovered', () => {
const previouslyRecoveredAlert1 = new Alert<AlertInstanceState, AlertInstanceContext>('1');
const previouslyRecoveredAlert2 = new Alert<AlertInstanceState, AlertInstanceContext>('2');

View file

@ -124,11 +124,12 @@ function processAlertsHelper<
// this alert existed in the previous run
// calculate the duration to date for active alerts
const state = existingAlerts[id].getState();
const currentState = activeAlerts[id].getState();
const durationInMs =
new Date(currentTime).valueOf() - new Date(state.start as string).valueOf();
const duration = state.start ? millisToNanos(durationInMs) : undefined;
activeAlerts[id].replaceState({
...state,
...currentState,
...(state.start ? { start: state.start } : {}),
...(duration !== undefined ? { duration } : {}),
});
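The merge above means state fields set by the executor during this run survive, while the framework-managed `start` and `duration` are re-applied last. In effect, a minimal sketch (with the conditional spreads simplified):

```ts
const state = { stateField1: 'xyz', start: '1969-12-30T00:00:00.000Z' }; // from the previous run
const currentState = { stateField1: 'abc' };                             // set via replaceState this run
const duration = '172800000000000';                                      // recomputed in nanos from state.start

const merged = { ...state, ...currentState, start: state.start, duration };
// => { stateField1: 'abc', start: '1969-12-30T00:00:00.000Z', duration: '172800000000000' }
```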

View file

@ -22,6 +22,7 @@ import {
PublicRuleResultService,
PublicRuleMonitoringService,
} from './types';
import { publicAlertsClientMock } from './alerts_client/alerts_client.mock';
export { rulesClientMock };
@ -162,6 +163,7 @@ const createRuleExecutorServicesMock = <
},
done: jest.fn().mockReturnValue(alertFactoryMockDone),
},
alertsClient: publicAlertsClientMock.create(),
savedObjectsClient: savedObjectsClientMock.create(),
uiSettingsClient: uiSettingsServiceMock.createClient(),
scopedClusterClient: elasticsearchServiceMock.createScopedClusterClient(),

View file

@ -280,6 +280,7 @@ describe('Task Runner', () => {
expect(call.rule.ruleTypeName).toBe('My test rule');
expect(call.rule.actions).toEqual(RULE_ACTIONS);
expect(call.services.alertFactory.create).toBeTruthy();
expect(call.services.alertsClient).toBe(null);
expect(call.services.scopedClusterClient).toBeTruthy();
expect(call.services).toBeTruthy();
@ -320,51 +321,6 @@ describe('Task Runner', () => {
).toHaveBeenCalled();
});
test('checks alertsService context initialized if rule type has registered alerts with framework', async () => {
const ruleTypeWithAlerts = {
...ruleType,
alerts: { context: 'test', mappings: { fieldMap: {} } },
};
ruleTypeRegistry.get.mockReturnValue(ruleTypeWithAlerts);
const taskRunner = new TaskRunner(
ruleTypeWithAlerts,
{
...mockedTaskInstance,
state: {
...mockedTaskInstance.state,
previousStartedAt: new Date(Date.now() - 5 * 60 * 1000).toISOString(),
},
},
taskRunnerFactoryInitializerParams,
inMemoryMetrics
);
expect(AlertingEventLogger).toHaveBeenCalledTimes(1);
rulesClient.getAlertFromRaw.mockReturnValue(mockedRuleTypeSavedObject as Rule);
encryptedSavedObjectsClient.getDecryptedAsInternalUser.mockResolvedValue(SAVED_OBJECT);
const runnerResult = await taskRunner.run();
expect(runnerResult).toEqual(generateRunnerResult({ state: true, history: [true] }));
expect(ruleType.executor).toHaveBeenCalledTimes(1);
expect(alertsService.createAlertsClient).toHaveBeenCalledWith({
logger,
ruleType: ruleTypeWithAlerts,
namespace: 'default',
rule: {
consumer: 'bar',
executionId: '5f6aa57d-3e22-484e-bae8-cbed868f4d28',
id: '1',
name: 'rule-name',
parameters: {
bar: true,
},
revision: 0,
spaceId: 'default',
tags: ['rule-', '-tags'],
},
});
});
test.each(ephemeralTestParams)(
'actionsPlugin.execute is called per alert that is scheduled %s',
async (nameExtension, customTaskRunnerFactoryInitializerParams, enqueueFunction, isBulk) => {
@ -3155,6 +3111,7 @@ describe('Task Runner', () => {
},
timings: {
claim_to_start_duration_ms: 0,
persist_alerts_duration_ms: 0,
prepare_rule_duration_ms: 0,
process_alerts_duration_ms: 0,
process_rule_duration_ms: 0,
@ -3187,6 +3144,7 @@ describe('Task Runner', () => {
},
timings: {
claim_to_start_duration_ms: 0,
persist_alerts_duration_ms: 0,
prepare_rule_duration_ms: 0,
process_alerts_duration_ms: 0,
process_rule_duration_ms: 0,
@ -3215,6 +3173,7 @@ describe('Task Runner', () => {
},
timings: {
claim_to_start_duration_ms: 0,
persist_alerts_duration_ms: 0,
prepare_rule_duration_ms: 0,
process_alerts_duration_ms: 0,
process_rule_duration_ms: 0,

View file

@ -72,6 +72,7 @@ import { ILastRun, lastRunFromState, lastRunToRaw } from '../lib/last_run_status
import { RunningHandler } from './running_handler';
import { RuleResultService } from '../monitoring/rule_result_service';
import { LegacyAlertsClient } from '../alerts_client';
import { IAlertsClient } from '../alerts_client/types';
const FALLBACK_RETRY_INTERVAL = '5m';
const CONNECTIVITY_RETRY_INTERVAL = '5m';
@ -301,21 +302,42 @@ export class TaskRunner<
// writing from alerts-as-data indices and eventually
// we will want to migrate all the processing of alerts out
// of the LegacyAlertsClient and into the AlertsClient.
const alertsClient =
(await this.context.alertsService?.createAlertsClient<
AlertData,
State,
Context,
ActionGroupIds,
RecoveryActionGroupId
>({
...alertsClientParams,
namespace: namespace ?? DEFAULT_NAMESPACE_STRING,
rule: this.getAADRuleData(rule, spaceId),
})) ??
new LegacyAlertsClient<State, Context, ActionGroupIds, RecoveryActionGroupId>(
let alertsClient: IAlertsClient<
AlertData,
State,
Context,
ActionGroupIds,
RecoveryActionGroupId
>;
try {
const client =
(await this.context.alertsService?.createAlertsClient<
AlertData,
State,
Context,
ActionGroupIds,
RecoveryActionGroupId
>({
...alertsClientParams,
namespace: namespace ?? DEFAULT_NAMESPACE_STRING,
rule: this.getAADRuleData(rule, spaceId),
})) ?? null;
alertsClient = client
? client
: new LegacyAlertsClient<State, Context, ActionGroupIds, RecoveryActionGroupId>(
alertsClientParams
);
} catch (err) {
this.logger.error(
`Error initializing AlertsClient for context ${this.ruleType.alerts?.context}. Using legacy alerts client instead. - ${err.message}`
);
alertsClient = new LegacyAlertsClient<State, Context, ActionGroupIds, RecoveryActionGroupId>(
alertsClientParams
);
}
await alertsClient.initializeExecution({
maxAlerts: this.maxAlerts,
@ -405,7 +427,8 @@ export class TaskRunner<
searchSourceClient: wrappedSearchSourceClient.searchSourceClient,
uiSettingsClient: this.context.uiSettings.asScopedToClient(savedObjectsClient),
scopedClusterClient: wrappedScopedClusterClient.client(),
alertFactory: alertsClient.getExecutorServices(),
alertFactory: alertsClient.factory(),
alertsClient: alertsClient.client(),
shouldWriteAlerts: () => this.shouldLogAndScheduleActionsForAlerts(),
shouldStopExecution: () => this.cancelled,
ruleMonitoringService: this.ruleMonitoring.getLastRunMetricsSetters(),
@ -493,6 +516,10 @@ export class TaskRunner<
});
});
await this.timer.runWithTimer(TaskRunnerTimerSpan.PersistAlerts, async () => {
await alertsClient.persistAlerts();
});
const executionHandler = new ExecutionHandler({
rule,
ruleType: this.ruleType,
@ -530,12 +557,12 @@ export class TaskRunner<
let alertsToReturn: Record<string, RawAlertInstance> = {};
let recoveredAlertsToReturn: Record<string, RawAlertInstance> = {};
const { alertsToReturn: alerts, recoveredAlertsToReturn: recovered } =
await alertsClient.getAlertsToSerialize();
// Only serialize alerts into task state if we're auto-recovering, otherwise
// we don't need to keep this information around.
if (this.ruleType.autoRecoverAlerts) {
const { alertsToReturn: alerts, recoveredAlertsToReturn: recovered } =
alertsClient.getAlertsToSerialize();
alertsToReturn = alerts;
recoveredAlertsToReturn = recovered;
}

View file

@ -0,0 +1,749 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import sinon from 'sinon';
import { usageCountersServiceMock } from '@kbn/usage-collection-plugin/server/usage_counters/usage_counters_service.mock';
import {
RuleExecutorOptions,
RuleTypeParams,
RuleTypeState,
AlertInstanceState,
AlertInstanceContext,
Rule,
RuleNotifyWhen,
RuleAlertData,
} from '../types';
import { ConcreteTaskInstance } from '@kbn/task-manager-plugin/server';
import { TaskRunnerContext } from './task_runner_factory';
import { TaskRunner } from './task_runner';
import { encryptedSavedObjectsMock } from '@kbn/encrypted-saved-objects-plugin/server/mocks';
import {
loggingSystemMock,
savedObjectsRepositoryMock,
httpServiceMock,
executionContextServiceMock,
savedObjectsServiceMock,
elasticsearchServiceMock,
uiSettingsServiceMock,
} from '@kbn/core/server/mocks';
import { PluginStartContract as ActionsPluginStart } from '@kbn/actions-plugin/server';
import { actionsMock, actionsClientMock } from '@kbn/actions-plugin/server/mocks';
import { alertsMock, rulesClientMock } from '../mocks';
import { eventLoggerMock } from '@kbn/event-log-plugin/server/event_logger.mock';
import { IEventLogger } from '@kbn/event-log-plugin/server';
import { ruleTypeRegistryMock } from '../rule_type_registry.mock';
import { inMemoryMetricsMock } from '../monitoring/in_memory_metrics.mock';
import {
mockDate,
mockedRuleTypeSavedObject,
ruleType,
RULE_NAME,
SAVED_OBJECT,
generateRunnerResult,
RULE_ACTIONS,
generateSavedObjectParams,
mockTaskInstance,
DATE_1970,
DATE_1970_5_MIN,
} from './fixtures';
import { dataPluginMock } from '@kbn/data-plugin/server/mocks';
import { AlertingEventLogger } from '../lib/alerting_event_logger/alerting_event_logger';
import { alertingEventLoggerMock } from '../lib/alerting_event_logger/alerting_event_logger.mock';
import { SharePluginStart } from '@kbn/share-plugin/server';
import { dataViewPluginMocks } from '@kbn/data-views-plugin/public/mocks';
import { DataViewsServerPluginStart } from '@kbn/data-views-plugin/server';
import { rulesSettingsClientMock } from '../rules_settings_client.mock';
import { maintenanceWindowClientMock } from '../maintenance_window_client.mock';
import { alertsServiceMock } from '../alerts_service/alerts_service.mock';
import { UntypedNormalizedRuleType } from '../rule_type_registry';
import { alertsClientMock } from '../alerts_client/alerts_client.mock';
import * as LegacyAlertsClientModule from '../alerts_client/legacy_alerts_client';
import * as RuleRunMetricsStoreModule from '../lib/rule_run_metrics_store';
import { legacyAlertsClientMock } from '../alerts_client/legacy_alerts_client.mock';
import { ruleRunMetricsStoreMock } from '../lib/rule_run_metrics_store.mock';
import { AlertsService } from '../alerts_service';
import { ReplaySubject } from 'rxjs';
import { IAlertsClient } from '../alerts_client/types';
jest.mock('uuid', () => ({
v4: () => '5f6aa57d-3e22-484e-bae8-cbed868f4d28',
}));
jest.mock('../lib/wrap_scoped_cluster_client', () => ({
createWrappedScopedClusterClientFactory: jest.fn(),
}));
jest.mock('../lib/alerting_event_logger/alerting_event_logger');
let fakeTimer: sinon.SinonFakeTimers;
const logger: ReturnType<typeof loggingSystemMock.createLogger> = loggingSystemMock.createLogger();
const mockUsageCountersSetup = usageCountersServiceMock.createSetupContract();
const mockUsageCounter = mockUsageCountersSetup.createUsageCounter('test');
const alertingEventLogger = alertingEventLoggerMock.create();
const clusterClient = elasticsearchServiceMock.createClusterClient().asInternalUser;
const ruleTypeWithAlerts: jest.Mocked<UntypedNormalizedRuleType> = {
...ruleType,
alerts: {
context: 'test',
mappings: {
fieldMap: {
textField: {
type: 'keyword',
required: false,
},
numericField: {
type: 'long',
required: false,
},
},
},
shouldWrite: true,
},
};
describe('Task Runner', () => {
let mockedTaskInstance: ConcreteTaskInstance;
beforeAll(() => {
fakeTimer = sinon.useFakeTimers();
mockedTaskInstance = mockTaskInstance();
});
afterAll(() => fakeTimer.restore());
const encryptedSavedObjectsClient = encryptedSavedObjectsMock.createClient();
const services = alertsMock.createRuleExecutorServices();
const actionsClient = actionsClientMock.create();
const rulesClient = rulesClientMock.create();
const ruleTypeRegistry = ruleTypeRegistryMock.create();
const savedObjectsService = savedObjectsServiceMock.createInternalStartContract();
const elasticsearchService = elasticsearchServiceMock.createInternalStart();
const dataPlugin = dataPluginMock.createStartContract();
const uiSettingsService = uiSettingsServiceMock.createStartContract();
const inMemoryMetrics = inMemoryMetricsMock.create();
const dataViewsMock = {
dataViewsServiceFactory: jest.fn().mockResolvedValue(dataViewPluginMocks.createStartContract()),
} as DataViewsServerPluginStart;
const mockAlertsService = alertsServiceMock.create();
const mockAlertsClient = alertsClientMock.create();
const mockLegacyAlertsClient = legacyAlertsClientMock.create();
const ruleRunMetricsStore = ruleRunMetricsStoreMock.create();
const maintenanceWindowClient = maintenanceWindowClientMock.create();
type TaskRunnerFactoryInitializerParamsType = jest.Mocked<TaskRunnerContext> & {
actionsPlugin: jest.Mocked<ActionsPluginStart>;
eventLogger: jest.Mocked<IEventLogger>;
executionContext: ReturnType<typeof executionContextServiceMock.createInternalStartContract>;
};
const taskRunnerFactoryInitializerParams: TaskRunnerFactoryInitializerParamsType = {
data: dataPlugin,
dataViews: dataViewsMock,
savedObjects: savedObjectsService,
share: {} as SharePluginStart,
uiSettings: uiSettingsService,
elasticsearch: elasticsearchService,
actionsPlugin: actionsMock.createStart(),
getRulesClientWithRequest: jest.fn().mockReturnValue(rulesClient),
encryptedSavedObjectsClient,
logger,
executionContext: executionContextServiceMock.createInternalStartContract(),
spaceIdToNamespace: jest.fn().mockReturnValue(undefined),
basePathService: httpServiceMock.createBasePath(),
eventLogger: eventLoggerMock.create(),
internalSavedObjectsRepository: savedObjectsRepositoryMock.create(),
ruleTypeRegistry,
alertsService: mockAlertsService,
kibanaBaseUrl: 'https://localhost:5601',
supportsEphemeralTasks: false,
maxEphemeralActionsPerRule: 10,
maxAlerts: 1000,
cancelAlertsOnRuleTimeout: true,
usageCounter: mockUsageCounter,
actionsConfigMap: {
default: {
max: 10000,
},
},
getRulesSettingsClientWithRequest: jest.fn().mockReturnValue(rulesSettingsClientMock.create()),
getMaintenanceWindowClientWithRequest: jest.fn().mockReturnValue(maintenanceWindowClient),
};
beforeEach(() => {
jest.clearAllMocks();
jest
.requireMock('../lib/wrap_scoped_cluster_client')
.createWrappedScopedClusterClientFactory.mockReturnValue({
client: () => services.scopedClusterClient,
getMetrics: () => ({
numSearches: 3,
esSearchDurationMs: 33,
totalSearchDurationMs: 23423,
}),
});
savedObjectsService.getScopedClient.mockReturnValue(services.savedObjectsClient);
elasticsearchService.client.asScoped.mockReturnValue(services.scopedClusterClient);
maintenanceWindowClient.getActiveMaintenanceWindows.mockResolvedValue([]);
taskRunnerFactoryInitializerParams.getRulesClientWithRequest.mockReturnValue(rulesClient);
taskRunnerFactoryInitializerParams.actionsPlugin.getActionsClientWithRequest.mockResolvedValue(
actionsClient
);
taskRunnerFactoryInitializerParams.actionsPlugin.renderActionParameterTemplates.mockImplementation(
(actionTypeId, actionId, params) => params
);
ruleTypeRegistry.get.mockReturnValue(ruleTypeWithAlerts);
taskRunnerFactoryInitializerParams.executionContext.withContext.mockImplementation((ctx, fn) =>
fn()
);
taskRunnerFactoryInitializerParams.getRulesSettingsClientWithRequest.mockReturnValue(
rulesSettingsClientMock.create()
);
taskRunnerFactoryInitializerParams.getMaintenanceWindowClientWithRequest.mockReturnValue(
maintenanceWindowClient
);
mockedRuleTypeSavedObject.monitoring!.run.history = [];
mockedRuleTypeSavedObject.monitoring!.run.calculated_metrics.success_ratio = 0;
alertingEventLogger.getStartAndDuration.mockImplementation(() => ({ start: new Date() }));
(AlertingEventLogger as jest.Mock).mockImplementation(() => alertingEventLogger);
logger.get.mockImplementation(() => logger);
ruleType.executor.mockResolvedValue({ state: {} });
});
test('should not use legacy alerts client if alerts client created', async () => {
const spy1 = jest
.spyOn(LegacyAlertsClientModule, 'LegacyAlertsClient')
.mockImplementation(() => mockLegacyAlertsClient);
const spy2 = jest
.spyOn(RuleRunMetricsStoreModule, 'RuleRunMetricsStore')
.mockImplementation(() => ruleRunMetricsStore);
mockAlertsService.createAlertsClient.mockImplementation(() => mockAlertsClient);
mockAlertsClient.getAlertsToSerialize.mockResolvedValue({
alertsToReturn: {},
recoveredAlertsToReturn: {},
});
ruleRunMetricsStore.getMetrics.mockReturnValue({
numSearches: 3,
totalSearchDurationMs: 23423,
esSearchDurationMs: 33,
numberOfTriggeredActions: 0,
numberOfGeneratedActions: 0,
numberOfActiveAlerts: 0,
numberOfRecoveredAlerts: 0,
numberOfNewAlerts: 0,
hasReachedAlertLimit: false,
triggeredActionsStatus: 'complete',
});
const taskRunner = new TaskRunner(
ruleTypeWithAlerts,
{
...mockedTaskInstance,
state: {
...mockedTaskInstance.state,
previousStartedAt: new Date(Date.now() - 5 * 60 * 1000).toISOString(),
},
},
taskRunnerFactoryInitializerParams,
inMemoryMetrics
);
expect(AlertingEventLogger).toHaveBeenCalledTimes(1);
rulesClient.getAlertFromRaw.mockReturnValue(mockedRuleTypeSavedObject as Rule);
encryptedSavedObjectsClient.getDecryptedAsInternalUser.mockResolvedValue(SAVED_OBJECT);
await taskRunner.run();
expect(mockAlertsService.createAlertsClient).toHaveBeenCalledWith({
logger,
ruleType: ruleTypeWithAlerts,
namespace: 'default',
rule: {
consumer: 'bar',
executionId: '5f6aa57d-3e22-484e-bae8-cbed868f4d28',
id: '1',
name: 'rule-name',
parameters: {
bar: true,
},
revision: 0,
spaceId: 'default',
tags: ['rule-', '-tags'],
},
});
expect(LegacyAlertsClientModule.LegacyAlertsClient).not.toHaveBeenCalled();
testCorrectAlertsClientUsed({
alertsClientToUse: mockAlertsClient,
alertsClientNotToUse: mockLegacyAlertsClient,
});
expect(ruleType.executor).toHaveBeenCalledTimes(1);
expect(logger.debug).toHaveBeenCalledTimes(5);
expect(logger.debug).nthCalledWith(1, 'executing rule test:1 at 1970-01-01T00:00:00.000Z');
expect(logger.debug).nthCalledWith(
2,
'deprecated ruleRunStatus for test:1: {"lastExecutionDate":"1970-01-01T00:00:00.000Z","status":"ok"}'
);
expect(logger.debug).nthCalledWith(
3,
'ruleRunStatus for test:1: {"outcome":"succeeded","outcomeOrder":0,"outcomeMsg":null,"warning":null,"alertsCount":{"active":0,"new":0,"recovered":0,"ignored":0}}'
);
expect(logger.debug).nthCalledWith(
4,
'ruleRunMetrics for test:1: {"numSearches":3,"totalSearchDurationMs":23423,"esSearchDurationMs":33,"numberOfTriggeredActions":0,"numberOfGeneratedActions":0,"numberOfActiveAlerts":0,"numberOfRecoveredAlerts":0,"numberOfNewAlerts":0,"hasReachedAlertLimit":false,"triggeredActionsStatus":"complete"}'
);
expect(
taskRunnerFactoryInitializerParams.internalSavedObjectsRepository.update
).toHaveBeenCalledWith(...generateSavedObjectParams({}));
expect(taskRunnerFactoryInitializerParams.executionContext.withContext).toBeCalledTimes(1);
expect(taskRunnerFactoryInitializerParams.executionContext.withContext).toHaveBeenCalledWith(
{
id: '1',
name: 'execute test',
type: 'alert',
description: 'execute [test] with name [rule-name] in [default] namespace',
},
expect.any(Function)
);
expect(mockUsageCounter.incrementCounter).not.toHaveBeenCalled();
expect(
jest.requireMock('../lib/wrap_scoped_cluster_client').createWrappedScopedClusterClientFactory
).toHaveBeenCalled();
spy1.mockRestore();
spy2.mockRestore();
});
test('should successfully execute task with alerts client', async () => {
const alertsService = new AlertsService({
logger,
pluginStop$: new ReplaySubject(1),
kibanaVersion: '8.8.0',
elasticsearchClientPromise: Promise.resolve(clusterClient),
});
const spy = jest
.spyOn(alertsService, 'getContextInitializationPromise')
.mockResolvedValue({ result: true });
const taskRunner = new TaskRunner(
ruleTypeWithAlerts,
{
...mockedTaskInstance,
state: {
...mockedTaskInstance.state,
previousStartedAt: new Date(Date.now() - 5 * 60 * 1000).toISOString(),
},
},
{
...taskRunnerFactoryInitializerParams,
alertsService,
},
inMemoryMetrics
);
expect(AlertingEventLogger).toHaveBeenCalledTimes(1);
rulesClient.getAlertFromRaw.mockReturnValue(mockedRuleTypeSavedObject as Rule);
encryptedSavedObjectsClient.getDecryptedAsInternalUser.mockResolvedValue(SAVED_OBJECT);
const runnerResult = await taskRunner.run();
expect(runnerResult).toEqual(generateRunnerResult({ state: true, history: [true] }));
expect(ruleType.executor).toHaveBeenCalledTimes(1);
const call = ruleType.executor.mock.calls[0][0];
expect(call.params).toEqual({ bar: true });
expect(call.startedAt).toStrictEqual(new Date(DATE_1970));
expect(call.previousStartedAt).toStrictEqual(new Date(DATE_1970_5_MIN));
expect(call.state).toEqual({});
expect(call.rule).not.toBe(null);
expect(call.rule.id).toBe('1');
expect(call.rule.name).toBe(RULE_NAME);
expect(call.rule.tags).toEqual(['rule-', '-tags']);
expect(call.rule.consumer).toBe('bar');
expect(call.rule.enabled).toBe(true);
expect(call.rule.schedule).toEqual({ interval: '10s' });
expect(call.rule.createdBy).toBe('rule-creator');
expect(call.rule.updatedBy).toBe('rule-updater');
expect(call.rule.createdAt).toBe(mockDate);
expect(call.rule.updatedAt).toBe(mockDate);
expect(call.rule.notifyWhen).toBe('onActiveAlert');
expect(call.rule.throttle).toBe(null);
expect(call.rule.producer).toBe('alerts');
expect(call.rule.ruleTypeId).toBe('test');
expect(call.rule.ruleTypeName).toBe('My test rule');
expect(call.rule.actions).toEqual(RULE_ACTIONS);
expect(call.services.alertFactory.create).toBeTruthy();
expect(call.services.alertsClient).not.toBe(null);
expect(call.services.alertsClient?.create).toBeTruthy();
expect(call.services.scopedClusterClient).toBeTruthy();
expect(call.services).toBeTruthy();
expect(logger.debug).toHaveBeenCalledTimes(6);
expect(logger.debug).nthCalledWith(1, `Initializing resources for AlertsService`);
expect(logger.debug).nthCalledWith(2, 'executing rule test:1 at 1970-01-01T00:00:00.000Z');
expect(logger.debug).nthCalledWith(
3,
'deprecated ruleRunStatus for test:1: {"lastExecutionDate":"1970-01-01T00:00:00.000Z","status":"ok"}'
);
expect(logger.debug).nthCalledWith(
4,
'ruleRunStatus for test:1: {"outcome":"succeeded","outcomeOrder":0,"outcomeMsg":null,"warning":null,"alertsCount":{"active":0,"new":0,"recovered":0,"ignored":0}}'
);
expect(logger.debug).nthCalledWith(
5,
'ruleRunMetrics for test:1: {"numSearches":3,"totalSearchDurationMs":23423,"esSearchDurationMs":33,"numberOfTriggeredActions":0,"numberOfGeneratedActions":0,"numberOfActiveAlerts":0,"numberOfRecoveredAlerts":0,"numberOfNewAlerts":0,"hasReachedAlertLimit":false,"triggeredActionsStatus":"complete"}'
);
expect(
taskRunnerFactoryInitializerParams.internalSavedObjectsRepository.update
).toHaveBeenCalledWith(...generateSavedObjectParams({}));
expect(taskRunnerFactoryInitializerParams.executionContext.withContext).toBeCalledTimes(1);
expect(taskRunnerFactoryInitializerParams.executionContext.withContext).toHaveBeenCalledWith(
{
id: '1',
name: 'execute test',
type: 'alert',
description: 'execute [test] with name [rule-name] in [default] namespace',
},
expect.any(Function)
);
expect(mockUsageCounter.incrementCounter).not.toHaveBeenCalled();
expect(
jest.requireMock('../lib/wrap_scoped_cluster_client').createWrappedScopedClusterClientFactory
).toHaveBeenCalled();
spy.mockRestore();
});
test('should successfully execute task and index alert documents', async () => {
const alertsService = new AlertsService({
logger,
pluginStop$: new ReplaySubject(1),
kibanaVersion: '8.8.0',
elasticsearchClientPromise: Promise.resolve(clusterClient),
});
const spy = jest
.spyOn(alertsService, 'getContextInitializationPromise')
.mockResolvedValue({ result: true });
ruleTypeWithAlerts.executor.mockImplementation(
async ({
services: executorServices,
}: RuleExecutorOptions<
RuleTypeParams,
RuleTypeState,
AlertInstanceState,
AlertInstanceContext,
string,
RuleAlertData
>) => {
executorServices.alertsClient?.create({
id: '1',
actionGroup: 'default',
payload: { textField: 'foo', numericField: 27 },
});
return { state: {} };
}
);
const taskRunner = new TaskRunner(
ruleTypeWithAlerts,
mockedTaskInstance,
{
...taskRunnerFactoryInitializerParams,
alertsService,
},
inMemoryMetrics
);
expect(AlertingEventLogger).toHaveBeenCalledTimes(1);
rulesClient.getAlertFromRaw.mockReturnValue(mockedRuleTypeSavedObject as Rule);
encryptedSavedObjectsClient.getDecryptedAsInternalUser.mockResolvedValue(SAVED_OBJECT);
await taskRunner.run();
expect(ruleType.executor).toHaveBeenCalledTimes(1);
expect(clusterClient.bulk).toHaveBeenCalledWith({
index: '.alerts-test.alerts-default',
refresh: 'wait_for',
require_alias: true,
body: [
{ index: { _id: '5f6aa57d-3e22-484e-bae8-cbed868f4d28' } },
// new alert doc
{
'@timestamp': DATE_1970,
kibana: {
alert: {
action_group: 'default',
duration: {
us: '0',
},
flapping: false,
flapping_history: [true],
instance: {
id: '1',
},
maintenance_window_ids: [],
rule: {
category: 'My test rule',
consumer: 'bar',
execution: {
uuid: '5f6aa57d-3e22-484e-bae8-cbed868f4d28',
},
name: 'rule-name',
parameters: {
bar: true,
},
producer: 'alerts',
revision: 0,
rule_type_id: 'test',
tags: ['rule-', '-tags'],
uuid: '1',
},
start: DATE_1970,
status: 'active',
uuid: '5f6aa57d-3e22-484e-bae8-cbed868f4d28',
},
space_ids: ['default'],
},
numericField: 27,
textField: 'foo',
},
],
});
spy.mockRestore();
});
test('should default to legacy alerts client if error creating alerts client', async () => {
const spy1 = jest
.spyOn(LegacyAlertsClientModule, 'LegacyAlertsClient')
.mockImplementation(() => mockLegacyAlertsClient);
const spy2 = jest
.spyOn(RuleRunMetricsStoreModule, 'RuleRunMetricsStore')
.mockImplementation(() => ruleRunMetricsStore);
mockAlertsService.createAlertsClient.mockImplementation(() => {
throw new Error('Could not initialize!');
});
mockLegacyAlertsClient.getAlertsToSerialize.mockResolvedValue({
alertsToReturn: {},
recoveredAlertsToReturn: {},
});
ruleRunMetricsStore.getMetrics.mockReturnValue({
numSearches: 3,
totalSearchDurationMs: 23423,
esSearchDurationMs: 33,
numberOfTriggeredActions: 0,
numberOfGeneratedActions: 0,
numberOfActiveAlerts: 0,
numberOfRecoveredAlerts: 0,
numberOfNewAlerts: 0,
hasReachedAlertLimit: false,
triggeredActionsStatus: 'complete',
});
const taskRunner = new TaskRunner(
ruleTypeWithAlerts,
{
...mockedTaskInstance,
state: {
...mockedTaskInstance.state,
previousStartedAt: new Date(Date.now() - 5 * 60 * 1000).toISOString(),
},
},
taskRunnerFactoryInitializerParams,
inMemoryMetrics
);
expect(AlertingEventLogger).toHaveBeenCalledTimes(1);
rulesClient.getAlertFromRaw.mockReturnValue(mockedRuleTypeSavedObject as Rule);
encryptedSavedObjectsClient.getDecryptedAsInternalUser.mockResolvedValue(SAVED_OBJECT);
await taskRunner.run();
expect(mockAlertsService.createAlertsClient).toHaveBeenCalled();
expect(logger.error).toHaveBeenCalledWith(
`Error initializing AlertsClient for context test. Using legacy alerts client instead. - Could not initialize!`
);
expect(LegacyAlertsClientModule.LegacyAlertsClient).toHaveBeenCalledWith({
logger,
ruleType: ruleTypeWithAlerts,
});
testCorrectAlertsClientUsed({
alertsClientToUse: mockLegacyAlertsClient,
alertsClientNotToUse: mockAlertsClient,
});
expect(ruleType.executor).toHaveBeenCalledTimes(1);
expect(logger.debug).toHaveBeenCalledTimes(5);
expect(logger.debug).nthCalledWith(1, 'executing rule test:1 at 1970-01-01T00:00:00.000Z');
expect(
taskRunnerFactoryInitializerParams.internalSavedObjectsRepository.update
).toHaveBeenCalledWith(...generateSavedObjectParams({}));
expect(taskRunnerFactoryInitializerParams.executionContext.withContext).toBeCalledTimes(1);
expect(taskRunnerFactoryInitializerParams.executionContext.withContext).toHaveBeenCalledWith(
{
id: '1',
name: 'execute test',
type: 'alert',
description: 'execute [test] with name [rule-name] in [default] namespace',
},
expect.any(Function)
);
expect(mockUsageCounter.incrementCounter).not.toHaveBeenCalled();
expect(
jest.requireMock('../lib/wrap_scoped_cluster_client').createWrappedScopedClusterClientFactory
).toHaveBeenCalled();
spy1.mockRestore();
spy2.mockRestore();
});
test('should default to legacy alerts client if alert service is not defined', async () => {
const spy1 = jest
.spyOn(LegacyAlertsClientModule, 'LegacyAlertsClient')
.mockImplementation(() => mockLegacyAlertsClient);
const spy2 = jest
.spyOn(RuleRunMetricsStoreModule, 'RuleRunMetricsStore')
.mockImplementation(() => ruleRunMetricsStore);
mockLegacyAlertsClient.getAlertsToSerialize.mockResolvedValue({
alertsToReturn: {},
recoveredAlertsToReturn: {},
});
ruleRunMetricsStore.getMetrics.mockReturnValue({
numSearches: 3,
totalSearchDurationMs: 23423,
esSearchDurationMs: 33,
numberOfTriggeredActions: 0,
numberOfGeneratedActions: 0,
numberOfActiveAlerts: 0,
numberOfRecoveredAlerts: 0,
numberOfNewAlerts: 0,
hasReachedAlertLimit: false,
triggeredActionsStatus: 'complete',
});
const taskRunner = new TaskRunner(
ruleTypeWithAlerts,
{
...mockedTaskInstance,
state: {
...mockedTaskInstance.state,
previousStartedAt: new Date(Date.now() - 5 * 60 * 1000).toISOString(),
},
},
{ ...taskRunnerFactoryInitializerParams, alertsService: null },
inMemoryMetrics
);
expect(AlertingEventLogger).toHaveBeenCalledTimes(1);
rulesClient.getAlertFromRaw.mockReturnValue(mockedRuleTypeSavedObject as Rule);
encryptedSavedObjectsClient.getDecryptedAsInternalUser.mockResolvedValue(SAVED_OBJECT);
await taskRunner.run();
expect(mockAlertsService.createAlertsClient).not.toHaveBeenCalled();
expect(logger.error).not.toHaveBeenCalled();
expect(LegacyAlertsClientModule.LegacyAlertsClient).toHaveBeenCalledWith({
logger,
ruleType: ruleTypeWithAlerts,
});
testCorrectAlertsClientUsed({
alertsClientToUse: mockLegacyAlertsClient,
alertsClientNotToUse: mockAlertsClient,
});
expect(ruleType.executor).toHaveBeenCalledTimes(1);
expect(logger.debug).toHaveBeenCalledTimes(5);
expect(logger.debug).nthCalledWith(1, 'executing rule test:1 at 1970-01-01T00:00:00.000Z');
expect(
taskRunnerFactoryInitializerParams.internalSavedObjectsRepository.update
).toHaveBeenCalledWith(...generateSavedObjectParams({}));
expect(taskRunnerFactoryInitializerParams.executionContext.withContext).toBeCalledTimes(1);
expect(taskRunnerFactoryInitializerParams.executionContext.withContext).toHaveBeenCalledWith(
{
id: '1',
name: 'execute test',
type: 'alert',
description: 'execute [test] with name [rule-name] in [default] namespace',
},
expect.any(Function)
);
expect(mockUsageCounter.incrementCounter).not.toHaveBeenCalled();
expect(
jest.requireMock('../lib/wrap_scoped_cluster_client').createWrappedScopedClusterClientFactory
).toHaveBeenCalled();
spy1.mockRestore();
spy2.mockRestore();
});
function testCorrectAlertsClientUsed<
AlertData extends RuleAlertData = never,
State extends AlertInstanceState = never,
Context extends AlertInstanceContext = never,
ActionGroupIds extends string = 'default',
RecoveryActionGroupId extends string = 'recovered'
>({
alertsClientToUse,
alertsClientNotToUse,
}: {
alertsClientToUse: IAlertsClient<
AlertData,
State,
Context,
ActionGroupIds,
RecoveryActionGroupId
>;
alertsClientNotToUse: IAlertsClient<
AlertData,
State,
Context,
ActionGroupIds,
RecoveryActionGroupId
>;
}) {
expect(alertsClientToUse.initializeExecution).toHaveBeenCalledWith({
activeAlertsFromState: {},
flappingSettings: {
enabled: true,
lookBackWindow: 20,
statusChangeThreshold: 4,
},
maxAlerts: 1000,
recoveredAlertsFromState: {},
ruleLabel: "test:1: 'rule-name'",
});
expect(alertsClientNotToUse.initializeExecution).not.toHaveBeenCalled();
expect(alertsClientToUse.checkLimitUsage).toHaveBeenCalled();
expect(alertsClientNotToUse.checkLimitUsage).not.toHaveBeenCalled();
expect(alertsClientToUse.processAndLogAlerts).toHaveBeenCalledWith({
eventLogger: alertingEventLogger,
ruleRunMetricsStore,
shouldLogAlerts: true,
flappingSettings: {
enabled: true,
lookBackWindow: 20,
statusChangeThreshold: 4,
},
notifyWhen: RuleNotifyWhen.ACTIVE,
maintenanceWindowIds: [],
});
expect(alertsClientNotToUse.processAndLogAlerts).not.toHaveBeenCalled();
expect(alertsClientToUse.persistAlerts).toHaveBeenCalled();
expect(alertsClientNotToUse.persistAlerts).not.toHaveBeenCalled();
expect(alertsClientToUse.getProcessedAlerts).toHaveBeenCalledWith('activeCurrent');
expect(alertsClientToUse.getProcessedAlerts).toHaveBeenCalledWith('recoveredCurrent');
expect(alertsClientNotToUse.getProcessedAlerts).not.toHaveBeenCalled();
expect(alertsClientToUse.getAlertsToSerialize).toHaveBeenCalled();
expect(alertsClientNotToUse.getAlertsToSerialize).not.toHaveBeenCalled();
}
});

View file

@ -525,6 +525,7 @@ describe('Task Runner Cancel', () => {
},
timings: {
claim_to_start_duration_ms: 0,
persist_alerts_duration_ms: 0,
prepare_rule_duration_ms: 0,
process_alerts_duration_ms: 0,
process_rule_duration_ms: 0,

View file

@ -32,6 +32,7 @@ describe('TaskRunnerTimer', () => {
timer.setDuration(TaskRunnerTimerSpan.StartTaskRun, new Date('2020-03-06'));
expect(timer.toJson()).toEqual({
claim_to_start_duration_ms: 259200000,
persist_alerts_duration_ms: 0,
prepare_rule_duration_ms: 0,
process_alerts_duration_ms: 0,
process_rule_duration_ms: 0,
@ -49,6 +50,7 @@ describe('TaskRunnerTimer', () => {
);
expect(timer.toJson()).toEqual({
claim_to_start_duration_ms: 432000000,
persist_alerts_duration_ms: 0,
prepare_rule_duration_ms: 0,
process_alerts_duration_ms: 0,
process_rule_duration_ms: 0,

View file

@ -13,6 +13,7 @@ export enum TaskRunnerTimerSpan {
PrepareRule = 'prepare_rule_duration_ms',
RuleTypeRun = 'rule_type_run_duration_ms',
ProcessAlerts = 'process_alerts_duration_ms',
PersistAlerts = 'persist_alerts_duration_ms',
TriggerActions = 'trigger_actions_duration_ms',
ProcessRuleRun = 'process_rule_duration_ms',
}
@ -62,6 +63,7 @@ export class TaskRunnerTimer {
[TaskRunnerTimerSpan.PrepareRule]: this.timings[TaskRunnerTimerSpan.PrepareRule] ?? 0,
[TaskRunnerTimerSpan.RuleTypeRun]: this.timings[TaskRunnerTimerSpan.RuleTypeRun] ?? 0,
[TaskRunnerTimerSpan.ProcessAlerts]: this.timings[TaskRunnerTimerSpan.ProcessAlerts] ?? 0,
[TaskRunnerTimerSpan.PersistAlerts]: this.timings[TaskRunnerTimerSpan.PersistAlerts] ?? 0,
[TaskRunnerTimerSpan.TriggerActions]: this.timings[TaskRunnerTimerSpan.TriggerActions] ?? 0,
[TaskRunnerTimerSpan.ProcessRuleRun]: this.timings[TaskRunnerTimerSpan.ProcessRuleRun] ?? 0,
};
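A sketch of how the new span is recorded and then surfaced, using the calls that appear elsewhere in this diff (`timer` stands in for the task runner's timer instance):

```ts
// during rule execution (see the task runner change above)
await timer.runWithTimer(TaskRunnerTimerSpan.PersistAlerts, async () => {
  await alertsClient.persistAlerts();
});

// later, when the event log document is built
timer.toJson();
// => { ..., persist_alerts_duration_ms: 60, trigger_actions_duration_ms: 70, ... }
```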

View file

@ -58,6 +58,7 @@ import {
} from '../common';
import { PublicAlertFactory } from './alert/create_alert_factory';
import { RulesSettingsFlappingProperties } from '../common/rules_settings';
import { PublicAlertsClient } from './alerts_client/types';
export type WithoutQueryAndParams<T> = Pick<T, Exclude<keyof T, 'query' | 'params'>>;
export type SpaceIdToNamespaceFunction = (spaceId?: string) => string | undefined;
export type { RuleTypeParams };
@ -94,7 +95,17 @@ export interface RuleExecutorServices<
savedObjectsClient: SavedObjectsClientContract;
uiSettingsClient: IUiSettingsClient;
scopedClusterClient: IScopedClusterClient;
/**
* Deprecated; remove alertFactory once all rule types are onboarded to
* the alertsClient
* @deprecated
*/
alertFactory: PublicAlertFactory<State, Context, ActionGroupIds>;
/**
* Only available when framework alerts are enabled and the rule
* type has registered an alert context with the framework, with shouldWrite set to true
*/
alertsClient: PublicAlertsClient<AlertData, State, Context, ActionGroupIds> | null;
shouldWriteAlerts: () => boolean;
shouldStopExecution: () => boolean;
ruleMonitoringService?: PublicRuleMonitoringService;
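For rule types moving off the deprecated factory, the calls map roughly as follows; a sketch mirroring the pattern-firing test rule change later in this diff:

```ts
// before (deprecated)
services.alertFactory.create('instance-1').scheduleActions('default', { foo: 'bar' });

// after, when the rule type has registered alerts with the framework
services.alertsClient?.create({
  id: 'instance-1',
  actionGroup: 'default',
  context: { foo: 'bar' },
});
```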

View file

@ -389,6 +389,9 @@
"claim_to_start_duration_ms": {
"type": "long"
},
"persist_alerts_duration_ms": {
"type": "long"
},
"prepare_rule_duration_ms": {
"type": "long"
},

View file

@ -172,6 +172,7 @@ export const EventSchema = schema.maybe(
trigger_actions_duration_ms: ecsStringOrNumber(),
process_rule_duration_ms: ecsStringOrNumber(),
claim_to_start_duration_ms: ecsStringOrNumber(),
persist_alerts_duration_ms: ecsStringOrNumber(),
prepare_rule_duration_ms: ecsStringOrNumber(),
total_run_duration_ms: ecsStringOrNumber(),
total_enrichment_duration_ms: ecsStringOrNumber(),

View file

@ -164,6 +164,9 @@ exports.EcsCustomPropertyMappings = {
claim_to_start_duration_ms: {
type: 'long',
},
persist_alerts_duration_ms: {
type: 'long',
},
prepare_rule_duration_ms: {
type: 'long',
},

View file

@ -123,6 +123,7 @@ describe('BurnRateRuleExecutor', () => {
alertWithLifecycle: alertWithLifecycleMock,
savedObjectsClient: soClientMock,
scopedClusterClient: { asCurrentUser: esClientMock, asInternalUser: esClientMock },
alertsClient: null,
alertFactory: alertFactoryMock,
searchSourceClient: searchSourceClientMock,
uiSettingsClient: uiSettingsClientMock,

View file

@ -132,6 +132,7 @@ function createRule(shouldWriteAlerts: boolean = true) {
updatedBy: 'updatedBy',
},
services: {
alertsClient: null,
alertFactory,
savedObjectsClient: {} as any,
scopedClusterClient: {} as any,

View file

@ -77,6 +77,7 @@ export const createDefaultAlertExecutorOptions = <
params,
spaceId: 'SPACE_ID',
services: {
alertsClient: null,
alertFactory: alertsMock.createRuleExecutorServices<InstanceState, InstanceContext>()
.alertFactory,
savedObjectsClient: savedObjectsClientMock.create(),

View file

@ -249,6 +249,7 @@ export const previewRulesRoute = async (
services: {
shouldWriteAlerts,
shouldStopExecution: () => false,
alertsClient: null,
alertFactory,
savedObjectsClient: coreContext.savedObjects.client,
scopedClusterClient: wrapScopedClusterClient({

View file

@ -530,7 +530,16 @@ function getPatternFiringAlertsAsDataRuleType() {
interface State extends RuleTypeState {
patternIndex?: number;
}
const result: RuleType<ParamsType, never, State, {}, {}, 'default', 'recovered', {}> = {
const result: RuleType<
ParamsType,
never,
State,
{},
{},
'default',
'recovered',
{ patternIndex: number; instancePattern: boolean[] }
> = {
id: 'test.patternFiringAad',
name: 'Test: Firing on a Pattern and writing Alerts as Data',
actionGroups: [{ id: 'default', name: 'Default' }],
@ -553,6 +562,11 @@ function getPatternFiringAlertsAsDataRuleType() {
maxPatternLength = Math.max(maxPatternLength, instancePattern.length);
}
const alertsClient = services.alertsClient;
if (!alertsClient) {
throw new Error(`Expected alertsClient to be defined but it is not`);
}
// get the pattern index, return if past it
const patternIndex = state.patternIndex ?? 0;
if (patternIndex >= maxPatternLength) {
@ -563,9 +577,19 @@ function getPatternFiringAlertsAsDataRuleType() {
for (const [instanceId, instancePattern] of Object.entries(pattern)) {
const scheduleByPattern = instancePattern[patternIndex];
if (scheduleByPattern === true) {
services.alertFactory.create(instanceId).scheduleActions('default');
alertsClient.create({
id: instanceId,
actionGroup: 'default',
state: { patternIndex },
payload: { patternIndex, instancePattern: instancePattern as boolean[] },
});
} else if (typeof scheduleByPattern === 'string') {
services.alertFactory.create(instanceId).scheduleActions('default', scheduleByPattern);
alertsClient.create({
id: instanceId,
actionGroup: 'default',
state: { patternIndex },
payload: { patternIndex, instancePattern: [true] },
});
}
}

View file

@ -17,6 +17,7 @@ import {
getTestRuleData,
getUrlPrefix,
ObjectRemover,
TaskManagerDoc,
} from '../../../../../common/lib';
// eslint-disable-next-line import/no-default-export
@ -26,7 +27,7 @@ export default function createAlertsAsDataInstallResourcesTest({ getService }: F
const supertestWithoutAuth = getService('supertestWithoutAuth');
const objectRemover = new ObjectRemover(supertestWithoutAuth);
type PatternFiringAlert = Alert;
type PatternFiringAlert = Alert & { patternIndex: number; instancePattern: boolean[] };
const alertsAsDataIndex = '.alerts-test.patternfiring.alerts-default';
const timestampPattern = /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}Z/;
@ -82,6 +83,12 @@ export default function createAlertsAsDataInstallResourcesTest({ getService }: F
// Query for alerts
const alertDocsRun1 = await queryForAlertDocs<PatternFiringAlert>();
// Get alert state from task document
let state: any = await getTaskState(ruleId);
expect(state.alertInstances.alertA.state.patternIndex).to.equal(0);
expect(state.alertInstances.alertB.state.patternIndex).to.equal(0);
expect(state.alertInstances.alertC.state.patternIndex).to.equal(0);
// After the first run, we should have 3 alert docs for the 3 active alerts
expect(alertDocsRun1.length).to.equal(3);
@ -92,6 +99,9 @@ export default function createAlertsAsDataInstallResourcesTest({ getService }: F
// Each doc should have active status and default action group id
expect(source.kibana.alert.action_group).to.equal('default');
// patternIndex should be 0 for the first run
expect(source.patternIndex).to.equal(0);
// alert UUID should equal doc id
expect(source.kibana.alert.uuid).to.equal(alertDocsRun1[i]._id);
@ -116,12 +126,15 @@ export default function createAlertsAsDataInstallResourcesTest({ getService }: F
(doc) => doc._source!.kibana.alert.instance.id === 'alertA'
);
const alertADocRun1 = alertDoc!._source!;
expect(alertADocRun1.instancePattern).to.eql(pattern.alertA);
alertDoc = alertDocsRun1.find((doc) => doc._source!.kibana.alert.instance.id === 'alertB');
const alertBDocRun1 = alertDoc!._source!;
expect(alertBDocRun1.instancePattern).to.eql(pattern.alertB);
alertDoc = alertDocsRun1.find((doc) => doc._source!.kibana.alert.instance.id === 'alertC');
const alertCDocRun1 = alertDoc!._source!;
expect(alertCDocRun1.instancePattern).to.eql(pattern.alertC);
// --------------------------
// RUN 2 - 2 recovered (alertB, alertC), 1 ongoing (alertA)
@ -140,6 +153,12 @@ export default function createAlertsAsDataInstallResourcesTest({ getService }: F
// Query for alerts
const alertDocsRun2 = await queryForAlertDocs<PatternFiringAlert>();
// Get alert state from task document
state = await getTaskState(ruleId);
expect(state.alertInstances.alertA.state.patternIndex).to.equal(1);
expect(state.alertInstances.alertB).to.be(undefined);
expect(state.alertInstances.alertC).to.be(undefined);
// After the second run, we should have 3 alert docs
expect(alertDocsRun2.length).to.equal(3);
@ -159,8 +178,11 @@ export default function createAlertsAsDataInstallResourcesTest({ getService }: F
// status is still active; duration is updated; no end time
alertDoc = alertDocsRun2.find((doc) => doc._source!.kibana.alert.instance.id === 'alertA');
const alertADocRun2 = alertDoc!._source!;
expect(alertADocRun2.instancePattern).to.eql(pattern.alertA);
// uuid is the same
expect(alertADocRun2.kibana.alert.uuid).to.equal(alertADocRun1.kibana.alert.uuid);
// patternIndex should be 1 for the second run
expect(alertADocRun2.patternIndex).to.equal(1);
expect(alertADocRun2.kibana.alert.action_group).to.equal('default');
// start time should be defined and the same as prior run
expect(alertADocRun2.kibana.alert.start).to.match(timestampPattern);
@ -183,6 +205,8 @@ export default function createAlertsAsDataInstallResourcesTest({ getService }: F
const alertBDocRun2 = alertDoc!._source!;
// action group should be set to recovered
expect(alertBDocRun2.kibana.alert.action_group).to.be('recovered');
expect(alertBDocRun2.instancePattern).to.eql(alertBDocRun1.instancePattern);
expect(alertBDocRun2.patternIndex).to.be(alertBDocRun1.patternIndex);
// uuid is the same
expect(alertBDocRun2.kibana.alert.uuid).to.equal(alertBDocRun1.kibana.alert.uuid);
// start time should be defined and the same as before
@ -208,6 +232,8 @@ export default function createAlertsAsDataInstallResourcesTest({ getService }: F
const alertCDocRun2 = alertDoc!._source!;
// action group should be set to recovered
expect(alertCDocRun2.kibana.alert.action_group).to.be('recovered');
expect(alertCDocRun2.instancePattern).to.eql(alertCDocRun1.instancePattern);
expect(alertCDocRun2.patternIndex).to.be(alertCDocRun1.patternIndex);
// uuid is the same
expect(alertCDocRun2.kibana.alert.uuid).to.equal(alertCDocRun1.kibana.alert.uuid);
// start time should be defined and the same as before
@ -244,6 +270,12 @@ export default function createAlertsAsDataInstallResourcesTest({ getService }: F
// Query for alerts
const alertDocsRun3 = await queryForAlertDocs<PatternFiringAlert>();
// Get alert state from task document
state = await getTaskState(ruleId);
expect(state.alertInstances.alertA.state.patternIndex).to.equal(2);
expect(state.alertInstances.alertB).to.be(undefined);
expect(state.alertInstances.alertC.state.patternIndex).to.equal(2);
// After the third run, we should have 4 alert docs
// The docs for "alertA" and "alertB" should not have been updated
// There should be two docs for "alertC", one for the first active -> recovered span
@ -256,9 +288,12 @@ export default function createAlertsAsDataInstallResourcesTest({ getService }: F
// status is still active; duration is updated; no end time
alertDoc = alertDocsRun3.find((doc) => doc._source!.kibana.alert.instance.id === 'alertA');
const alertADocRun3 = alertDoc!._source!;
expect(alertADocRun3.instancePattern).to.eql(pattern.alertA);
// uuid is the same as previous runs
expect(alertADocRun3.kibana.alert.uuid).to.equal(alertADocRun2.kibana.alert.uuid);
expect(alertADocRun3.kibana.alert.uuid).to.equal(alertADocRun1.kibana.alert.uuid);
// patternIndex should be 2 for the third run
expect(alertADocRun3.patternIndex).to.equal(2);
expect(alertADocRun3.kibana.alert.action_group).to.equal('default');
// start time should be defined and the same as prior runs
expect(alertADocRun3.kibana.alert.start).to.match(timestampPattern);
@ -305,9 +340,12 @@ export default function createAlertsAsDataInstallResourcesTest({ getService }: F
const alertCDocRun3 = alertCDocs.find(
(doc) => doc._source!.kibana.alert.rule.execution?.uuid === executionUuid
)!._source!;
expect(alertCDocRun3.instancePattern).to.eql(pattern.alertC);
// uuid is different from the prior run
expect(alertCDocRun3.kibana.alert.uuid).not.to.equal(alertCDocRun2.kibana.alert.uuid);
expect(alertCDocRun3.kibana.alert.action_group).to.equal('default');
// patternIndex should be 2 for the third run
expect(alertCDocRun3.patternIndex).to.equal(2);
// start time should be defined and different from the prior run
expect(alertCDocRun3.kibana.alert.start).to.match(timestampPattern);
expect(alertCDocRun3.kibana.alert.start).not.to.equal(alertCDocRun2.kibana.alert.start);
@ -360,6 +398,15 @@ export default function createAlertsAsDataInstallResourcesTest({ getService }: F
return searchResult.hits.hits as Array<SearchHit<T>>;
}
async function getTaskState(ruleId: string) {
const task = await es.get<TaskManagerDoc>({
id: `task:${ruleId}`,
index: '.kibana_task_manager',
});
return JSON.parse(task._source!.task.state);
}
async function waitForEventLogDocs(
id: string,
actions: Map<string, { gte: number } | { equal: number }>