Fixes https://github.com/elastic/kibana/issues/151938

In this PR, I'm rewriting the Task Manager poller so it doesn't run concurrently when timeouts occur, while also fixing the issue where polling requests would pile up when polling takes time (a minimal sketch of the non-overlapping approach follows this description). To support this, I've also made the following changes:

- Removed the observable monitor and the `xpack.task_manager.max_poll_inactivity_cycles` setting.
- Made the task store `search` and `updateByQuery` functions perform no retries. This prevents a request from retrying 5x whenever a timeout occurs, which made each call take up to 2.5 minutes before Kibana saw the error (now down to 30s each). We have polling to manage retries in these situations.
- Switched the task poller tests to use `sinon` for faking timers.
- Removed the `assertStillInSetup` checks on plugin setup; they were a maintenance burden that wasn't necessary to keep with these code changes.

The main code changes are within these files (review them thoroughly so the polling cycle doesn't suddenly stop):

- `x-pack/plugins/task_manager/server/polling/task_poller.ts`
- `x-pack/plugins/task_manager/server/polling_lifecycle.ts` (easier to review if you disregard whitespace, `?w=1`)

## To verify

1. Tasks run normally (create a rule or something else that goes through Task Manager regularly).
2. When the update by query takes a while, the request is cancelled after 30s or the manually configured timeout.
3. When the search for claimed tasks takes a while, the request is cancelled after 30s or the manually configured timeout.

**Tips:**

<details><summary>How to slow down the search for claimed tasks</summary>

```
diff --git a/x-pack/plugins/task_manager/server/queries/task_claiming.ts b/x-pack/plugins/task_manager/server/queries/task_claiming.ts
index 07042650a37..2caefd63672 100644
--- a/x-pack/plugins/task_manager/server/queries/task_claiming.ts
+++ b/x-pack/plugins/task_manager/server/queries/task_claiming.ts
@@ -247,7 +247,7 @@ export class TaskClaiming {
       taskTypes,
     });

-    const docs = tasksUpdated > 0 ? await this.sweepForClaimedTasks(taskTypes, size) : [];
+    const docs = await this.sweepForClaimedTasks(taskTypes, size);

     this.emitEvents(docs.map((doc) => asTaskClaimEvent(doc.id, asOk(doc))));

@@ -346,6 +346,13 @@ export class TaskClaiming {
       size,
       sort: SortByRunAtAndRetryAt,
       seq_no_primary_term: true,
+      aggs: {
+        delay: {
+          shard_delay: {
+            value: '40s',
+          },
+        },
+      },
     });

     return docs;
```

</details>

<details><summary>How to slow down update by query requests</summary>

Not the cleanest way, but you'll see occasional request timeouts from the `updateByQuery` calls. I had more luck creating rules that run every 1s.

```
diff --git a/x-pack/plugins/task_manager/server/task_store.ts b/x-pack/plugins/task_manager/server/task_store.ts
index a06ee7b918a..07aa81e5388 100644
--- a/x-pack/plugins/task_manager/server/task_store.ts
+++ b/x-pack/plugins/task_manager/server/task_store.ts
@@ -126,6 +126,7 @@ export class TaskStore {
       // Timeouts are retried and make requests timeout after (requestTimeout * (1 + maxRetries))
       // The poller doesn't need retry logic because it will try again at the next polling cycle
       maxRetries: 0,
+      requestTimeout: 900,
     });
   }

@@ -458,6 +459,7 @@ export class TaskStore {
       ignore_unavailable: true,
       refresh: true,
       conflicts: 'proceed',
+      requests_per_second: 1,
       body: {
         ...opts,
         max_docs,
```

</details>

---------

Co-authored-by: Kibana Machine <42973632+kibanamachine@users.noreply.github.com>
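For reviewers who want the gist of the poller change before diving into `task_poller.ts`: the key property is that a new poll cycle is only scheduled after the previous one settles, so a slow or timed-out claim cycle delays the next poll instead of stacking another request behind it. Below is a minimal sketch of that pattern; `Poller` and `pollForWork` are hypothetical names used for illustration, not the actual `task_poller.ts` API.

```ts
// Minimal sketch of a non-overlapping poll loop. `Poller` and `pollForWork`
// are illustrative names, not the real task_poller.ts implementation.
type PollForWork = () => Promise<void>;

class Poller {
  private running = false;

  constructor(
    private readonly pollInterval: number,
    private readonly pollForWork: PollForWork
  ) {}

  public start() {
    if (this.running) return;
    this.running = true;
    void this.runLoop();
  }

  public stop() {
    this.running = false;
  }

  private async runLoop(): Promise<void> {
    while (this.running) {
      try {
        // Awaiting the work means a slow (or timed-out) cycle delays the
        // next poll rather than letting a second cycle run concurrently.
        await this.pollForWork();
      } catch {
        // Swallow errors (e.g. request timeouts) so the loop keeps polling;
        // the next cycle doubles as the retry.
      }
      // Wait one poll interval before the next cycle.
      await new Promise<void>((resolve) => setTimeout(resolve, this.pollInterval));
    }
  }
}
```

This shape is also what makes the switch to `sinon` fake timers convenient: a test can advance the clock deterministically and assert that exactly one cycle runs per interval. Again, this is a sketch against the hypothetical `Poller` above, not the real poller tests.

```ts
import sinon from 'sinon';

describe('Poller (sketch)', () => {
  let clock: sinon.SinonFakeTimers;

  beforeEach(() => {
    clock = sinon.useFakeTimers();
  });

  afterEach(() => {
    clock.restore();
  });

  test('runs exactly one cycle per poll interval', async () => {
    const pollForWork = sinon.stub().resolves();
    const poller = new Poller(3000, pollForWork);
    poller.start();

    // The first cycle kicks off immediately on start().
    await clock.tickAsync(0);
    sinon.assert.calledOnce(pollForWork);

    // Advancing one interval triggers exactly one more cycle.
    await clock.tickAsync(3000);
    sinon.assert.calledTwice(pollForWork);

    poller.stop();
  });
});
```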
243 lines · 8.7 KiB · TypeScript
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0; you may not use this file except in compliance with the Elastic License
 * 2.0.
 */

import { TaskManagerPlugin, getElasticsearchAndSOAvailability } from './plugin';
import { coreMock } from '@kbn/core/server/mocks';
import { TaskManagerConfig } from './config';
import { Subject } from 'rxjs';
import { bufferCount, take } from 'rxjs/operators';
import { CoreStatus, ServiceStatusLevels } from '@kbn/core/server';
import { taskPollingLifecycleMock } from './polling_lifecycle.mock';
import { TaskPollingLifecycle } from './polling_lifecycle';
import type { TaskPollingLifecycle as TaskPollingLifecycleClass } from './polling_lifecycle';
import { ephemeralTaskLifecycleMock } from './ephemeral_task_lifecycle.mock';
import { EphemeralTaskLifecycle } from './ephemeral_task_lifecycle';
import type { EphemeralTaskLifecycle as EphemeralTaskLifecycleClass } from './ephemeral_task_lifecycle';

// Replace both lifecycles with mocks so the plugin can be constructed without
// starting real pollers; the tests assert against the mocked constructors.
let mockTaskPollingLifecycle = taskPollingLifecycleMock.create({});
jest.mock('./polling_lifecycle', () => {
  return {
    TaskPollingLifecycle: jest.fn().mockImplementation(() => {
      return mockTaskPollingLifecycle;
    }),
  };
});

let mockEphemeralTaskLifecycle = ephemeralTaskLifecycleMock.create({});
jest.mock('./ephemeral_task_lifecycle', () => {
  return {
    EphemeralTaskLifecycle: jest.fn().mockImplementation(() => {
      return mockEphemeralTaskLifecycle;
    }),
  };
});

const coreStart = coreMock.createStart();
const pluginInitializerContextParams = {
  max_workers: 10,
  max_attempts: 9,
  poll_interval: 3000,
  version_conflict_threshold: 80,
  request_capacity: 1000,
  monitored_aggregated_stats_refresh_rate: 5000,
  monitored_stats_health_verbose_log: {
    enabled: false,
    level: 'debug' as const,
    warn_delayed_task_start_in_seconds: 60,
  },
  monitored_stats_required_freshness: 5000,
  monitored_stats_running_average_window: 50,
  monitored_task_execution_thresholds: {
    default: {
      error_threshold: 90,
      warn_threshold: 80,
    },
    custom: {},
  },
  ephemeral_tasks: {
    enabled: false,
    request_capacity: 10,
  },
  unsafe: {
    exclude_task_types: [],
  },
  event_loop_delay: {
    monitor: true,
    warn_threshold: 5000,
  },
  worker_utilization_running_average_window: 5,
};

describe('TaskManagerPlugin', () => {
  beforeEach(() => {
    mockTaskPollingLifecycle = taskPollingLifecycleMock.create({});
    (TaskPollingLifecycle as jest.Mock<TaskPollingLifecycleClass>).mockClear();
    mockEphemeralTaskLifecycle = ephemeralTaskLifecycleMock.create({});
    (EphemeralTaskLifecycle as jest.Mock<EphemeralTaskLifecycleClass>).mockClear();
  });

  describe('setup', () => {
    test('throws if no valid UUID is available', async () => {
      const pluginInitializerContext = coreMock.createPluginInitializerContext<TaskManagerConfig>(
        pluginInitializerContextParams
      );

      pluginInitializerContext.env.instanceUuid = '';

      const taskManagerPlugin = new TaskManagerPlugin(pluginInitializerContext);
      expect(() =>
        taskManagerPlugin.setup(coreMock.createSetup(), { usageCollection: undefined })
      ).toThrow(
        new Error(`TaskManager is unable to start as Kibana has no valid UUID assigned to it.`)
      );
    });

    test('it logs a warning when the unsafe `exclude_task_types` config is used', async () => {
      const pluginInitializerContext = coreMock.createPluginInitializerContext<TaskManagerConfig>({
        ...pluginInitializerContextParams,
        unsafe: {
          exclude_task_types: ['*'],
        },
      });

      const logger = pluginInitializerContext.logger.get();
      const taskManagerPlugin = new TaskManagerPlugin(pluginInitializerContext);
      taskManagerPlugin.setup(coreMock.createSetup(), { usageCollection: undefined });
      expect((logger.warn as jest.Mock).mock.calls.length).toBe(1);
      expect((logger.warn as jest.Mock).mock.calls[0][0]).toBe(
        'Excluding task types from execution: *'
      );
    });
  });

  describe('start', () => {
    test('should initialize task polling lifecycle if node.roles.backgroundTasks is true', async () => {
      const pluginInitializerContext = coreMock.createPluginInitializerContext<TaskManagerConfig>(
        pluginInitializerContextParams
      );
      pluginInitializerContext.node.roles.backgroundTasks = true;
      const taskManagerPlugin = new TaskManagerPlugin(pluginInitializerContext);
      taskManagerPlugin.setup(coreMock.createSetup(), { usageCollection: undefined });
      taskManagerPlugin.start(coreStart);

      expect(TaskPollingLifecycle as jest.Mock<TaskPollingLifecycleClass>).toHaveBeenCalledTimes(1);
      expect(
        EphemeralTaskLifecycle as jest.Mock<EphemeralTaskLifecycleClass>
      ).toHaveBeenCalledTimes(1);
    });

    test('should not initialize task polling lifecycle if node.roles.backgroundTasks is false', async () => {
      const pluginInitializerContext = coreMock.createPluginInitializerContext<TaskManagerConfig>(
        pluginInitializerContextParams
      );
      pluginInitializerContext.node.roles.backgroundTasks = false;
      const taskManagerPlugin = new TaskManagerPlugin(pluginInitializerContext);
      taskManagerPlugin.setup(coreMock.createSetup(), { usageCollection: undefined });
      taskManagerPlugin.start(coreStart);

      expect(TaskPollingLifecycle as jest.Mock<TaskPollingLifecycleClass>).not.toHaveBeenCalled();
      expect(
        EphemeralTaskLifecycle as jest.Mock<EphemeralTaskLifecycleClass>
      ).not.toHaveBeenCalled();
    });
  });

  describe('getElasticsearchAndSOAvailability', () => {
    test('returns true when both services are available', async () => {
      const core$ = new Subject<CoreStatus>();

      const availability = getElasticsearchAndSOAvailability(core$)
        .pipe(take(1), bufferCount(1))
        .toPromise();

      core$.next(mockCoreStatusAvailability({ elasticsearch: true, savedObjects: true }));

      expect(await availability).toEqual([true]);
    });

    test('returns false when both services are unavailable', async () => {
      const core$ = new Subject<CoreStatus>();

      const availability = getElasticsearchAndSOAvailability(core$)
        .pipe(take(1), bufferCount(1))
        .toPromise();

      core$.next(mockCoreStatusAvailability({ elasticsearch: false, savedObjects: false }));

      expect(await availability).toEqual([false]);
    });

    test('returns false when one service is unavailable but the other is available', async () => {
      const core$ = new Subject<CoreStatus>();

      const availability = getElasticsearchAndSOAvailability(core$)
        .pipe(take(1), bufferCount(1))
        .toPromise();

      core$.next(mockCoreStatusAvailability({ elasticsearch: true, savedObjects: false }));

      expect(await availability).toEqual([false]);
    });

    test('shifts back and forth between values as status changes', async () => {
      const core$ = new Subject<CoreStatus>();

      const availability = getElasticsearchAndSOAvailability(core$)
        .pipe(take(3), bufferCount(3))
        .toPromise();

      core$.next(mockCoreStatusAvailability({ elasticsearch: true, savedObjects: false }));

      core$.next(mockCoreStatusAvailability({ elasticsearch: true, savedObjects: true }));

      core$.next(mockCoreStatusAvailability({ elasticsearch: false, savedObjects: false }));

      expect(await availability).toEqual([false, true, false]);
    });

    test(`skips values when the status hasn't changed`, async () => {
      const core$ = new Subject<CoreStatus>();

      const availability = getElasticsearchAndSOAvailability(core$)
        .pipe(take(3), bufferCount(3))
        .toPromise();

      core$.next(mockCoreStatusAvailability({ elasticsearch: true, savedObjects: false }));

      // still false, so shouldn't emit a second time
      core$.next(mockCoreStatusAvailability({ elasticsearch: false, savedObjects: true }));

      core$.next(mockCoreStatusAvailability({ elasticsearch: true, savedObjects: true }));

      // shouldn't emit as already true
      core$.next(mockCoreStatusAvailability({ elasticsearch: true, savedObjects: true }));

      core$.next(mockCoreStatusAvailability({ elasticsearch: false, savedObjects: false }));

      expect(await availability).toEqual([false, true, false]);
    });
  });
});

// Builds a CoreStatus value where each service is either fully available or
// fully unavailable, for driving the core$ subject in the tests above.
function mockCoreStatusAvailability({
  elasticsearch,
  savedObjects,
}: {
  elasticsearch: boolean;
  savedObjects: boolean;
}) {
  return {
    elasticsearch: {
      level: elasticsearch ? ServiceStatusLevels.available : ServiceStatusLevels.unavailable,
      summary: '',
    },
    savedObjects: {
      level: savedObjects ? ServiceStatusLevels.available : ServiceStatusLevels.unavailable,
      summary: '',
    },
  };
}