# Check compatible cluster.routing.allocation.enable only on reindex migrations (#186090)
## Summary

Addresses https://github.com/elastic/kibana/issues/177831.

This PR introduces dedicated steps to check that `cluster.routing.allocation.enable` has a suitable value for _reindex migrations_.

Until now, this check was performed systematically after the `INIT` step. A couple of new dedicated steps, `CHECK_CLUSTER_ROUTING_ALLOCATION` and `LEGACY_CHECK_CLUSTER_ROUTING_ALLOCATION` (highlighted in orange in the PR's state-machine diagram), now verify this setting on _reindex migrations_ only.
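
In essence, each new step reads the cluster settings and lets the migration proceed only when the effective value of the setting is unset or `'all'`. Below is a condensed sketch of the check this commit adds (the real action, shown in full in the diff further down, also routes retryable Elasticsearch client errors through `catchRetryableEsClientErrors`, omitted here for brevity):

```ts
import * as Either from 'fp-ts/lib/Either';
import type { ElasticsearchClient } from '@kbn/core-elasticsearch-server';

const ROUTING_ALLOCATION_ENABLE = 'cluster.routing.allocation.enable';

export interface IncompatibleClusterRoutingAllocation {
  type: 'incompatible_cluster_routing_allocation';
}

// Right({}) means migrations may proceed; Left(...) means the cluster-wide
// allocation setting would prevent the reindex from completing.
export const checkClusterRoutingAllocationEnabled =
  (client: ElasticsearchClient) =>
  async (): Promise<Either.Either<IncompatibleClusterRoutingAllocation, {}>> => {
    const settings = await client.cluster.getSettings({ flat_settings: true });
    // Transient settings take precedence over persistent settings.
    const value =
      settings.transient?.[ROUTING_ALLOCATION_ENABLE] ??
      settings.persistent?.[ROUTING_ALLOCATION_ENABLE];
    return value === undefined || value === 'all'
      ? Either.right({})
      : Either.left({ type: 'incompatible_cluster_routing_allocation' as const });
  };
```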

Parent: 2f4997cbfe
Commit: 37426f0bde

19 changed files with 1176 additions and 668 deletions
```diff
@@ -24,7 +24,6 @@ export {
   calculateExcludeFilters,
   checkForUnknownDocs,
   waitForIndexStatus,
-  initAction,
   cloneIndex,
   waitForTask,
   updateAndPickupMappings,
@@ -40,6 +39,7 @@ export {
   fetchIndices,
   waitForReindexTask,
   waitForPickupUpdatedMappingsTask,
+  checkClusterRoutingAllocationEnabled,
 } from './src/actions';
 export type {
   OpenPitResponse,
```
_File diff suppressed because it is too large._
```diff
@@ -10,19 +10,13 @@ import * as Either from 'fp-ts/lib/Either';
 import { catchRetryableEsClientErrors } from './catch_retryable_es_client_errors';
 import { errors as EsErrors } from '@elastic/elasticsearch';
 import { elasticsearchClientMock } from '@kbn/core-elasticsearch-client-server-mocks';
-import { initAction, type InitActionParams } from './initialize_action';
+import { checkClusterRoutingAllocationEnabled } from './check_cluster_routing_allocation';
 
 jest.mock('./catch_retryable_es_client_errors');
 
-describe('initAction', () => {
-  let initActionParams: Omit<InitActionParams, 'client'>;
-
+describe('checkClusterRoutingAllocationEnabled', () => {
   beforeEach(() => {
     jest.clearAllMocks();
-
-    initActionParams = {
-      indices: ['.kibana', '.kibana_8.8.0'],
-    };
   });
   it('calls catchRetryableEsClientErrors when the promise rejects', async () => {
     const retryableError = new EsErrors.ResponseError(
@@ -34,7 +28,7 @@ describe('initAction', () => {
     const client = elasticsearchClientMock.createInternalClient(
       elasticsearchClientMock.createErrorTransportRequestPromise(retryableError)
     );
-    const task = initAction({ ...initActionParams, client });
+    const task = checkClusterRoutingAllocationEnabled(client);
     try {
       await task();
     } catch (e) {
@@ -62,7 +56,7 @@ describe('initAction', () => {
     const client = elasticsearchClientMock.createInternalClient(
       Promise.resolve(clusterSettingsResponse)
     );
-    const task = initAction({ ...initActionParams, client });
+    const task = checkClusterRoutingAllocationEnabled(client);
     const result = await task();
     expect(Either.isLeft(result)).toEqual(true);
   });
@@ -107,7 +101,7 @@ describe('initAction', () => {
     const client = elasticsearchClientMock.createInternalClient(
       Promise.resolve(clusterSettingsResponse)
     );
-    const task = initAction({ ...initActionParams, client });
+    const task = checkClusterRoutingAllocationEnabled(client);
     const result = await task();
     expect(Either.isRight(result)).toEqual(true);
   });
```
```diff
@@ -8,35 +8,22 @@
 
 import * as TaskEither from 'fp-ts/lib/TaskEither';
 import * as Either from 'fp-ts/lib/Either';
-import { pipe } from 'fp-ts/lib/function';
 import type { ElasticsearchClient } from '@kbn/core-elasticsearch-server';
 import {
   catchRetryableEsClientErrors,
   type RetryableEsClientError,
 } from './catch_retryable_es_client_errors';
 
-import { type FetchIndexResponse, fetchIndices } from './fetch_indices';
-
-const routingAllocationEnable = 'cluster.routing.allocation.enable';
-export interface ClusterRoutingAllocationEnabled {
-  clusterRoutingAllocationEnabled: boolean;
-}
-
-export interface InitActionParams {
-  client: ElasticsearchClient;
-  indices: string[];
-}
+const ROUTING_ALLOCATION_ENABLE = 'cluster.routing.allocation.enable';
 
 export interface IncompatibleClusterRoutingAllocation {
   type: 'incompatible_cluster_routing_allocation';
 }
 
-export const checkClusterRoutingAllocationEnabledTask =
-  ({
-    client,
-  }: {
-    client: ElasticsearchClient;
-  }): TaskEither.TaskEither<RetryableEsClientError | IncompatibleClusterRoutingAllocation, {}> =>
+export const checkClusterRoutingAllocationEnabled =
+  (
+    client: ElasticsearchClient
+  ): TaskEither.TaskEither<RetryableEsClientError | IncompatibleClusterRoutingAllocation, {}> =>
   () => {
     return client.cluster
       .getSettings({
@@ -45,8 +32,8 @@ export const checkClusterRoutingAllocationEnabledTask =
       .then((settings) => {
         // transient settings take preference over persistent settings
         const clusterRoutingAllocation =
-          settings?.transient?.[routingAllocationEnable] ??
-          settings?.persistent?.[routingAllocationEnable];
+          settings?.transient?.[ROUTING_ALLOCATION_ENABLE] ??
+          settings?.persistent?.[ROUTING_ALLOCATION_ENABLE];
 
         const clusterRoutingAllocationEnabledIsAll =
           clusterRoutingAllocation === undefined || clusterRoutingAllocation === 'all';
@@ -61,18 +48,3 @@ export const checkClusterRoutingAllocationEnabledTask =
       })
       .catch(catchRetryableEsClientErrors);
   };
-
-export const initAction = ({
-  client,
-  indices,
-}: InitActionParams): TaskEither.TaskEither<
-  RetryableEsClientError | IncompatibleClusterRoutingAllocation,
-  FetchIndexResponse
-> => {
-  return pipe(
-    checkClusterRoutingAllocationEnabledTask({ client }),
-    TaskEither.chainW((value) => {
-      return fetchIndices({ client, indices });
-    })
-  );
-};
```
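The deleted `initAction` above also documents how the two concerns used to be welded together: the routing check always ran first and then chained into `fetchIndices`. With the split, `INIT` can map straight onto `fetchIndices`, and the state machine decides when to run the check. For reference, a sketch of how the old combined behaviour could still be recreated from the new standalone action (identifiers come from the surrounding diff; the parameter shape is borrowed from the removed `InitActionParams`):

```ts
import { pipe } from 'fp-ts/lib/function';
import * as TaskEither from 'fp-ts/lib/TaskEither';
import type { ElasticsearchClient } from '@kbn/core-elasticsearch-server';

// Sketch only: chains the standalone check in front of fetchIndices,
// reproducing what the deleted initAction used to do unconditionally.
const initLikeAction = ({ client, indices }: { client: ElasticsearchClient; indices: string[] }) =>
  pipe(
    checkClusterRoutingAllocationEnabled(client),
    TaskEither.chainW(() => fetchIndices({ client, indices }))
  );
```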
```diff
@@ -21,8 +21,8 @@ export {
 export type { RetryableEsClientError };
 
 // actions/* imports
-export type { InitActionParams, IncompatibleClusterRoutingAllocation } from './initialize_action';
-export { initAction } from './initialize_action';
+export type { IncompatibleClusterRoutingAllocation } from './check_cluster_routing_allocation';
+export { checkClusterRoutingAllocationEnabled } from './check_cluster_routing_allocation';
 
 export type { FetchIndexResponse, FetchIndicesParams } from './fetch_indices';
 export { fetchIndices } from './fetch_indices';
```
```diff
@@ -108,7 +108,7 @@ export {
 } from './update_source_mappings_properties';
 
 import type { UnknownDocsFound } from './check_for_unknown_docs';
-import type { IncompatibleClusterRoutingAllocation } from './initialize_action';
+import type { IncompatibleClusterRoutingAllocation } from './check_cluster_routing_allocation';
 import type { ClusterShardLimitExceeded } from './create_index';
 import type { SynchronizationFailed } from './synchronize_migrators';
 import type { IndexMappingsIncomplete, TypesChanged } from './check_target_mappings';
```
```diff
@@ -39,7 +39,6 @@ import type {
   OutdatedDocumentsSearchOpenPit,
   OutdatedDocumentsSearchRead,
   OutdatedDocumentsTransform,
-  PostInitState,
   PrepareCompatibleMigration,
   RefreshTarget,
   ReindexSourceToTempClosePit,
@@ -57,6 +56,8 @@ import type {
   WaitForYellowSourceState,
   ReadyToReindexSyncState,
   DoneReindexingSyncState,
+  LegacyCheckClusterRoutingAllocationState,
+  CheckClusterRoutingAllocationState,
 } from '../state';
 import { type TransformErrorObjects, TransformSavedObjectDocumentError } from '../core';
 import type { AliasAction, RetryableEsClientError } from '../actions';
@@ -175,6 +176,7 @@ describe('migrations v2 model', () => {
     type: 'retryable_es_client_error',
     message: 'snapshot_in_progress_exception',
   };
+
   test('increments retryCount, exponential retryDelay if an action fails with a retryable_es_client_error', () => {
     const states = new Array(10).fill(1).map(() => {
       state = model(state, Either.left(retryableError));
```
```diff
@@ -301,49 +303,59 @@ describe('migrations v2 model', () => {
       },
     } as const;
 
+    test("INIT -> FATAL when .kibana points to newer version's index", () => {
+      const res: ResponseType<'INIT'> = Either.right({
+        '.kibana_7.12.0_001': {
+          aliases: {
+            '.kibana': {},
+            '.kibana_7.12.0': {},
+          },
+          mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
+          settings: {},
+        },
+        '.kibana_7.11.0_001': {
+          aliases: { '.kibana_7.11.0': {} },
+          mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
+          settings: {},
+        },
+      });
+      const newState = model(initBaseState, res) as FatalState;
+
+      expect(newState.controlState).toEqual('FATAL');
+      expect(newState.reason).toMatchInlineSnapshot(
+        `"The .kibana alias is pointing to a newer version of Kibana: v7.12.0"`
+      );
+    });
+
+    test('INIT -> FATAL when .kibana points to multiple indices', () => {
+      const res: ResponseType<'INIT'> = Either.right({
+        '.kibana_7.12.0_001': {
+          aliases: {
+            '.kibana': {},
+            '.kibana_7.12.0': {},
+          },
+          mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
+          settings: {},
+        },
+        '.kibana_7.11.0_001': {
+          aliases: { '.kibana': {}, '.kibana_7.11.0': {} },
+          mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
+          settings: {},
+        },
+      });
+      const newState = model(initBaseState, res) as FatalState;
+
+      expect(newState.controlState).toEqual('FATAL');
+      expect(newState.reason).toMatchInlineSnapshot(
+        `"The .kibana alias is pointing to multiple indices: .kibana_7.12.0_001,.kibana_7.11.0_001."`
+      );
+    });
+
     describe('if waitForMigrationCompletion=true', () => {
       const initState = Object.assign({}, initBaseState, {
         waitForMigrationCompletion: true,
       });
-      test('INIT -> INIT when cluster routing allocation is incompatible', () => {
-        const res: ResponseType<'INIT'> = Either.left({
-          type: 'incompatible_cluster_routing_allocation',
-        });
-        const newState = model(initState, res) as FatalState;
-
-        expect(newState.controlState).toEqual('INIT');
-        expect(newState.retryCount).toEqual(1);
-        expect(newState.retryDelay).toEqual(2000);
-        expect(newState.logs[0]).toMatchInlineSnapshot(`
-          Object {
-            "level": "error",
-            "message": "Action failed with '[incompatible_cluster_routing_allocation] Incompatible Elasticsearch cluster settings detected. Remove the persistent and transient Elasticsearch cluster setting 'cluster.routing.allocation.enable' or set it to a value of 'all' to allow migrations to proceed. Refer to routingAllocationDisabled for more information on how to resolve the issue.'. Retrying attempt 1 in 2 seconds.",
-          }
-        `);
-      });
-      test("INIT -> FATAL when .kibana points to newer version's index", () => {
-        const res: ResponseType<'INIT'> = Either.right({
-          '.kibana_7.12.0_001': {
-            aliases: {
-              '.kibana': {},
-              '.kibana_7.12.0': {},
-            },
-            mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
-            settings: {},
-          },
-          '.kibana_7.11.0_001': {
-            aliases: { '.kibana_7.11.0': {} },
-            mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
-            settings: {},
-          },
-        });
-        const newState = model(initState, res) as FatalState;
-
-        expect(newState.controlState).toEqual('FATAL');
-        expect(newState.reason).toMatchInlineSnapshot(
-          `"The .kibana alias is pointing to a newer version of Kibana: v7.12.0"`
-        );
-      });
       test('INIT -> FATAL when later version alias exists', () => {
         const res: ResponseType<'INIT'> = Either.right({
           '.kibana_7.11.0_001': {
@@ -359,29 +371,7 @@ describe('migrations v2 model', () => {
           `"The .kibana_7.12.0 alias refers to a newer version of Kibana: v7.12.0"`
         );
       });
-      test('INIT -> FATAL when .kibana points to multiple indices', () => {
-        const res: ResponseType<'INIT'> = Either.right({
-          '.kibana_7.12.0_001': {
-            aliases: {
-              '.kibana': {},
-              '.kibana_7.12.0': {},
-            },
-            mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
-            settings: {},
-          },
-          '.kibana_7.11.0_001': {
-            aliases: { '.kibana': {}, '.kibana_7.11.0': {} },
-            mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
-            settings: {},
-          },
-        });
-        const newState = model(initState, res) as FatalState;
-
-        expect(newState.controlState).toEqual('FATAL');
-        expect(newState.reason).toMatchInlineSnapshot(
-          `"The .kibana alias is pointing to multiple indices: .kibana_7.12.0_001,.kibana_7.11.0_001."`
-        );
-      });
       test('INIT -> WAIT_FOR_MIGRATION_COMPLETION when .kibana points to an index with an invalid version', () => {
         // If users tamper with our index version naming scheme we can no
         // longer accurately detect a newer version. Older Kibana versions
```
```diff
@@ -405,11 +395,12 @@ describe('migrations v2 model', () => {
           settings: {},
         },
       });
-      const newState = model(initState, res) as WaitForYellowSourceState;
+      const newState = model(initState, res);
+
       expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
       expect(newState.retryDelay).toBe(2000);
     });
 
     test('INIT -> WAIT_FOR_MIGRATION_COMPLETION when migrating from a v2 migrations index (>= 7.11.0)', () => {
       const res: ResponseType<'INIT'> = Either.right({
         '.kibana_7.11.0_001': {
@@ -431,11 +422,12 @@ describe('migrations v2 model', () => {
           versionIndex: '.kibana_7.12.0_001',
         },
         res
-      ) as WaitForYellowSourceState;
+      );
+
       expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
       expect(newState.retryDelay).toEqual(2000);
     });
 
     test('INIT -> WAIT_FOR_MIGRATION_COMPLETION when migrating from a v1 migrations index (>= 6.5 < 7.11.0)', () => {
       const res: ResponseType<'INIT'> = Either.right({
         '.kibana_3': {
@@ -446,11 +438,12 @@ describe('migrations v2 model', () => {
           settings: {},
         },
       });
-      const newState = model(initState, res) as WaitForYellowSourceState;
+      const newState = model(initState, res);
+
       expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
       expect(newState.retryDelay).toEqual(2000);
     });
 
     test('INIT -> WAIT_FOR_MIGRATION_COMPLETION when migrating from a legacy index (>= 6.0.0 < 6.5)', () => {
       const res: ResponseType<'INIT'> = Either.right({
         '.kibana': {
@@ -464,6 +457,7 @@ describe('migrations v2 model', () => {
       expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
       expect(newState.retryDelay).toEqual(2000);
     });
+
     test('INIT -> WAIT_FOR_MIGRATION_COMPLETION when migrating from a custom kibana.index name (>= 6.5 < 7.11.0)', () => {
       const res: ResponseType<'INIT'> = Either.right({
         'my-saved-objects_3': {
@@ -483,11 +477,12 @@ describe('migrations v2 model', () => {
           versionIndex: 'my-saved-objects_7.11.0_001',
         },
         res
-      ) as WaitForYellowSourceState;
+      );
+
       expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
       expect(newState.retryDelay).toEqual(2000);
     });
 
     test('INIT -> WAIT_FOR_MIGRATION_COMPLETION when migrating from a custom kibana.index v2 migrations index (>= 7.11.0)', () => {
       const res: ResponseType<'INIT'> = Either.right({
         'my-saved-objects_7.11.0': {
@@ -508,11 +503,12 @@ describe('migrations v2 model', () => {
           versionIndex: 'my-saved-objects_7.12.0_001',
         },
         res
-      ) as WaitForYellowSourceState;
+      );
+
       expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
       expect(newState.retryDelay).toEqual(2000);
     });
 
     test('INIT -> WAIT_FOR_MIGRATION_COMPLETION when no indices/aliases exist', () => {
       const res: ResponseType<'INIT'> = Either.right({});
       const newState = model(initState, res);
```
```diff
@@ -526,68 +522,7 @@ describe('migrations v2 model', () => {
     const initState = Object.assign({}, initBaseState, {
       waitForMigrationCompletion: false,
     });
-    test('INIT -> INIT when cluster routing allocation is incompatible', () => {
-      const res: ResponseType<'INIT'> = Either.left({
-        type: 'incompatible_cluster_routing_allocation',
-      });
-      const newState = model(initState, res) as FatalState;
-
-      expect(newState.controlState).toEqual('INIT');
-      expect(newState.retryCount).toEqual(1);
-      expect(newState.retryDelay).toEqual(2000);
-      expect(newState.logs[0]).toMatchInlineSnapshot(`
-        Object {
-          "level": "error",
-          "message": "Action failed with '[incompatible_cluster_routing_allocation] Incompatible Elasticsearch cluster settings detected. Remove the persistent and transient Elasticsearch cluster setting 'cluster.routing.allocation.enable' or set it to a value of 'all' to allow migrations to proceed. Refer to routingAllocationDisabled for more information on how to resolve the issue.'. Retrying attempt 1 in 2 seconds.",
-        }
-      `);
-    });
-    test("INIT -> FATAL when .kibana points to newer version's index", () => {
-      const res: ResponseType<'INIT'> = Either.right({
-        '.kibana_7.12.0_001': {
-          aliases: {
-            '.kibana': {},
-            '.kibana_7.12.0': {},
-          },
-          mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
-          settings: {},
-        },
-        '.kibana_7.11.0_001': {
-          aliases: { '.kibana_7.11.0': {} },
-          mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
-          settings: {},
-        },
-      });
-      const newState = model(initState, res) as FatalState;
-
-      expect(newState.controlState).toEqual('FATAL');
-      expect(newState.reason).toMatchInlineSnapshot(
-        `"The .kibana alias is pointing to a newer version of Kibana: v7.12.0"`
-      );
-    });
-    test('INIT -> FATAL when .kibana points to multiple indices', () => {
-      const res: ResponseType<'INIT'> = Either.right({
-        '.kibana_7.12.0_001': {
-          aliases: {
-            '.kibana': {},
-            '.kibana_7.12.0': {},
-          },
-          mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
-          settings: {},
-        },
-        '.kibana_7.11.0_001': {
-          aliases: { '.kibana': {}, '.kibana_7.11.0': {} },
-          mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
-          settings: {},
-        },
-      });
-      const newState = model(initState, res) as FatalState;
-
-      expect(newState.controlState).toEqual('FATAL');
-      expect(newState.reason).toMatchInlineSnapshot(
-        `"The .kibana alias is pointing to multiple indices: .kibana_7.12.0_001,.kibana_7.11.0_001."`
-      );
-    });
     test('INIT -> WAIT_FOR_YELLOW_SOURCE when .kibana points to an index with an invalid version', () => {
       // If users tamper with our index version naming scheme we can no
       // longer accurately detect a newer version. Older Kibana versions
```
```diff
@@ -675,7 +610,8 @@ describe('migrations v2 model', () => {
         '.kibana': '.kibana_3',
       });
     });
-    test('INIT -> LEGACY_SET_WRITE_BLOCK when migrating from a legacy index (>= 6.0.0 < 6.5)', () => {
+
+    test('INIT -> LEGACY_CHECK_CLUSTER_ROUTING_ALLOCATION when migrating from a legacy index (>= 6.0.0 < 6.5)', () => {
       const res: ResponseType<'INIT'> = Either.right({
         '.kibana': {
           aliases: {},
@@ -686,7 +622,7 @@ describe('migrations v2 model', () => {
       const newState = model(initState, res);
 
       expect(newState).toMatchObject({
-        controlState: 'LEGACY_SET_WRITE_BLOCK',
+        controlState: 'LEGACY_CHECK_CLUSTER_ROUTING_ALLOCATION',
         sourceIndex: Option.some('.kibana_pre6.5.0_001'),
         targetIndex: '.kibana_7.11.0_001',
       });
@@ -714,6 +650,7 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(0);
       expect(newState.retryDelay).toEqual(0);
     });
+
     test('INIT -> WAIT_FOR_YELLOW_SOURCE when migrating from a custom kibana.index name (>= 6.5 < 7.11.0)', () => {
       const res: ResponseType<'INIT'> = Either.right({
         'my-saved-objects_3': {
@@ -740,6 +677,7 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(0);
       expect(newState.retryDelay).toEqual(0);
     });
+
     test('INIT -> WAIT_FOR_YELLOW_SOURCE when migrating from a custom kibana.index v2 migrations index (>= 7.11.0)', () => {
       const res: ResponseType<'INIT'> = Either.right({
         'my-saved-objects_7.11.0': {
@@ -768,6 +706,7 @@ describe('migrations v2 model', () => {
      expect(newState.retryCount).toEqual(0);
      expect(newState.retryDelay).toEqual(0);
    });
+
    test('INIT -> CREATE_NEW_TARGET when the index does not exist and the migrator is NOT involved in a relocation', () => {
      const res: ResponseType<'INIT'> = Either.right({});
      const newState = model(initState, res);
@@ -780,6 +719,7 @@ describe('migrations v2 model', () => {
      expect(newState.retryCount).toEqual(0);
      expect(newState.retryDelay).toEqual(0);
    });
+
    test('INIT -> CREATE_REINDEX_TEMP when the index does not exist and the migrator is involved in a relocation', () => {
      const res: ResponseType<'INIT'> = Either.right({});
      const newState = model(
```
```diff
@@ -837,11 +777,12 @@ describe('migrations v2 model', () => {
         settings: {},
       },
     });
-    const newState = model(waitForMState, res) as WaitForYellowSourceState;
+    const newState = model(waitForMState, res);
+
     expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
     expect(newState.retryDelay).toBe(2000);
   });
 
   test('WAIT_FOR_MIGRATION_COMPLETION -> WAIT_FOR_MIGRATION_COMPLETION when migrating from a v2 migrations index (>= 7.11.0)', () => {
     const res: ResponseType<'WAIT_FOR_MIGRATION_COMPLETION'> = Either.right({
       '.kibana_7.11.0_001': {
@@ -865,11 +806,12 @@ describe('migrations v2 model', () => {
         },
       },
       res
-    ) as WaitForYellowSourceState;
+    );
+
     expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
     expect(newState.retryDelay).toEqual(2000);
   });
 
   test('WAIT_FOR_MIGRATION_COMPLETION -> WAIT_FOR_MIGRATION_COMPLETION when migrating from a v1 migrations index (>= 6.5 < 7.11.0)', () => {
     const res: ResponseType<'WAIT_FOR_MIGRATION_COMPLETION'> = Either.right({
       '.kibana_3': {
@@ -880,11 +822,12 @@ describe('migrations v2 model', () => {
         settings: {},
       },
     });
-    const newState = model(waitForMState, res) as WaitForYellowSourceState;
+    const newState = model(waitForMState, res);
+
     expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
     expect(newState.retryDelay).toEqual(2000);
   });
 
   test('WAIT_FOR_MIGRATION_COMPLETION -> WAIT_FOR_MIGRATION_COMPLETION when migrating from a legacy index (>= 6.0.0 < 6.5)', () => {
     const res: ResponseType<'WAIT_FOR_MIGRATION_COMPLETION'> = Either.right({
       '.kibana': {
@@ -898,6 +841,7 @@ describe('migrations v2 model', () => {
     expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
     expect(newState.retryDelay).toEqual(2000);
   });
+
   test('WAIT_FOR_MIGRATION_COMPLETION -> WAIT_FOR_MIGRATION_COMPLETION when migrating from a custom kibana.index name (>= 6.5 < 7.11.0)', () => {
     const res: ResponseType<'WAIT_FOR_MIGRATION_COMPLETION'> = Either.right({
       'my-saved-objects_3': {
@@ -916,11 +860,12 @@ describe('migrations v2 model', () => {
         versionIndex: 'my-saved-objects_7.11.0_001',
       },
       res
-    ) as WaitForYellowSourceState;
+    );
+
     expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
     expect(newState.retryDelay).toEqual(2000);
   });
 
   test('WAIT_FOR_MIGRATION_COMPLETION -> WAIT_FOR_MIGRATION_COMPLETION when migrating from a custom kibana.index v2 migrations index (>= 7.11.0)', () => {
     const res: ResponseType<'WAIT_FOR_MIGRATION_COMPLETION'> = Either.right({
       'my-saved-objects_7.11.0': {
@@ -940,11 +885,12 @@ describe('migrations v2 model', () => {
         versionIndex: 'my-saved-objects_7.12.0_001',
       },
       res
-    ) as WaitForYellowSourceState;
+    );
+
     expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
     expect(newState.retryDelay).toEqual(2000);
   });
 
   test('WAIT_FOR_MIGRATION_COMPLETION -> WAIT_FOR_MIGRATION_COMPLETION when no indices/aliases exist', () => {
     const res: ResponseType<'WAIT_FOR_MIGRATION_COMPLETION'> = Either.right({});
     const newState = model(waitForMState, res);
```
```diff
@@ -970,6 +916,37 @@ describe('migrations v2 model', () => {
     });
   });
 
+  describe('LEGACY_CHECK_CLUSTER_ROUTING_ALLOCATION', () => {
+    const legacyCheckClusterRoutingAllocationState: LegacyCheckClusterRoutingAllocationState = {
+      ...postInitState,
+      controlState: 'LEGACY_CHECK_CLUSTER_ROUTING_ALLOCATION',
+      sourceIndex: Option.some('.kibana') as Option.Some<string>,
+      sourceIndexMappings: Option.some({ properties: {} }) as Option.Some<IndexMapping>,
+      legacyPreMigrationDoneActions: [],
+      legacyIndex: '',
+    };
+
+    test('LEGACY_CHECK_CLUSTER_ROUTING_ALLOCATION -> LEGACY_CHECK_CLUSTER_ROUTING_ALLOCATION when cluster allocation is not compatible', () => {
+      const res: ResponseType<'LEGACY_CHECK_CLUSTER_ROUTING_ALLOCATION'> = Either.left({
+        type: 'incompatible_cluster_routing_allocation',
+      });
+      const newState = model(legacyCheckClusterRoutingAllocationState, res);
+
+      expect(newState.controlState).toBe('LEGACY_CHECK_CLUSTER_ROUTING_ALLOCATION');
+      expect(newState.retryCount).toEqual(1);
+      expect(newState.retryDelay).toEqual(2000);
+    });
+
+    test('LEGACY_CHECK_CLUSTER_ROUTING_ALLOCATION -> LEGACY_SET_WRITE_BLOCK when cluster allocation is compatible', () => {
+      const res: ResponseType<'LEGACY_CHECK_CLUSTER_ROUTING_ALLOCATION'> = Either.right({});
+      const newState = model(legacyCheckClusterRoutingAllocationState, res);
+
+      expect(newState.controlState).toBe('LEGACY_SET_WRITE_BLOCK');
+      expect(newState.retryCount).toEqual(0);
+      expect(newState.retryDelay).toEqual(0);
+    });
+  });
+
   describe('LEGACY_SET_WRITE_BLOCK', () => {
     const legacySetWriteBlockState: LegacySetWriteBlockState = {
       ...postInitState,
```
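As the new tests assert, an incompatible setting does not fail the migration outright: the state machine stays in the same check state and retries with an increasing delay (2 s on the first attempt, 4 s on the second, as other tests in this file show). A hedged sketch of that progression, not necessarily the exact `delayRetryState` implementation:

```ts
// Doubling backoff consistent with the asserted values
// (retryCount 1 -> 2000 ms, retryCount 2 -> 4000 ms); the actual cap may differ.
const retryDelayMs = (retryCount: number, capMs = 64_000): number =>
  Math.min(1000 * 2 ** retryCount, capMs);
```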
```diff
@@ -979,6 +956,7 @@ describe('migrations v2 model', () => {
       legacyPreMigrationDoneActions: [],
       legacyIndex: '',
     };
+
     test('LEGACY_SET_WRITE_BLOCK -> LEGACY_SET_WRITE_BLOCK if action fails with set_write_block_failed', () => {
       const res: ResponseType<'LEGACY_SET_WRITE_BLOCK'> = Either.left({
         type: 'retryable_es_client_error',
@@ -989,6 +967,7 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(1);
       expect(newState.retryDelay).toEqual(2000);
     });
+
     test('LEGACY_SET_WRITE_BLOCK -> LEGACY_CREATE_REINDEX_TARGET if action fails with index_not_found_exception', () => {
       const res: ResponseType<'LEGACY_SET_WRITE_BLOCK'> = Either.left({
         type: 'index_not_found_exception',
@@ -999,6 +978,7 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(0);
       expect(newState.retryDelay).toEqual(0);
     });
+
     test('LEGACY_SET_WRITE_BLOCK -> LEGACY_CREATE_REINDEX_TARGET if action succeeds with set_write_block_succeeded', () => {
       const res: ResponseType<'LEGACY_SET_WRITE_BLOCK'> = Either.right(
         'set_write_block_succeeded'
@@ -1019,6 +999,7 @@ describe('migrations v2 model', () => {
       legacyPreMigrationDoneActions: [],
       legacyIndex: '',
     };
+
     test('LEGACY_CREATE_REINDEX_TARGET -> LEGACY_REINDEX', () => {
       const res: ResponseType<'LEGACY_CREATE_REINDEX_TARGET'> =
         Either.right('create_index_succeeded');
@@ -1027,6 +1008,7 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(0);
       expect(newState.retryDelay).toEqual(0);
     });
+
     test('LEGACY_CREATE_REINDEX_TARGET -> LEGACY_CREATE_REINDEX_TARGET if action fails with index_not_green_timeout', () => {
       const res: ResponseType<'LEGACY_CREATE_REINDEX_TARGET'> = Either.left({
         message: '[index_not_green_timeout] Timeout waiting for ...',
@@ -1043,6 +1025,7 @@ describe('migrations v2 model', () => {
         }
       `);
     });
+
     test('LEGACY_CREATE_REINDEX_TARGET -> LEGACY_REINDEX resets retry count and retry delay if action succeeds', () => {
       const res: ResponseType<'LEGACY_CREATE_REINDEX_TARGET'> =
         Either.right('create_index_succeeded');
@@ -1056,13 +1039,14 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(0);
       expect(newState.retryDelay).toEqual(0);
     });
+
     test('LEGACY_CREATE_REINDEX_TARGET -> FATAL if action fails with cluster_shard_limit_exceeded', () => {
       const res: ResponseType<'LEGACY_CREATE_REINDEX_TARGET'> = Either.left({
         type: 'cluster_shard_limit_exceeded',
       });
-      const newState = model(legacyCreateReindexTargetState, res);
+      const newState = model(legacyCreateReindexTargetState, res) as FatalState;
       expect(newState.controlState).toEqual('FATAL');
-      expect((newState as FatalState).reason).toMatchInlineSnapshot(
+      expect(newState.reason).toMatchInlineSnapshot(
         `"[cluster_shard_limit_exceeded] Upgrading Kibana requires adding a small number of new shards. Ensure that Kibana is able to add 10 more shards by increasing the cluster.max_shards_per_node setting, or removing indices to clear up resources. See clusterShardLimitExceeded"`
       );
     });
@@ -1077,6 +1061,7 @@ describe('migrations v2 model', () => {
       legacyPreMigrationDoneActions: [],
       legacyIndex: '',
     };
+
     test('LEGACY_REINDEX -> LEGACY_REINDEX_WAIT_FOR_TASK', () => {
       const res: ResponseType<'LEGACY_REINDEX'> = Either.right({ taskId: 'task id' });
       const newState = model(legacyReindexState, res);
@@ -1096,6 +1081,7 @@ describe('migrations v2 model', () => {
       legacyIndex: 'legacy_index_name',
       legacyReindexTaskId: 'test_task_id',
     };
+
     test('LEGACY_REINDEX_WAIT_FOR_TASK -> LEGACY_DELETE if action succeeds', () => {
       const res: ResponseType<'LEGACY_REINDEX_WAIT_FOR_TASK'> = Either.right('reindex_succeeded');
       const newState = model(legacyReindexWaitForTaskState, res);
@@ -1103,6 +1089,7 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(0);
       expect(newState.retryDelay).toEqual(0);
     });
+
     test('LEGACY_REINDEX_WAIT_FOR_TASK -> LEGACY_DELETE if action fails with index_not_found_exception for reindex source', () => {
       const res: ResponseType<'LEGACY_REINDEX_WAIT_FOR_TASK'> = Either.left({
         type: 'index_not_found_exception',
@@ -1113,6 +1100,7 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(0);
       expect(newState.retryDelay).toEqual(0);
     });
+
     test('LEGACY_REINDEX_WAIT_FOR_TASK -> LEGACY_DELETE if action fails with target_index_had_write_block', () => {
       const res: ResponseType<'LEGACY_REINDEX_WAIT_FOR_TASK'> = Either.left({
         type: 'target_index_had_write_block',
@@ -1122,6 +1110,7 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(0);
       expect(newState.retryDelay).toEqual(0);
     });
+
     test('LEGACY_REINDEX_WAIT_FOR_TASK -> LEGACY_REINDEX_WAIT_FOR_TASK if action fails with wait_for_task_completion_timeout', () => {
       const res: ResponseType<'LEGACY_REINDEX_WAIT_FOR_TASK'> = Either.left({
         message: '[timeout_exception] Timeout waiting for ...',
@@ -1132,6 +1121,7 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(1);
       expect(newState.retryDelay).toEqual(2000);
     });
+
     test('LEGACY_REINDEX_WAIT_FOR_TASK -> LEGACY_REINDEX_WAIT_FOR_TASK with incremented retryCount if action fails with wait_for_task_completion_timeout a second time', () => {
       const state = Object.assign({}, legacyReindexWaitForTaskState, { retryCount: 1 });
       const res: ResponseType<'LEGACY_REINDEX_WAIT_FOR_TASK'> = Either.left({
@@ -1154,6 +1144,7 @@ describe('migrations v2 model', () => {
       legacyPreMigrationDoneActions: [],
       legacyIndex: 'legacy_index_name',
     };
+
     test('LEGACY_DELETE -> SET_SOURCE_WRITE_BLOCK if action succeeds', () => {
       const res: ResponseType<'LEGACY_DELETE'> = Either.right('update_aliases_succeeded');
       const newState = model(legacyDeleteState, res);
@@ -1161,6 +1152,7 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(0);
       expect(newState.retryDelay).toEqual(0);
     });
+
     test('LEGACY_DELETE -> SET_SOURCE_WRITE_BLOCK if action fails with index_not_found_exception for legacy index', () => {
       const res: ResponseType<'LEGACY_REINDEX_WAIT_FOR_TASK'> = Either.left({
         type: 'index_not_found_exception',
@@ -1171,6 +1163,7 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(0);
       expect(newState.retryDelay).toEqual(0);
     });
+
     test('LEGACY_DELETE -> SET_SOURCE_WRITE_BLOCK if action fails with remove_index_not_a_concrete_index', () => {
       const res: ResponseType<'LEGACY_DELETE'> = Either.left({
         type: 'remove_index_not_a_concrete_index',
```
```diff
@@ -1209,25 +1202,20 @@ describe('migrations v2 model', () => {
       test('WAIT_FOR_YELLOW_SOURCE -> UPDATE_SOURCE_MAPPINGS_PROPERTIES', () => {
         const res: ResponseType<'WAIT_FOR_YELLOW_SOURCE'> = Either.right({});
         const newState = model(waitForYellowSourceState, res);
 
-        expect(newState).toMatchObject({
-          controlState: 'UPDATE_SOURCE_MAPPINGS_PROPERTIES',
-        });
+        expect(newState.controlState).toEqual('UPDATE_SOURCE_MAPPINGS_PROPERTIES');
       });
     });
 
     describe('if the migrator is involved in a relocation', () => {
       // no need to attempt to update the mappings, we are going to reindex
-      test('WAIT_FOR_YELLOW_SOURCE -> CHECK_UNKNOWN_DOCUMENTS', () => {
+      test('WAIT_FOR_YELLOW_SOURCE -> CHECK_CLUSTER_ROUTING_ALLOCATION', () => {
         const res: ResponseType<'WAIT_FOR_YELLOW_SOURCE'> = Either.right({});
         const newState = model(
           { ...waitForYellowSourceState, mustRelocateDocuments: true },
           res
         );
 
-        expect(newState).toMatchObject({
-          controlState: 'CHECK_UNKNOWN_DOCUMENTS',
-        });
+        expect(newState.controlState).toEqual('CHECK_CLUSTER_ROUTING_ALLOCATION');
       });
     });
   });
@@ -1272,10 +1260,7 @@ describe('migrations v2 model', () => {
         'update_mappings_succeeded'
       );
       const newState = model(updateSourceMappingsPropertiesState, res);
 
-      expect(newState).toMatchObject({
-        controlState: 'CLEANUP_UNKNOWN_AND_EXCLUDED',
-      });
+      expect(newState.controlState).toEqual('CLEANUP_UNKNOWN_AND_EXCLUDED');
     });
 
     test('UPDATE_SOURCE_MAPPINGS_PROPERTIES -> OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT if mappings changes are compatible and index is already migrated', () => {
@@ -1305,15 +1290,12 @@ describe('migrations v2 model', () => {
     });
 
     describe('if action fails', () => {
-      test('UPDATE_SOURCE_MAPPINGS_PROPERTIES -> CHECK_UNKNOWN_DOCUMENTS if mappings changes are incompatible', () => {
+      test('UPDATE_SOURCE_MAPPINGS_PROPERTIES -> CHECK_CLUSTER_ROUTING_ALLOCATION if mappings changes are incompatible', () => {
         const res: ResponseType<'UPDATE_SOURCE_MAPPINGS_PROPERTIES'> = Either.left({
           type: 'incompatible_mapping_exception',
         });
         const newState = model(updateSourceMappingsPropertiesState, res);
 
-        expect(newState).toMatchObject({
-          controlState: 'CHECK_UNKNOWN_DOCUMENTS',
-        });
+        expect(newState.controlState).toEqual('CHECK_CLUSTER_ROUTING_ALLOCATION');
       });
 
       test('UPDATE_SOURCE_MAPPINGS_PROPERTIES -> FATAL', () => {
@@ -1326,11 +1308,12 @@ describe('migrations v2 model', () => {
             .set(['aliases', '.kibana'], '.kibana_7.11.0_001')
             .value(),
           res
-        );
+        ) as FatalState;
 
-        expect(newState).toMatchObject({
-          controlState: 'FATAL',
-        });
+        expect(newState.controlState).toEqual('FATAL');
+        expect(newState.reason).toMatchInlineSnapshot(
+          `"Incompatible mappings change on already migrated Kibana instance."`
+        );
       });
     });
   });
```
```diff
@@ -1485,6 +1468,35 @@ describe('migrations v2 model', () => {
     });
   });
 
+  describe('CHECK_CLUSTER_ROUTING_ALLOCATION', () => {
+    const checkClusterRoutingAllocationState: CheckClusterRoutingAllocationState = {
+      ...postInitState,
+      controlState: 'CHECK_CLUSTER_ROUTING_ALLOCATION',
+      sourceIndex: Option.some('.kibana') as Option.Some<string>,
+      sourceIndexMappings: Option.some({}) as Option.Some<IndexMapping>,
+    };
+
+    test('CHECK_CLUSTER_ROUTING_ALLOCATION -> CHECK_CLUSTER_ROUTING_ALLOCATION when cluster allocation is not compatible', () => {
+      const res: ResponseType<'CHECK_CLUSTER_ROUTING_ALLOCATION'> = Either.left({
+        type: 'incompatible_cluster_routing_allocation',
+      });
+      const newState = model(checkClusterRoutingAllocationState, res);
+
+      expect(newState.controlState).toBe('CHECK_CLUSTER_ROUTING_ALLOCATION');
+      expect(newState.retryCount).toEqual(1);
+      expect(newState.retryDelay).toEqual(2000);
+    });
+
+    test('CHECK_CLUSTER_ROUTING_ALLOCATION -> CHECK_UNKNOWN_DOCUMENTS when cluster allocation is compatible', () => {
+      const res: ResponseType<'CHECK_CLUSTER_ROUTING_ALLOCATION'> = Either.right({});
+      const newState = model(checkClusterRoutingAllocationState, res);
+
+      expect(newState.controlState).toBe('CHECK_UNKNOWN_DOCUMENTS');
+      expect(newState.retryCount).toEqual(0);
+      expect(newState.retryDelay).toEqual(0);
+    });
+  });
+
   describe('CHECK_UNKNOWN_DOCUMENTS', () => {
     const mappingsWithUnknownType = {
       properties: {
```
```diff
@@ -1626,6 +1638,7 @@ describe('migrations v2 model', () => {
       sourceIndex: Option.some('.kibana') as Option.Some<string>,
       sourceIndexMappings: Option.some({}) as Option.Some<IndexMapping>,
     };
+
     test('SET_SOURCE_WRITE_BLOCK -> SET_SOURCE_WRITE_BLOCK if action fails with set_write_block_failed', () => {
       const res: ResponseType<'SET_SOURCE_WRITE_BLOCK'> = Either.left({
         type: 'retryable_es_client_error',
@@ -1636,6 +1649,7 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(1);
       expect(newState.retryDelay).toEqual(2000);
     });
+
     test('SET_SOURCE_WRITE_BLOCK -> CALCULATE_EXCLUDE_FILTERS if action succeeds with set_write_block_succeeded', () => {
       const res: ResponseType<'SET_SOURCE_WRITE_BLOCK'> = Either.right(
         'set_write_block_succeeded'
@@ -1664,6 +1678,7 @@ describe('migrations v2 model', () => {
       sourceIndexMappings: Option.some({}) as Option.Some<IndexMapping>,
       tempIndexMappings: { properties: {} },
     };
+
     test('CALCULATE_EXCLUDE_FILTERS -> CALCULATE_EXCLUDE_FILTERS if action fails with retryable error', () => {
       const res: ResponseType<'CALCULATE_EXCLUDE_FILTERS'> = Either.left({
         type: 'retryable_es_client_error',
@@ -1764,13 +1779,14 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(0);
       expect(newState.retryDelay).toEqual(0);
     });
+
     test('CREATE_REINDEX_TEMP -> FATAL if action fails with cluster_shard_limit_exceeded', () => {
       const res: ResponseType<'CREATE_REINDEX_TEMP'> = Either.left({
         type: 'cluster_shard_limit_exceeded',
       });
-      const newState = model(state, res);
+      const newState = model(state, res) as FatalState;
       expect(newState.controlState).toEqual('FATAL');
-      expect((newState as FatalState).reason).toMatchInlineSnapshot(
+      expect(newState.reason).toMatchInlineSnapshot(
         `"[cluster_shard_limit_exceeded] Upgrading Kibana requires adding a small number of new shards. Ensure that Kibana is able to add 10 more shards by increasing the cluster.max_shards_per_node setting, or removing indices to clear up resources. See clusterShardLimitExceeded"`
       );
     });
@@ -1816,9 +1832,9 @@ describe('migrations v2 model', () => {
       type: 'synchronization_failed',
       error: new Error('Other migrators failed to reach the synchronization point'),
     });
-    const newState = model(state, res);
+    const newState = model(state, res) as FatalState;
     expect(newState.controlState).toEqual('FATAL');
-    expect((newState as FatalState).reason).toMatchInlineSnapshot(
+    expect(newState.reason).toMatchInlineSnapshot(
       `"An error occurred whilst waiting for other migrators to get to this step."`
     );
   });
@@ -1955,12 +1971,13 @@ describe('migrations v2 model', () => {
       contentLength: 2345,
     });
     const newState = model({ ...state, batchSize: 1 }, res) as FatalState;
-    expect(newState.controlState).toBe('FATAL');
-    expect(newState.batchSize).toBe(1); // don't halve the batch size or go below 1
-    expect(newState.maxBatchSize).toBe(1000); // leaves maxBatchSize unchanged
-    expect(newState.reason).toMatchInlineSnapshot(
-      `"After reducing the read batch size to a single document, the Elasticsearch response content length was 2345bytes which still exceeded migrations.maxReadBatchSizeBytes. Increase migrations.maxReadBatchSizeBytes and try again."`
-    );
+    expect(newState).toMatchObject({
+      controlState: 'FATAL',
+      batchSize: 1,
+      maxBatchSize: 1000,
+      reason:
+        'After reducing the read batch size to a single document, the Elasticsearch response content length was 2345bytes which still exceeded migrations.maxReadBatchSizeBytes. Increase migrations.maxReadBatchSizeBytes and try again.',
+    });
   });
 
   it('REINDEX_SOURCE_TO_TEMP_READ -> REINDEX_SOURCE_TO_TEMP_CLOSE_PIT if no outdated documents to reindex', () => {
@@ -2073,14 +2090,15 @@ describe('migrations v2 model', () => {
     const newState = model(state, res);
     expect(newState.controlState).toEqual('SET_TEMP_WRITE_BLOCK');
   });
+
   test('DONE_REINDEXING_SYNC -> FATAL if the synchronization between migrators fails', () => {
     const res: ResponseType<'DONE_REINDEXING_SYNC'> = Either.left({
       type: 'synchronization_failed',
      error: new Error('Other migrators failed to reach the synchronization point'),
     });
-    const newState = model(state, res);
+    const newState = model(state, res) as FatalState;
     expect(newState.controlState).toEqual('FATAL');
-    expect((newState as FatalState).reason).toMatchInlineSnapshot(
+    expect(newState.reason).toMatchInlineSnapshot(
       `"An error occurred whilst waiting for other migrators to get to this step."`
     );
   });
@@ -2177,6 +2195,7 @@ describe('migrations v2 model', () => {
       corruptDocumentIds: [],
       progress: createInitialProgress(),
     };
+
     test('REINDEX_SOURCE_TO_TEMP_INDEX_BULK -> REINDEX_SOURCE_TO_TEMP_READ if action succeeded', () => {
       const res: ResponseType<'REINDEX_SOURCE_TO_TEMP_INDEX_BULK'> =
         Either.right('bulk_index_succeeded');
@@ -2185,6 +2204,7 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(0);
       expect(newState.retryDelay).toEqual(0);
     });
+
     test('REINDEX_SOURCE_TO_TEMP_INDEX_BULK -> REINDEX_SOURCE_TO_TEMP_CLOSE_PIT if response is left target_index_had_write_block', () => {
       const res: ResponseType<'REINDEX_SOURCE_TO_TEMP_INDEX_BULK'> = Either.left({
         type: 'target_index_had_write_block',
@@ -2194,6 +2214,7 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(0);
       expect(newState.retryDelay).toEqual(0);
     });
+
     test('REINDEX_SOURCE_TO_TEMP_INDEX_BULK -> REINDEX_SOURCE_TO_TEMP_CLOSE_PIT if response is left index_not_found_exception', () => {
       const res: ResponseType<'REINDEX_SOURCE_TO_TEMP_INDEX_BULK'> = Either.left({
         type: 'index_not_found_exception',
@@ -2204,6 +2225,7 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(0);
       expect(newState.retryDelay).toEqual(0);
     });
+
     test('REINDEX_SOURCE_TO_TEMP_INDEX_BULK -> FATAL if action returns left request_entity_too_large_exception', () => {
       const res: ResponseType<'REINDEX_SOURCE_TO_TEMP_INDEX_BULK'> = Either.left({
         type: 'request_entity_too_large_exception',
@@ -2214,6 +2236,7 @@ describe('migrations v2 model', () => {
         `"While indexing a batch of saved objects, Elasticsearch returned a 413 Request Entity Too Large exception. Ensure that the Kibana configuration option 'migrations.maxBatchSizeBytes' is set to a value that is lower than or equal to the Elasticsearch 'http.max_content_length' configuration option."`
       );
     });
+
     test('REINDEX_SOURCE_TO_TEMP_INDEX_BULK should throw a throwBadResponse error if action failed', () => {
       const res: ResponseType<'REINDEX_SOURCE_TO_TEMP_INDEX_BULK'> = Either.left({
         type: 'retryable_es_client_error',
@@ -2233,6 +2256,7 @@ describe('migrations v2 model', () => {
       sourceIndex: Option.some('.kibana') as Option.Some<string>,
       sourceIndexMappings: Option.some({}) as Option.Some<IndexMapping>,
     };
+
     test('SET_TEMP_WRITE_BLOCK -> CLONE_TEMP_TO_TARGET when response is right', () => {
       const res: ResponseType<'SET_TEMP_WRITE_BLOCK'> = Either.right('set_write_block_succeeded');
       const newState = model(state, res);
```
```diff
@@ -2299,13 +2323,14 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toBe(0);
       expect(newState.retryDelay).toBe(0);
     });
+
     test('CLONE_TEMP_TO_TARGET -> FATAL if action fails with cluster_shard_limit_exceeded', () => {
       const res: ResponseType<'CLONE_TEMP_TO_TARGET'> = Either.left({
         type: 'cluster_shard_limit_exceeded',
       });
-      const newState = model(state, res);
+      const newState = model(state, res) as FatalState;
       expect(newState.controlState).toEqual('FATAL');
-      expect((newState as FatalState).reason).toMatchInlineSnapshot(
+      expect(newState.reason).toMatchInlineSnapshot(
         `"[cluster_shard_limit_exceeded] Upgrading Kibana requires adding a small number of new shards. Ensure that Kibana is able to add 10 more shards by increasing the cluster.max_shards_per_node setting, or removing indices to clear up resources. See clusterShardLimitExceeded"`
       );
     });
@@ -2687,7 +2712,7 @@ describe('migrations v2 model', () => {
 
     it('REFRESH_TARGET -> OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT if action succeeded', () => {
       const res: ResponseType<'REFRESH_TARGET'> = Either.right({ refreshed: true });
-      const newState = model(state, res) as UpdateTargetMappingsPropertiesState;
+      const newState = model(state, res);
       expect(newState.controlState).toBe('OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT');
     });
   });
```
```diff
@@ -2716,6 +2741,7 @@ describe('migrations v2 model', () => {
       hasTransformedDocs: false,
       progress: createInitialProgress(),
     };
+
     describe('OUTDATED_DOCUMENTS_TRANSFORM if action succeeds', () => {
       test('OUTDATED_DOCUMENTS_TRANSFORM -> TRANSFORMED_DOCUMENTS_BULK_INDEX if action succeeds', () => {
         const res: ResponseType<'OUTDATED_DOCUMENTS_TRANSFORM'> = Either.right({ processedDocs });
@@ -2730,6 +2756,7 @@ describe('migrations v2 model', () => {
         expect(newState.retryDelay).toEqual(0);
         expect(newState.progress.processed).toBe(outdatedDocuments.length);
       });
+
       test('OUTDATED_DOCUMENTS_TRANSFORM -> OUTDATED_DOCUMENTS_SEARCH_READ if there are are existing documents that failed transformation', () => {
         const outdatedDocumentsTransformStateWithFailedDocuments: OutdatedDocumentsTransform = {
           ...outdatedDocumentsTransformState,
@@ -2747,6 +2774,7 @@ describe('migrations v2 model', () => {
         expect(newState.retryDelay).toEqual(0);
         expect(newState.progress.processed).toBe(outdatedDocuments.length);
       });
+
       test('OUTDATED_DOCUMENTS_TRANSFORM -> OUTDATED_DOCUMENTS_SEARCH_READ if there are are existing documents that failed transformation because of transform errors', () => {
         const outdatedDocumentsTransformStateWithFailedDocuments: OutdatedDocumentsTransform = {
           ...outdatedDocumentsTransformState,
@@ -2766,6 +2794,7 @@ describe('migrations v2 model', () => {
         expect(newState.progress.processed).toBe(outdatedDocuments.length);
       });
     });
+
     describe('OUTDATED_DOCUMENTS_TRANSFORM if action fails', () => {
       test('OUTDATED_DOCUMENTS_TRANSFORM -> OUTDATED_DOCUMENTS_SEARCH_READ adding newly failed documents to state if documents failed the transform', () => {
         const res: ResponseType<'OUTDATED_DOCUMENTS_TRANSFORM'> = Either.left({
@@ -2782,6 +2811,7 @@ describe('migrations v2 model', () => {
         expect(newState.corruptDocumentIds).toEqual(corruptDocumentIds);
         expect(newState.progress.processed).toBe(outdatedDocuments.length);
       });
+
       test('OUTDATED_DOCUMENTS_TRANSFORM -> OUTDATED_DOCUMENTS_SEARCH_READ combines newly failed documents with those already on state if documents failed the transform', () => {
         const newFailedTransformDocumentIds = ['b:other', 'c:__'];
         const outdatedDocumentsTransformStateWithFailedDocuments: OutdatedDocumentsTransform = {
@@ -2917,6 +2947,7 @@ describe('migrations v2 model', () => {
         },
       }),
     };
+
     test('UPDATE_TARGET_MAPPINGS_PROPERTIES -> UPDATE_TARGET_MAPPINGS_PROPERTIES_WAIT_FOR_TASK', () => {
       const res: ResponseType<'UPDATE_TARGET_MAPPINGS_PROPERTIES'> = Either.right({
         taskId: 'update target mappings task',
```
```diff
@@ -2961,10 +2992,7 @@ describe('migrations v2 model', () => {
       message: '[timeout_exception] Timeout waiting for ...',
       type: 'wait_for_task_completion_timeout',
     });
-    const newState = model(
-      updateTargetMappingsWaitForTaskState,
-      res
-    ) as UpdateTargetMappingsPropertiesWaitForTaskState;
+    const newState = model(updateTargetMappingsWaitForTaskState, res);
     expect(newState.controlState).toEqual('UPDATE_TARGET_MAPPINGS_PROPERTIES_WAIT_FOR_TASK');
     expect(newState.retryCount).toEqual(1);
     expect(newState.retryDelay).toEqual(2000);
@@ -2976,7 +3004,7 @@ describe('migrations v2 model', () => {
       message: '[timeout_exception] Timeout waiting for ...',
       type: 'wait_for_task_completion_timeout',
     });
-    const newState = model(state, res) as UpdateTargetMappingsPropertiesWaitForTaskState;
+    const newState = model(state, res);
     expect(newState.controlState).toEqual('UPDATE_TARGET_MAPPINGS_PROPERTIES_WAIT_FOR_TASK');
     expect(newState.retryCount).toEqual(2);
     expect(newState.retryDelay).toEqual(4000);
@@ -3023,7 +3051,7 @@ describe('migrations v2 model', () => {
         versionIndexReadyActions,
       },
       res
-    ) as PostInitState;
+    );
     expect(newState.controlState).toEqual('MARK_VERSION_INDEX_READY');
     expect(newState.retryCount).toEqual(0);
     expect(newState.retryDelay).toEqual(0);
@@ -3041,14 +3069,14 @@ describe('migrations v2 model', () => {
         versionIndexReadyActions,
       },
       res
-    ) as PostInitState;
+    );
     expect(newState.controlState).toEqual('MARK_VERSION_INDEX_READY_SYNC');
     expect(newState.retryCount).toEqual(0);
     expect(newState.retryDelay).toEqual(0);
   });
 
   test('CHECK_VERSION_INDEX_READY_ACTIONS -> DONE if none versionIndexReadyActions', () => {
-    const newState = model(сheckVersionIndexReadyActionsState, res) as PostInitState;
+    const newState = model(сheckVersionIndexReadyActionsState, res);
     expect(newState.controlState).toEqual('DONE');
     expect(newState.retryCount).toEqual(0);
     expect(newState.retryDelay).toEqual(0);
```
```diff
@@ -3066,6 +3094,7 @@ describe('migrations v2 model', () => {
       sourceIndex: Option.none as Option.None,
       targetIndex: '.kibana_7.11.0_001',
     };
+
     test('CREATE_NEW_TARGET -> CHECK_VERSION_INDEX_READY_ACTIONS', () => {
       const res: ResponseType<'CREATE_NEW_TARGET'> = Either.right('create_index_succeeded');
       const newState = model(createNewTargetState, res);
@@ -3073,6 +3102,7 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(0);
       expect(newState.retryDelay).toEqual(0);
     });
+
     test('CREATE_NEW_TARGET -> CREATE_NEW_TARGET if action fails with index_not_green_timeout', () => {
       const res: ResponseType<'CREATE_NEW_TARGET'> = Either.left({
         message: '[index_not_green_timeout] Timeout waiting for ...',
@@ -3083,6 +3113,7 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(1);
       expect(newState.retryDelay).toEqual(2000);
     });
+
     test('CREATE_NEW_TARGET -> CHECK_VERSION_INDEX_READY_ACTIONS resets the retry count and delay', () => {
       const res: ResponseType<'CREATE_NEW_TARGET'> = Either.right('create_index_succeeded');
       const testState = {
@@ -3096,13 +3127,14 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(0);
       expect(newState.retryDelay).toEqual(0);
     });
+
     test('CREATE_NEW_TARGET -> FATAL if action fails with cluster_shard_limit_exceeded', () => {
       const res: ResponseType<'CREATE_NEW_TARGET'> = Either.left({
         type: 'cluster_shard_limit_exceeded',
       });
-      const newState = model(createNewTargetState, res);
+      const newState = model(createNewTargetState, res) as FatalState;
       expect(newState.controlState).toEqual('FATAL');
-      expect((newState as FatalState).reason).toMatchInlineSnapshot(
+      expect(newState.reason).toMatchInlineSnapshot(
         `"[cluster_shard_limit_exceeded] Upgrading Kibana requires adding a small number of new shards. Ensure that Kibana is able to add 10 more shards by increasing the cluster.max_shards_per_node setting, or removing indices to clear up resources. See clusterShardLimitExceeded"`
       );
     });
@@ -3118,6 +3150,7 @@ describe('migrations v2 model', () => {
       versionIndexReadyActions: aliasActions,
       targetIndex: '.kibana_7.11.0_001',
     };
+
     test('MARK_VERSION_INDEX_READY -> DONE if the action succeeded', () => {
       const res: ResponseType<'MARK_VERSION_INDEX_READY'> = Either.right(
         'update_aliases_succeeded'
@@ -3127,6 +3160,7 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(0);
       expect(newState.retryDelay).toEqual(0);
     });
+
     test('MARK_VERSION_INDEX_READY -> MARK_VERSION_INDEX_CONFLICT if another removed the current alias from the source index', () => {
       const res: ResponseType<'MARK_VERSION_INDEX_READY'> = Either.left({
         type: 'alias_not_found_exception',
@@ -3136,6 +3170,7 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(0);
       expect(newState.retryDelay).toEqual(0);
     });
+
     test('MARK_VERSION_INDEX_READY -> MARK_VERSION_INDEX_CONFLICT if another node removed the temporary index', () => {
       const res: ResponseType<'MARK_VERSION_INDEX_READY'> = Either.left({
         type: 'index_not_found_exception',
@@ -3158,6 +3193,7 @@ describe('migrations v2 model', () => {
       versionIndexReadyActions: aliasActions,
       targetIndex: '.kibana_7.11.0_001',
     };
+
     test('MARK_VERSION_INDEX_CONFLICT -> DONE if the current alias is pointing to the version alias', () => {
       const res: ResponseType<'MARK_VERSION_INDEX_READY_CONFLICT'> = Either.right({
         '.kibana_7.11.0_001': {
@@ -3179,6 +3215,7 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(0);
       expect(newState.retryDelay).toEqual(0);
     });
+
     test('MARK_VERSION_INDEX_READY_CONFLICT -> FATAL if the current alias is pointing to a different version index', () => {
       const res: ResponseType<'MARK_VERSION_INDEX_READY_CONFLICT'> = Either.right({
         '.kibana_7.11.0_001': {
@@ -3203,6 +3240,7 @@ describe('migrations v2 model', () => {
       expect(newState.retryCount).toEqual(0);
       expect(newState.retryDelay).toEqual(0);
     });
+
     test('MARK_VERSION_INDEX_READY_CONFLICT -> FATAL if the current alias is pointing to a multiple indices', () => {
       const res: ResponseType<'MARK_VERSION_INDEX_READY_CONFLICT'> = Either.right({
         '.kibana_7.11.0_001': {
```
@@ -82,193 +82,181 @@ export const model = (currentState: State, resW: ResponseType<AllActionStates>):

if (stateP.controlState === 'INIT') {
const res = resW as ExcludeRetryableEsError<ResponseType<typeof stateP.controlState>>;
if (Either.isLeft(res)) {
const left = res.left;
if (isTypeof(left, 'incompatible_cluster_routing_allocation')) {
const retryErrorMessage = `[${left.type}] Incompatible Elasticsearch cluster settings detected. Remove the persistent and transient Elasticsearch cluster setting 'cluster.routing.allocation.enable' or set it to a value of 'all' to allow migrations to proceed. Refer to ${stateP.migrationDocLinks.routingAllocationDisabled} for more information on how to resolve the issue.`;
return delayRetryState(stateP, retryErrorMessage, stateP.retryAttempts);
} else {
throwBadResponse(stateP, left);
}
} else if (Either.isRight(res)) {
// cluster routing allocation is enabled and we can continue with the migration as normal
const indices = res.right;
const aliasesRes = getAliases(indices);
// cluster routing allocation is enabled and we can continue with the migration as normal
const indices = res.right;
const aliasesRes = getAliases(indices);

if (Either.isLeft(aliasesRes)) {
return {
...stateP,
controlState: 'FATAL',
reason: `The ${
aliasesRes.left.alias
} alias is pointing to multiple indices: ${aliasesRes.left.indices.join(',')}.`,
};
}

const aliases = aliasesRes.right;

if (
// `.kibana` is pointing to an index that belongs to a later
// version of Kibana .e.g. a 7.11.0 instance found the `.kibana` alias
// pointing to `.kibana_7.12.0_001`
indexBelongsToLaterVersion(stateP.kibanaVersion, aliases[stateP.currentAlias])
) {
return {
...stateP,
controlState: 'FATAL',
reason: `The ${
stateP.currentAlias
} alias is pointing to a newer version of Kibana: v${indexVersion(
aliases[stateP.currentAlias]
)}`,
};
}

const laterVersionAlias = hasLaterVersionAlias(stateP.kibanaVersion, aliases);
if (
// a `.kibana_<version>` alias exist, which refers to a later version of Kibana
// e.g. `.kibana_8.7.0` exists, and current stack version is 8.6.1
// see https://github.com/elastic/kibana/issues/155136
laterVersionAlias
) {
return {
...stateP,
controlState: 'FATAL',
reason: `The ${laterVersionAlias} alias refers to a newer version of Kibana: v${aliasVersion(
laterVersionAlias
)}`,
};
}

// The source index .kibana is pointing to. E.g: ".kibana_8.7.0_001"
const source = aliases[stateP.currentAlias];
// The target index .kibana WILL be pointing to if we reindex. E.g: ".kibana_8.8.0_001"
const newVersionTarget = stateP.versionIndex;

const postInitState = {
aliases,
sourceIndex: Option.fromNullable(source),
sourceIndexMappings: Option.fromNullable(source ? indices[source]?.mappings : undefined),
versionIndexReadyActions: Option.none,
if (Either.isLeft(aliasesRes)) {
return {
...stateP,
controlState: 'FATAL',
reason: `The ${
aliasesRes.left.alias
} alias is pointing to multiple indices: ${aliasesRes.left.indices.join(',')}.`,
};
}

if (
// Don't actively participate in this migration but wait for another instance to complete it
stateP.waitForMigrationCompletion === true
) {
return {
...stateP,
...postInitState,
sourceIndex: Option.none,
targetIndex: newVersionTarget,
controlState: 'WAIT_FOR_MIGRATION_COMPLETION',
// Wait for 2s before checking again if the migration has completed
retryDelay: 2000,
logs: [
...stateP.logs,
{
level: 'info',
message: `Migration required. Waiting until another Kibana instance completes the migration.`,
},
],
};
} else if (
// If the `.kibana` alias exists
Option.isSome(postInitState.sourceIndex)
) {
return {
...stateP,
...postInitState,
controlState: 'WAIT_FOR_YELLOW_SOURCE',
sourceIndex: postInitState.sourceIndex,
sourceIndexMappings: postInitState.sourceIndexMappings as Option.Some<IndexMapping>,
targetIndex: postInitState.sourceIndex.value, // We preserve the same index, source == target (E.g: ".xx8.7.0_001")
};
} else if (indices[stateP.legacyIndex] != null) {
// Migrate from a legacy index
const aliases = aliasesRes.right;

// If the user used default index names we can narrow the version
// number we use when creating a backup index. This is purely to help
// users more easily identify how "old" and index is so that they can
// decide if it's safe to delete these rollback backups. Because
// backups are kept for rollback, a version number is more useful than
// a date.
let legacyVersion = '';
if (stateP.indexPrefix === '.kibana') {
legacyVersion = 'pre6.5.0';
} else if (stateP.indexPrefix === '.kibana_task_manager') {
legacyVersion = 'pre7.4.0';
} else {
legacyVersion = 'pre' + stateP.kibanaVersion;
}
if (
// `.kibana` is pointing to an index that belongs to a later
// version of Kibana .e.g. a 7.11.0 instance found the `.kibana` alias
// pointing to `.kibana_7.12.0_001`
indexBelongsToLaterVersion(stateP.kibanaVersion, aliases[stateP.currentAlias])
) {
return {
...stateP,
controlState: 'FATAL',
reason: `The ${
stateP.currentAlias
} alias is pointing to a newer version of Kibana: v${indexVersion(
aliases[stateP.currentAlias]
)}`,
};
}

const legacyReindexTarget = `${stateP.indexPrefix}_${legacyVersion}_001`;
const laterVersionAlias = hasLaterVersionAlias(stateP.kibanaVersion, aliases);
if (
// a `.kibana_<version>` alias exist, which refers to a later version of Kibana
// e.g. `.kibana_8.7.0` exists, and current stack version is 8.6.1
// see https://github.com/elastic/kibana/issues/155136
laterVersionAlias
) {
return {
...stateP,
controlState: 'FATAL',
reason: `The ${laterVersionAlias} alias refers to a newer version of Kibana: v${aliasVersion(
laterVersionAlias
)}`,
};
}

return {
...stateP,
...postInitState,
controlState: 'LEGACY_SET_WRITE_BLOCK',
sourceIndex: Option.some(legacyReindexTarget) as Option.Some<string>,
sourceIndexMappings: Option.some(
indices[stateP.legacyIndex].mappings
) as Option.Some<IndexMapping>,
targetIndex: newVersionTarget,
legacyPreMigrationDoneActions: [
{ remove_index: { index: stateP.legacyIndex } },
{
add: {
index: legacyReindexTarget,
alias: stateP.currentAlias,
},
},
],
versionIndexReadyActions: Option.some<AliasAction[]>([
{
remove: {
index: legacyReindexTarget,
alias: stateP.currentAlias,
must_exist: true,
},
},
{ add: { index: newVersionTarget, alias: stateP.currentAlias } },
{ add: { index: newVersionTarget, alias: stateP.versionAlias } },
{ remove_index: { index: stateP.tempIndex } },
]),
};
} else if (
// if we must relocate documents to this migrator's index, but the index does NOT yet exist:
// this migrator must create a temporary index and synchronize with other migrators
// this is a similar flow to the reindex one, but this migrator will not reindexing anything
stateP.mustRelocateDocuments
) {
return {
...stateP,
...postInitState,
controlState: 'CREATE_REINDEX_TEMP',
sourceIndex: Option.none as Option.None,
targetIndex: newVersionTarget,
versionIndexReadyActions: Option.some([
{ add: { index: newVersionTarget, alias: stateP.currentAlias } },
{ add: { index: newVersionTarget, alias: stateP.versionAlias } },
{ remove_index: { index: stateP.tempIndex } },
]),
};
// The source index .kibana is pointing to. E.g: ".kibana_8.7.0_001"
const source = aliases[stateP.currentAlias];
// The target index .kibana WILL be pointing to if we reindex. E.g: ".kibana_8.8.0_001"
const newVersionTarget = stateP.versionIndex;

const postInitState = {
aliases,
sourceIndex: Option.fromNullable(source),
sourceIndexMappings: Option.fromNullable(source ? indices[source]?.mappings : undefined),
versionIndexReadyActions: Option.none,
};

if (
// Don't actively participate in this migration but wait for another instance to complete it
stateP.waitForMigrationCompletion === true
) {
return {
...stateP,
...postInitState,
sourceIndex: Option.none,
targetIndex: newVersionTarget,
controlState: 'WAIT_FOR_MIGRATION_COMPLETION',
// Wait for 2s before checking again if the migration has completed
retryDelay: 2000,
logs: [
...stateP.logs,
{
level: 'info',
message: `Migration required. Waiting until another Kibana instance completes the migration.`,
},
],
};
} else if (
// If the `.kibana` alias exists
Option.isSome(postInitState.sourceIndex)
) {
return {
...stateP,
...postInitState,
controlState: 'WAIT_FOR_YELLOW_SOURCE',
sourceIndex: postInitState.sourceIndex,
sourceIndexMappings: postInitState.sourceIndexMappings as Option.Some<IndexMapping>,
targetIndex: postInitState.sourceIndex.value, // We preserve the same index, source == target (E.g: ".xx8.7.0_001")
};
} else if (indices[stateP.legacyIndex] != null) {
// Migrate from a legacy index

// If the user used default index names we can narrow the version
// number we use when creating a backup index. This is purely to help
// users more easily identify how "old" and index is so that they can
// decide if it's safe to delete these rollback backups. Because
// backups are kept for rollback, a version number is more useful than
// a date.
let legacyVersion = '';
if (stateP.indexPrefix === '.kibana') {
legacyVersion = 'pre6.5.0';
} else if (stateP.indexPrefix === '.kibana_task_manager') {
legacyVersion = 'pre7.4.0';
} else {
// no need to copy anything over from other indices, we can start with a clean, empty index
return {
...stateP,
...postInitState,
controlState: 'CREATE_NEW_TARGET',
sourceIndex: Option.none as Option.None,
targetIndex: newVersionTarget,
versionIndexReadyActions: Option.some([
{ add: { index: newVersionTarget, alias: stateP.currentAlias } },
{ add: { index: newVersionTarget, alias: stateP.versionAlias } },
]) as Option.Some<AliasAction[]>,
};
legacyVersion = 'pre' + stateP.kibanaVersion;
}

const legacyReindexTarget = `${stateP.indexPrefix}_${legacyVersion}_001`;

return {
...stateP,
...postInitState,
controlState: 'LEGACY_CHECK_CLUSTER_ROUTING_ALLOCATION',
sourceIndex: Option.some(legacyReindexTarget) as Option.Some<string>,
sourceIndexMappings: Option.some(
indices[stateP.legacyIndex].mappings
) as Option.Some<IndexMapping>,
targetIndex: newVersionTarget,
legacyPreMigrationDoneActions: [
{ remove_index: { index: stateP.legacyIndex } },
{
add: {
index: legacyReindexTarget,
alias: stateP.currentAlias,
},
},
],
versionIndexReadyActions: Option.some<AliasAction[]>([
{
remove: {
index: legacyReindexTarget,
alias: stateP.currentAlias,
must_exist: true,
},
},
{ add: { index: newVersionTarget, alias: stateP.currentAlias } },
{ add: { index: newVersionTarget, alias: stateP.versionAlias } },
{ remove_index: { index: stateP.tempIndex } },
]),
};
} else if (
// if we must relocate documents to this migrator's index, but the index does NOT yet exist:
// this migrator must create a temporary index and synchronize with other migrators
// this is a similar flow to the reindex one, but this migrator will not reindexing anything
stateP.mustRelocateDocuments
) {
return {
...stateP,
...postInitState,
controlState: 'CREATE_REINDEX_TEMP',
sourceIndex: Option.none as Option.None,
targetIndex: newVersionTarget,
versionIndexReadyActions: Option.some([
{ add: { index: newVersionTarget, alias: stateP.currentAlias } },
{ add: { index: newVersionTarget, alias: stateP.versionAlias } },
{ remove_index: { index: stateP.tempIndex } },
]),
};
} else {
throwBadResponse(stateP, res);
// no need to copy anything over from other indices, we can start with a clean, empty index
return {
...stateP,
...postInitState,
controlState: 'CREATE_NEW_TARGET',
sourceIndex: Option.none as Option.None,
targetIndex: newVersionTarget,
versionIndexReadyActions: Option.some([
{ add: { index: newVersionTarget, alias: stateP.currentAlias } },
{ add: { index: newVersionTarget, alias: stateP.versionAlias } },
]) as Option.Some<AliasAction[]>,
};
}
} else if (stateP.controlState === 'WAIT_FOR_MIGRATION_COMPLETION') {
const res = resW as ExcludeRetryableEsError<ResponseType<typeof stateP.controlState>>;

@@ -306,6 +294,22 @@ export const model = (currentState: State, resW: ResponseType<AllActionStates>):
],
};
}
} else if (stateP.controlState === 'LEGACY_CHECK_CLUSTER_ROUTING_ALLOCATION') {
const res = resW as ExcludeRetryableEsError<ResponseType<typeof stateP.controlState>>;
if (Either.isRight(res)) {
return {
...stateP,
controlState: 'LEGACY_SET_WRITE_BLOCK',
};
} else {
const left = res.left;
if (isTypeof(left, 'incompatible_cluster_routing_allocation')) {
const retryErrorMessage = `[${left.type}] Incompatible Elasticsearch cluster settings detected. Remove the persistent and transient Elasticsearch cluster setting 'cluster.routing.allocation.enable' or set it to a value of 'all' to allow migrations to proceed. Refer to ${stateP.migrationDocLinks.routingAllocationDisabled} for more information on how to resolve the issue.`;
return delayRetryState(stateP, retryErrorMessage, stateP.retryAttempts);
} else {
throwBadResponse(stateP, left);
}
}
} else if (stateP.controlState === 'LEGACY_SET_WRITE_BLOCK') {
const res = resW as ExcludeRetryableEsError<ResponseType<typeof stateP.controlState>>;
// If the write block is successfully in place

@@ -457,7 +461,7 @@ export const model = (currentState: State, resW: ResponseType<AllActionStates>):
// we must reindex and synchronize with other migrators
return {
...stateP,
controlState: 'CHECK_UNKNOWN_DOCUMENTS',
controlState: 'CHECK_CLUSTER_ROUTING_ALLOCATION',
};
} else {
// this migrator is not involved in a relocation, we can proceed with the standard flow

@@ -502,7 +506,7 @@ export const model = (currentState: State, resW: ResponseType<AllActionStates>):
case MigrationType.Incompatible:
return {
...stateP,
controlState: 'CHECK_UNKNOWN_DOCUMENTS',
controlState: 'CHECK_CLUSTER_ROUTING_ALLOCATION',
};
case MigrationType.Unnecessary:
return {

@@ -636,7 +640,7 @@ export const model = (currentState: State, resW: ResponseType<AllActionStates>):
...stateP,
controlState: stateP.mustRefresh ? 'REFRESH_SOURCE' : 'OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT',
};
} else if (Either.isLeft(res)) {
} else {
const left = res.left;
// Note: if multiple newer Kibana versions are competing with each other to perform a migration,
// it might happen that another Kibana instance has deleted this instance's version index.

@@ -661,11 +665,8 @@ export const model = (currentState: State, resW: ResponseType<AllActionStates>):
// step).
throwBadResponse(stateP, left as never);
} else {
// TODO update to handle 2 more cases
throwBadResponse(stateP, left);
}
} else {
throwBadResponse(stateP, res);
}
} else if (stateP.controlState === 'REFRESH_SOURCE') {
const res = resW as ExcludeRetryableEsError<ResponseType<typeof stateP.controlState>>;

@@ -677,6 +678,22 @@ export const model = (currentState: State, resW: ResponseType<AllActionStates>):
} else {
throwBadResponse(stateP, res);
}
} else if (stateP.controlState === 'CHECK_CLUSTER_ROUTING_ALLOCATION') {
const res = resW as ExcludeRetryableEsError<ResponseType<typeof stateP.controlState>>;
if (Either.isRight(res)) {
return {
...stateP,
controlState: 'CHECK_UNKNOWN_DOCUMENTS',
};
} else {
const left = res.left;
if (isTypeof(left, 'incompatible_cluster_routing_allocation')) {
const retryErrorMessage = `[${left.type}] Incompatible Elasticsearch cluster settings detected. Remove the persistent and transient Elasticsearch cluster setting 'cluster.routing.allocation.enable' or set it to a value of 'all' to allow migrations to proceed. Refer to ${stateP.migrationDocLinks.routingAllocationDisabled} for more information on how to resolve the issue.`;
return delayRetryState(stateP, retryErrorMessage, stateP.retryAttempts);
} else {
throwBadResponse(stateP, left);
}
}
} else if (stateP.controlState === 'CHECK_UNKNOWN_DOCUMENTS') {
const res = resW as ExcludeRetryableEsError<ResponseType<typeof stateP.controlState>>;

@@ -1304,11 +1321,11 @@ export const model = (currentState: State, resW: ResponseType<AllActionStates>):
Either.isRight(res) ||
(isTypeof(res.left, 'documents_transform_failed') && stateP.discardCorruptObjects)
) {
// we might have some transformation errors, but user has chosen to discard them
if (
(stateP.corruptDocumentIds.length === 0 && stateP.transformErrors.length === 0) ||
stateP.discardCorruptObjects
) {
// we might have some transformation errors from previous iterations, but user has chosen to discard them
const documents = Either.isRight(res) ? res.right.processedDocs : res.left.processedDocs;

let corruptDocumentIds = stateP.corruptDocumentIds;

@@ -1346,8 +1363,10 @@ export const model = (currentState: State, resW: ResponseType<AllActionStates>):
};
}
} else {
// We have seen corrupt documents and/or transformation errors
// skip indexing and go straight to reading and transforming more docs
// At this point, there are some corrupt documents and/or transformation errors
// from previous iterations and we're not discarding them.
// Also, the current batch of SEARCH_READ documents has been transformed successfully
// so there is no need to append them to the lists of corruptDocumentIds, transformErrors.
return {
...stateP,
controlState: 'OUTDATED_DOCUMENTS_SEARCH_READ',
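Editor's note: the CHECK_CLUSTER_ROUTING_ALLOCATION and LEGACY_CHECK_CLUSTER_ROUTING_ALLOCATION branches above hand their retry behaviour to delayRetryState, which is outside this diff. The following is a minimal TypeScript sketch of that helper's contract, inferred only from the expectations pinned by this PR's tests (retryCount increments, the first retry waits 2000ms, a log entry is appended, and the machine goes FATAL once the attempt budget is exhausted); the actual implementation in the repo may differ in detail:

  // Illustrative sketch only — not part of this commit.
  interface RetryableState {
    controlState: string;
    retryCount: number;
    retryDelay: number;
    logs: Array<{ level: string; message: string }>;
  }

  const delayRetryStateSketch = <S extends RetryableState>(
    state: S,
    errorMessage: string,
    maxRetryAttempts: number
  ): S => {
    if (state.retryCount >= maxRetryAttempts) {
      // Give up once the budget is exhausted; the real helper transitions to FATAL.
      return {
        ...state,
        controlState: 'FATAL',
        reason: `Unable to complete the ${state.controlState} step after ${maxRetryAttempts} attempts, terminating.`,
      } as S;
    }
    const retryCount = state.retryCount + 1;
    // Exponential backoff capped at 64s; the first retry waits 2s, which matches
    // the retryDelay: 2000 expectations in the tests above.
    const retryDelay = 1000 * Math.min(Math.pow(2, retryCount), 64);
    return {
      ...state,
      retryCount,
      retryDelay,
      logs: [
        ...state.logs,
        {
          level: 'error',
          message: `Action failed with '${errorMessage}'. Retrying attempt ${retryCount} in ${retryDelay / 1000} seconds.`,
        },
      ],
    };
  };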
@@ -80,7 +80,7 @@ export const nextActionMap = (
) => {
return {
INIT: (state: InitState) =>
Actions.initAction({ client, indices: [state.currentAlias, state.versionAlias] }),
Actions.fetchIndices({ client, indices: [state.currentAlias, state.versionAlias] }),
WAIT_FOR_MIGRATION_COMPLETION: (state: WaitForMigrationCompletionState) =>
Actions.fetchIndices({ client, indices: [state.currentAlias, state.versionAlias] }),
WAIT_FOR_YELLOW_SOURCE: (state: WaitForYellowSourceState) =>

@@ -117,6 +117,7 @@ export const nextActionMap = (
Actions.updateAliases({ client, aliasActions: state.preTransformDocsActions }),
REFRESH_SOURCE: (state: RefreshSource) =>
Actions.refreshIndex({ client, index: state.sourceIndex.value }),
CHECK_CLUSTER_ROUTING_ALLOCATION: () => Actions.checkClusterRoutingAllocationEnabled(client),
CHECK_UNKNOWN_DOCUMENTS: (state: CheckUnknownDocumentsState) =>
Actions.checkForUnknownDocs({
client,

@@ -287,6 +288,8 @@ export const nextActionMap = (
),
MARK_VERSION_INDEX_READY_CONFLICT: (state: MarkVersionIndexReadyConflict) =>
Actions.fetchIndices({ client, indices: [state.currentAlias, state.versionAlias] }),
LEGACY_CHECK_CLUSTER_ROUTING_ALLOCATION: () =>
Actions.checkClusterRoutingAllocationEnabled(client),
LEGACY_SET_WRITE_BLOCK: (state: LegacySetWriteBlockState) =>
Actions.setWriteBlock({ client, index: state.legacyIndex }),
LEGACY_CREATE_REINDEX_TARGET: (state: LegacyCreateReindexTargetState) =>
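Editor's note: checkClusterRoutingAllocationEnabled, wired into the action map here (and into the LEGACY_* entry just above), is defined in check_cluster_routing_allocation.ts, whose body is not reproduced in this view. As a rough sketch of what the action does, under assumptions drawn from this PR's tests — Either.right when the setting is unset or 'all', Either.left with type 'incompatible_cluster_routing_allocation' otherwise; the transient-over-persistent precedence and the exact getSettings call shape are assumptions here:

  // Illustrative sketch only; the committed action also wraps the call with
  // catchRetryableEsClientErrors, omitted here for brevity.
  import * as Either from 'fp-ts/lib/Either';
  import type * as TaskEither from 'fp-ts/lib/TaskEither';
  import type { ElasticsearchClient } from '@kbn/core-elasticsearch-server';

  export interface IncompatibleClusterRoutingAllocation {
    type: 'incompatible_cluster_routing_allocation';
  }

  export const checkClusterRoutingAllocationEnabledSketch =
    (client: ElasticsearchClient): TaskEither.TaskEither<IncompatibleClusterRoutingAllocation, {}> =>
    async () => {
      // flat_settings returns dotted keys such as 'cluster.routing.allocation.enable'.
      const settings = await client.cluster.getSettings({ flat_settings: true });
      // A transient setting takes precedence over a persistent one.
      const routingAllocation =
        settings.transient?.['cluster.routing.allocation.enable'] ??
        settings.persistent?.['cluster.routing.allocation.enable'];
      // Migrations need shard allocation enabled for all indices: unset or 'all' is compatible.
      return routingAllocation === undefined || routingAllocation === 'all'
        ? Either.right({})
        : Either.left({ type: 'incompatible_cluster_routing_allocation' as const });
    };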
@@ -384,6 +384,10 @@ export interface RefreshTarget extends PostInitState {
readonly targetIndex: string;
}

export interface CheckClusterRoutingAllocationState extends SourceExistsState {
readonly controlState: 'CHECK_CLUSTER_ROUTING_ALLOCATION';
}

export interface CheckTargetTypesMappingsState extends PostInitState {
readonly controlState: 'CHECK_TARGET_MAPPINGS';
}

@@ -510,6 +514,10 @@ export interface LegacyBaseState extends SourceExistsState {
readonly legacyPreMigrationDoneActions: AliasAction[];
}

export interface LegacyCheckClusterRoutingAllocationState extends LegacyBaseState {
readonly controlState: 'LEGACY_CHECK_CLUSTER_ROUTING_ALLOCATION';
}

export interface LegacySetWriteBlockState extends LegacyBaseState {
/** Set a write block on the legacy index to prevent any further writes */
readonly controlState: 'LEGACY_SET_WRITE_BLOCK';

@@ -549,6 +557,7 @@ export interface LegacyDeleteState extends LegacyBaseState {

export type State = Readonly<
| CalculateExcludeFiltersState
| CheckClusterRoutingAllocationState
| CheckTargetTypesMappingsState
| CheckUnknownDocumentsState
| CheckVersionIndexReadyActions

@@ -561,6 +570,7 @@ export type State = Readonly<
| DoneState
| FatalState
| InitState
| LegacyCheckClusterRoutingAllocationState
| LegacyCreateReindexTargetState
| LegacyDeleteState
| LegacyReindexState
@@ -9,7 +9,7 @@
import type { ActionErrorTypeMap as BaseActionErrorTypeMap } from '../../actions';

export {
initAction as init,
fetchIndices,
waitForIndexStatus,
createIndex,
updateAliases,

@@ -25,7 +25,6 @@ export {
transformDocs,
bulkOverwriteTransformedDocuments,
noop,
type InitActionParams,
type IncompatibleClusterRoutingAllocation,
type RetryableEsClientError,
type WaitForTaskCompletionTimeout,
@@ -9,7 +9,7 @@
import './model.test.mocks';
import * as Either from 'fp-ts/lib/Either';
import { createContextMock, MockedMigratorContext } from '../test_helpers';
import type { RetryableEsClientError } from '../../actions';
import type { FetchIndexResponse, RetryableEsClientError } from '../../actions';
import type { State, BaseState, FatalState, AllActionStates } from '../state';
import type { StateActionResponse } from './types';
import { model, modelStageMap } from './model';

@@ -89,7 +89,7 @@ describe('model', () => {
mappings: { properties: {} },
settings: {},
},
});
}) as Either.Right<FetchIndexResponse>;
const newState = model(state, res, context);

expect(newState.retryCount).toEqual(0);
@@ -75,24 +75,12 @@ describe('Stage: init', () => {
});
});

it('INIT -> INIT when cluster routing allocation is incompatible', () => {
const state = createState();
const res: StateActionResponse<'INIT'> = Either.left({
type: 'incompatible_cluster_routing_allocation',
});

const newState = init(state, res, context);

expect(newState.controlState).toEqual('INIT');
expect(newState.retryCount).toEqual(1);
expect(newState.retryDelay).toEqual(2000);
expect(newState.logs).toHaveLength(1);
});

it('calls getCurrentIndex with the correct parameters', () => {
const state = createState();
const fetchIndexResponse = createResponse();
const res: StateActionResponse<'INIT'> = Either.right(fetchIndexResponse);
const res: StateActionResponse<'INIT'> = Either.right(
fetchIndexResponse
) as Either.Right<FetchIndexResponse>;

const aliases = { '.foo': '.bar' };
getAliasesMock.mockReturnValue(Either.right(aliases));

@@ -110,7 +98,9 @@ describe('Stage: init', () => {
it('calls checkVersionCompatibility with the correct parameters', () => {
const state = createState();
const fetchIndexResponse = createResponse();
const res: StateActionResponse<'INIT'> = Either.right(fetchIndexResponse);
const res: StateActionResponse<'INIT'> = Either.right(
fetchIndexResponse
) as Either.Right<FetchIndexResponse>;

init(state, res, context);

@@ -130,7 +120,9 @@ describe('Stage: init', () => {

it('adds a log entry about the algo check', () => {
const state = createState();
const res: StateActionResponse<'INIT'> = Either.right(createResponse());
const res: StateActionResponse<'INIT'> = Either.right(
createResponse()
) as Either.Right<FetchIndexResponse>;

const newState = init(state, res, context);

@@ -142,7 +134,9 @@ describe('Stage: init', () => {
it('INIT -> FATAL', () => {
const state = createState();
const fetchIndexResponse = createResponse();
const res: StateActionResponse<'INIT'> = Either.right(fetchIndexResponse);
const res: StateActionResponse<'INIT'> = Either.right(
fetchIndexResponse
) as Either.Right<FetchIndexResponse>;

const newState = init(state, res, context);

@@ -162,7 +156,9 @@ describe('Stage: init', () => {

it('adds a log entry about the algo check', () => {
const state = createState();
const res: StateActionResponse<'INIT'> = Either.right(createResponse());
const res: StateActionResponse<'INIT'> = Either.right(
createResponse()
) as Either.Right<FetchIndexResponse>;

const newState = init(state, res, context);

@@ -174,7 +170,9 @@ describe('Stage: init', () => {
it('INIT -> FATAL', () => {
const state = createState();
const fetchIndexResponse = createResponse();
const res: StateActionResponse<'INIT'> = Either.right(fetchIndexResponse);
const res: StateActionResponse<'INIT'> = Either.right(
fetchIndexResponse
) as Either.Right<FetchIndexResponse>;

const newState = init(state, res, context);

@@ -195,7 +193,9 @@ describe('Stage: init', () => {

it('calls buildIndexMappings with the correct parameters', () => {
const state = createState();
const res: StateActionResponse<'INIT'> = Either.right(createResponse());
const res: StateActionResponse<'INIT'> = Either.right(
createResponse()
) as Either.Right<FetchIndexResponse>;

init(state, res, context);

@@ -207,7 +207,9 @@ describe('Stage: init', () => {

it('adds a log entry about the algo check', () => {
const state = createState();
const res: StateActionResponse<'INIT'> = Either.right(createResponse());
const res: StateActionResponse<'INIT'> = Either.right(
createResponse()
) as Either.Right<FetchIndexResponse>;

const newState = init(state, res, context);

@@ -219,7 +221,9 @@ describe('Stage: init', () => {
it('INIT -> UPDATE_INDEX_MAPPINGS', () => {
const state = createState();
const fetchIndexResponse = createResponse();
const res: StateActionResponse<'INIT'> = Either.right(fetchIndexResponse);
const res: StateActionResponse<'INIT'> = Either.right(
fetchIndexResponse
) as Either.Right<FetchIndexResponse>;

const mockMappings = { properties: { someMappings: 'string' } };
buildIndexMappingsMock.mockReturnValue(mockMappings);

@@ -248,7 +252,9 @@ describe('Stage: init', () => {

it('adds a log entry about the algo check', () => {
const state = createState();
const res: StateActionResponse<'INIT'> = Either.right(createResponse());
const res: StateActionResponse<'INIT'> = Either.right(
createResponse()
) as Either.Right<FetchIndexResponse>;

const newState = init(state, res, context);

@@ -260,7 +266,9 @@ describe('Stage: init', () => {
it('INIT -> UPDATE_INDEX_MAPPINGS', () => {
const state = createState();
const fetchIndexResponse = createResponse();
const res: StateActionResponse<'INIT'> = Either.right(fetchIndexResponse);
const res: StateActionResponse<'INIT'> = Either.right(
fetchIndexResponse
) as Either.Right<FetchIndexResponse>;

const mockMappings = { properties: { someMappings: 'string' } };
buildIndexMappingsMock.mockReturnValue(mockMappings);

@@ -285,7 +293,9 @@ describe('Stage: init', () => {

it('adds a log entry about the algo check', () => {
const state = createState();
const res: StateActionResponse<'INIT'> = Either.right(createResponse());
const res: StateActionResponse<'INIT'> = Either.right(
createResponse()
) as Either.Right<FetchIndexResponse>;

const newState = init(state, res, context);

@@ -302,7 +312,9 @@ describe('Stage: init', () => {
it('calls buildIndexMappings with the correct parameters', () => {
const state = createState();
const fetchIndexResponse = createResponse();
const res: StateActionResponse<'INIT'> = Either.right(fetchIndexResponse);
const res: StateActionResponse<'INIT'> = Either.right(
fetchIndexResponse
) as Either.Right<FetchIndexResponse>;

init(state, res, context);

@@ -315,7 +327,9 @@ describe('Stage: init', () => {
it('calls getCreationAliases with the correct parameters', () => {
const state = createState();
const fetchIndexResponse = createResponse();
const res: StateActionResponse<'INIT'> = Either.right(fetchIndexResponse);
const res: StateActionResponse<'INIT'> = Either.right(
fetchIndexResponse
) as Either.Right<FetchIndexResponse>;

init(state, res, context);

@@ -329,7 +343,9 @@ describe('Stage: init', () => {
it('INIT -> CREATE_TARGET_INDEX', () => {
const state = createState();
const fetchIndexResponse = createResponse();
const res: StateActionResponse<'INIT'> = Either.right(fetchIndexResponse);
const res: StateActionResponse<'INIT'> = Either.right(
fetchIndexResponse
) as Either.Right<FetchIndexResponse>;

const mockMappings = { properties: { someMappings: 'string' } };
buildIndexMappingsMock.mockReturnValue(mockMappings);

@@ -354,7 +370,9 @@ describe('Stage: init', () => {
it('calls generateAdditiveMappingDiff with the correct parameters', () => {
const state = createState();
const fetchIndexResponse = createResponse();
const res: StateActionResponse<'INIT'> = Either.right(fetchIndexResponse);
const res: StateActionResponse<'INIT'> = Either.right(
fetchIndexResponse
) as Either.Right<FetchIndexResponse>;

checkVersionCompatibilityMock.mockReturnValue({
status: 'greater',

@@ -373,7 +391,9 @@ describe('Stage: init', () => {
it('INIT -> UPDATE_INDEX_MAPPINGS', () => {
const state = createState();
const fetchIndexResponse = createResponse();
const res: StateActionResponse<'INIT'> = Either.right(fetchIndexResponse);
const res: StateActionResponse<'INIT'> = Either.right(
fetchIndexResponse
) as Either.Right<FetchIndexResponse>;

checkVersionCompatibilityMock.mockReturnValue({
status: 'greater',

@@ -396,7 +416,9 @@ describe('Stage: init', () => {

it('adds a log entry about the version check', () => {
const state = createState();
const res: StateActionResponse<'INIT'> = Either.right(createResponse());
const res: StateActionResponse<'INIT'> = Either.right(
createResponse()
) as Either.Right<FetchIndexResponse>;

checkVersionCompatibilityMock.mockReturnValue({
status: 'greater',

@@ -414,7 +436,9 @@ describe('Stage: init', () => {
it('INIT -> UPDATE_ALIASES if alias actions are not empty', () => {
const state = createState();
const fetchIndexResponse = createResponse();
const res: StateActionResponse<'INIT'> = Either.right(fetchIndexResponse);
const res: StateActionResponse<'INIT'> = Either.right(
fetchIndexResponse
) as Either.Right<FetchIndexResponse>;

checkVersionCompatibilityMock.mockReturnValue({
status: 'equal',

@@ -437,7 +461,9 @@ describe('Stage: init', () => {
it('INIT -> INDEX_STATE_UPDATE_DONE if alias actions are empty', () => {
const state = createState();
const fetchIndexResponse = createResponse();
const res: StateActionResponse<'INIT'> = Either.right(fetchIndexResponse);
const res: StateActionResponse<'INIT'> = Either.right(
fetchIndexResponse
) as Either.Right<FetchIndexResponse>;

checkVersionCompatibilityMock.mockReturnValue({
status: 'equal',

@@ -459,7 +485,9 @@ describe('Stage: init', () => {

it('adds a log entry about the version check', () => {
const state = createState();
const res: StateActionResponse<'INIT'> = Either.right(createResponse());
const res: StateActionResponse<'INIT'> = Either.right(
createResponse()
) as Either.Right<FetchIndexResponse>;

checkVersionCompatibilityMock.mockReturnValue({
status: 'equal',

@@ -477,7 +505,9 @@ describe('Stage: init', () => {
it('INIT -> INDEX_STATE_UPDATE_DONE', () => {
const state = createState();
const fetchIndexResponse = createResponse();
const res: StateActionResponse<'INIT'> = Either.right(fetchIndexResponse);
const res: StateActionResponse<'INIT'> = Either.right(
fetchIndexResponse
) as Either.Right<FetchIndexResponse>;

checkVersionCompatibilityMock.mockReturnValue({
status: 'lesser',

@@ -494,7 +524,9 @@ describe('Stage: init', () => {

it('adds a log entry about the version check', () => {
const state = createState();
const res: StateActionResponse<'INIT'> = Either.right(createResponse());
const res: StateActionResponse<'INIT'> = Either.right(
createResponse()
) as Either.Right<FetchIndexResponse>;

checkVersionCompatibilityMock.mockReturnValue({
status: 'lesser',

@@ -512,7 +544,9 @@ describe('Stage: init', () => {
it('INIT -> FATAL', () => {
const state = createState();
const fetchIndexResponse = createResponse();
const res: StateActionResponse<'INIT'> = Either.right(fetchIndexResponse);
const res: StateActionResponse<'INIT'> = Either.right(
fetchIndexResponse
) as Either.Right<FetchIndexResponse>;

checkVersionCompatibilityMock.mockReturnValue({
status: 'conflict',

@@ -530,7 +564,9 @@ describe('Stage: init', () => {

it('adds a log entry about the version check', () => {
const state = createState();
const res: StateActionResponse<'INIT'> = Either.right(createResponse());
const res: StateActionResponse<'INIT'> = Either.right(
createResponse()
) as Either.Right<FetchIndexResponse>;

checkVersionCompatibilityMock.mockReturnValue({
status: 'conflict',
@@ -8,10 +8,7 @@

import { cloneDeep } from 'lodash';
import * as Either from 'fp-ts/lib/Either';
import { delayRetryState } from '../../../model/retry_state';
import { throwBadResponse } from '../../../model/helpers';
import type { MigrationLog } from '../../../types';
import { isTypeof } from '../../actions';
import { getAliases } from '../../../model/helpers';
import {
getCurrentIndex,

@@ -33,16 +30,6 @@ export const init: ModelStage<
| 'INDEX_STATE_UPDATE_DONE'
| 'FATAL'
> = (state, res, context) => {
if (Either.isLeft(res)) {
const left = res.left;
if (isTypeof(left, 'incompatible_cluster_routing_allocation')) {
const retryErrorMessage = `[${left.type}] Incompatible Elasticsearch cluster settings detected. Remove the persistent and transient Elasticsearch cluster setting 'cluster.routing.allocation.enable' or set it to a value of 'all' to allow migrations to proceed. Refer to ${context.migrationDocLinks.routingAllocationDisabled} for more information on how to resolve the issue.`;
return delayRetryState(state, retryErrorMessage, context.maxRetryAttempts);
} else {
return throwBadResponse(state, left);
}
}

const types = context.types.map((type) => context.typeRegistry.getType(type)!);
const logs: MigrationLog[] = [...state.logs];
@@ -57,7 +57,7 @@ export const nextActionMap = (context: MigratorContext) => {
const client = context.elasticsearchClient;
return {
INIT: (state: InitState) =>
Actions.init({
Actions.fetchIndices({
client,
indices: [`${context.indexPrefix}_*`],
}),
@@ -39,11 +39,12 @@ import {
removeWriteBlock,
transformDocs,
waitForIndexStatus,
initAction,
fetchIndices,
cloneIndex,
type DocumentsTransformFailed,
type DocumentsTransformSuccess,
createBulkIndexOperationTuple,
checkClusterRoutingAllocationEnabled,
} from '@kbn/core-saved-objects-migration-server-internal';

const { startES } = createTestServers({

@@ -132,7 +133,7 @@ describe('migration actions', () => {
await esServer.stop();
});

describe('initAction', () => {
describe('fetchIndices', () => {
afterAll(async () => {
await client.cluster.putSettings({
body: {

@@ -145,7 +146,7 @@ describe('migration actions', () => {
});
it('resolves right empty record if no indices were found', async () => {
expect.assertions(1);
const task = initAction({ client, indices: ['no_such_index'] });
const task = fetchIndices({ client, indices: ['no_such_index'] });
await expect(task()).resolves.toMatchInlineSnapshot(`
Object {
"_tag": "Right",

@@ -155,7 +156,7 @@ describe('migration actions', () => {
});
it('resolves right record with found indices', async () => {
expect.assertions(1);
const res = (await initAction({
const res = (await fetchIndices({
client,
indices: ['no_such_index', 'existing_index_with_docs'],
})()) as Either.Right<unknown>;

@@ -174,7 +175,7 @@ describe('migration actions', () => {
});
it('includes the _meta data of the indices in the response', async () => {
expect.assertions(1);
const res = (await initAction({
const res = (await fetchIndices({
client,
indices: ['existing_index_with_docs'],
})()) as Either.Right<unknown>;

@@ -200,6 +201,9 @@ describe('migration actions', () => {
})
);
});
});

describe('checkClusterRoutingAllocation', () => {
it('resolves left when cluster.routing.allocation.enabled is incompatible', async () => {
expect.assertions(3);
await client.cluster.putSettings({

@@ -210,10 +214,7 @@ describe('migration actions', () => {
},
},
});
const task = initAction({
client,
indices: ['existing_index_with_docs'],
});
const task = checkClusterRoutingAllocationEnabled(client);
await expect(task()).resolves.toMatchInlineSnapshot(`
Object {
"_tag": "Left",

@@ -230,10 +231,7 @@ describe('migration actions', () => {
},
},
});
const task2 = initAction({
client,
indices: ['existing_index_with_docs'],
});
const task2 = checkClusterRoutingAllocationEnabled(client);
await expect(task2()).resolves.toMatchInlineSnapshot(`
Object {
"_tag": "Left",

@@ -250,10 +248,7 @@ describe('migration actions', () => {
},
},
});
const task3 = initAction({
client,
indices: ['existing_index_with_docs'],
});
const task3 = checkClusterRoutingAllocationEnabled(client);
await expect(task3()).resolves.toMatchInlineSnapshot(`
Object {
"_tag": "Left",

@@ -272,10 +267,7 @@ describe('migration actions', () => {
},
},
});
const task = initAction({
client,
indices: ['existing_index_with_docs'],
});
const task = checkClusterRoutingAllocationEnabled(client);
const result = await task();
expect(Either.isRight(result)).toBe(true);
});
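Editor's note: these integration tests flip a cluster-wide setting, so an aborted run can leave a local test cluster with shard allocation disabled. The suite's afterAll (truncated above) resets it; an equivalent standalone cleanup, assuming the same `client` (an ElasticsearchClient) and relying on the fact that assigning null to a setting key deletes it and restores the default, would look like:

  // Illustrative cleanup snippet only — not part of this commit.
  await client.cluster.putSettings({
    body: {
      persistent: { 'cluster.routing.allocation.enable': null },
      transient: { 'cluster.routing.allocation.enable': null },
    },
  });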
@@ -39,11 +39,12 @@ import {
removeWriteBlock,
transformDocs,
waitForIndexStatus,
initAction,
fetchIndices,
cloneIndex,
type DocumentsTransformFailed,
type DocumentsTransformSuccess,
createBulkIndexOperationTuple,
checkClusterRoutingAllocationEnabled,
} from '@kbn/core-saved-objects-migration-server-internal';

interface EsServer {

@@ -169,7 +170,7 @@ export const runActionTestSuite = ({
await esServer.stop();
});

describe('initAction', () => {
describe('fetchIndices', () => {
afterAll(async () => {
await client.cluster.putSettings({
body: {

@@ -182,7 +183,7 @@ export const runActionTestSuite = ({
});
it('resolves right empty record if no indices were found', async () => {
expect.assertions(1);
const task = initAction({ client, indices: ['no_such_index'] });
const task = fetchIndices({ client, indices: ['no_such_index'] });
await expect(task()).resolves.toMatchInlineSnapshot(`
Object {
"_tag": "Right",

@@ -192,7 +193,7 @@ export const runActionTestSuite = ({
});
it('resolves right record with found indices', async () => {
expect.assertions(1);
const res = (await initAction({
const res = (await fetchIndices({
client,
indices: ['no_such_index', 'existing_index_with_docs'],
})()) as Either.Right<unknown>;

@@ -211,7 +212,7 @@ export const runActionTestSuite = ({
});
it('includes the _meta data of the indices in the response', async () => {
expect.assertions(1);
const res = (await initAction({
const res = (await fetchIndices({
client,
indices: ['existing_index_with_docs'],
})()) as Either.Right<unknown>;

@@ -237,6 +238,9 @@ export const runActionTestSuite = ({
})
);
});
});

describe('checkClusterRoutingAllocation', () => {
it('resolves left when cluster.routing.allocation.enabled is incompatible', async () => {
expect.assertions(3);
await client.cluster.putSettings({

@@ -247,10 +251,7 @@ export const runActionTestSuite = ({
},
},
});
const task = initAction({
client,
indices: ['existing_index_with_docs'],
});
const task = checkClusterRoutingAllocationEnabled(client);
await expect(task()).resolves.toMatchInlineSnapshot(`
Object {
"_tag": "Left",

@@ -267,10 +268,7 @@ export const runActionTestSuite = ({
},
},
});
const task2 = initAction({
client,
indices: ['existing_index_with_docs'],
});
const task2 = checkClusterRoutingAllocationEnabled(client);
await expect(task2()).resolves.toMatchInlineSnapshot(`
Object {
"_tag": "Left",

@@ -287,10 +285,7 @@ export const runActionTestSuite = ({
},
},
});
const task3 = initAction({
client,
indices: ['existing_index_with_docs'],
});
const task3 = checkClusterRoutingAllocationEnabled(client);
await expect(task3()).resolves.toMatchInlineSnapshot(`
Object {
"_tag": "Left",

@@ -309,10 +304,7 @@ export const runActionTestSuite = ({
},
},
});
const task = initAction({
client,
indices: ['existing_index_with_docs'],
});
const task = checkClusterRoutingAllocationEnabled(client);
const result = await task();
expect(Either.isRight(result)).toBe(true);
});
@@ -366,7 +366,10 @@ describe('when upgrading to a new stack version', () => {
const logs = await readLog();
expect(logs).toMatch('INIT -> WAIT_FOR_YELLOW_SOURCE');
expect(logs).toMatch('WAIT_FOR_YELLOW_SOURCE -> UPDATE_SOURCE_MAPPINGS_PROPERTIES.');
expect(logs).toMatch('UPDATE_SOURCE_MAPPINGS_PROPERTIES -> CHECK_UNKNOWN_DOCUMENTS.');
expect(logs).toMatch(
'UPDATE_SOURCE_MAPPINGS_PROPERTIES -> CHECK_CLUSTER_ROUTING_ALLOCATION.'
);
expect(logs).toMatch('CHECK_CLUSTER_ROUTING_ALLOCATION -> CHECK_UNKNOWN_DOCUMENTS.');
expect(logs).toMatch('CHECK_UNKNOWN_DOCUMENTS -> SET_SOURCE_WRITE_BLOCK.');
expect(logs).toMatch('CHECK_TARGET_MAPPINGS -> UPDATE_TARGET_MAPPINGS_PROPERTIES.');
expect(logs).toMatch('UPDATE_TARGET_MAPPINGS_META -> CHECK_VERSION_INDEX_READY_ACTIONS.');
@@ -306,7 +306,8 @@ describe('split .kibana index into multiple system indices', () => {
expect(logs).toContainLogEntries(
[
// .kibana_task_manager index exists and has no aliases => LEGACY_* migration path
'[.kibana_task_manager] INIT -> LEGACY_SET_WRITE_BLOCK.',
'[.kibana_task_manager] INIT -> LEGACY_CHECK_CLUSTER_ROUTING_ALLOCATION.',
'[.kibana_task_manager] LEGACY_CHECK_CLUSTER_ROUTING_ALLOCATION -> LEGACY_SET_WRITE_BLOCK.',
'[.kibana_task_manager] LEGACY_REINDEX_WAIT_FOR_TASK -> LEGACY_DELETE.',
'[.kibana_task_manager] LEGACY_DELETE -> SET_SOURCE_WRITE_BLOCK.',
'[.kibana_task_manager] SET_SOURCE_WRITE_BLOCK -> CALCULATE_EXCLUDE_FILTERS.',

@@ -360,7 +361,8 @@ describe('split .kibana index into multiple system indices', () => {
expect(logs).toContainLogEntries(
[
'[.kibana] INIT -> WAIT_FOR_YELLOW_SOURCE.',
'[.kibana] WAIT_FOR_YELLOW_SOURCE -> CHECK_UNKNOWN_DOCUMENTS.',
'[.kibana] WAIT_FOR_YELLOW_SOURCE -> CHECK_CLUSTER_ROUTING_ALLOCATION.',
'[.kibana] CHECK_CLUSTER_ROUTING_ALLOCATION -> CHECK_UNKNOWN_DOCUMENTS.',
'[.kibana] CHECK_UNKNOWN_DOCUMENTS -> SET_SOURCE_WRITE_BLOCK.',
'[.kibana] SET_SOURCE_WRITE_BLOCK -> CALCULATE_EXCLUDE_FILTERS.',
'[.kibana] CALCULATE_EXCLUDE_FILTERS -> CREATE_REINDEX_TEMP.',
|
@ -104,7 +104,10 @@ describe('when migrating to a new version', () => {
|
|||
const logs = await readLog();
|
||||
expect(logs).toMatch('INIT -> WAIT_FOR_YELLOW_SOURCE.');
|
||||
expect(logs).toMatch('WAIT_FOR_YELLOW_SOURCE -> UPDATE_SOURCE_MAPPINGS_PROPERTIES.');
|
||||
expect(logs).toMatch('UPDATE_SOURCE_MAPPINGS_PROPERTIES -> CHECK_UNKNOWN_DOCUMENTS.');
|
||||
expect(logs).toMatch(
|
||||
'UPDATE_SOURCE_MAPPINGS_PROPERTIES -> CHECK_CLUSTER_ROUTING_ALLOCATION.'
|
||||
);
|
||||
expect(logs).toMatch('CHECK_CLUSTER_ROUTING_ALLOCATION -> CHECK_UNKNOWN_DOCUMENTS.');
|
||||
expect(logs).toMatch('CHECK_TARGET_MAPPINGS -> UPDATE_TARGET_MAPPINGS_PROPERTIES.');
|
||||
expect(logs).toMatch('UPDATE_TARGET_MAPPINGS_META -> CHECK_VERSION_INDEX_READY_ACTIONS.');
|
||||
expect(logs).toMatch('CHECK_VERSION_INDEX_READY_ACTIONS -> MARK_VERSION_INDEX_READY.');
|
||||
|
|