Mirror of https://github.com/elastic/kibana.git (synced 2025-04-24 09:48:58 -04:00)
Background task instances wait for another instance to complete the migration (#143135)
* Add WAIT_FOR_MIGRATION_COMPLETION step to migrations to wait for another instance
* WIP set waitForMigrationCompletion based on node roles
* Fix bazel deps
* NodeService tests
* Additional tests
* Fix tests and types
* Fix tests
* migrations integration test
* Address review feedback
* [CI] Auto-commit changed files from 'node scripts/precommit_hook.js --ref HEAD~1..HEAD --fix'
* In WAIT_FOR_MIGRATION_COMPLETION skip to DONE if migration complete
* Fix bug and add more tests

Co-authored-by: Kibana Machine <42973632+kibanamachine@users.noreply.github.com>
Parent e8359390b9 · commit 84d3f3857d
25 changed files with 1182 additions and 228 deletions
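In rough terms, the change makes the saved objects migrator honour the node's roles: a process configured as a background-tasks-only node hands `waitForMigrationCompletion: true` to the migration state machine and polls until a UI-capable node has finished, instead of running the migration itself. A minimal sketch of that decision, assuming the `NodeRoles` shape introduced in this diff (the helper name is illustrative, not part of the change):

```ts
interface NodeRoles {
  backgroundTasks: boolean;
  ui: boolean;
}

// Illustrative helper: a node that only runs background tasks should not drive
// the saved objects migration; it waits for another instance to complete it.
const shouldWaitForMigrationCompletion = (roles: NodeRoles): boolean =>
  roles.backgroundTasks && !roles.ui;

shouldWaitForMigrationCompletion({ backgroundTasks: true, ui: false }); // true – wait for another node
shouldWaitForMigrationCompletion({ backgroundTasks: true, ui: true }); // false – run the migration here
```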
@@ -9,4 +9,4 @@
export { nodeConfig } from './src/node_config';
export { NodeService, type PrebootDeps } from './src/node_service';
export type { InternalNodeServicePreboot } from './src/node_service';
export type { InternalNodeServicePreboot, InternalNodeServiceStart } from './src/node_service';
@@ -117,4 +117,62 @@ describe('NodeService', () => {
      });
    });
  });
  describe('#start()', () => {
    it('returns default roles values when wildcard is provided', async () => {
      configService = getMockedConfigService({ roles: ['*'] });
      coreContext = mockCoreContext.create({ logger, configService });

      service = new NodeService(coreContext);
      await service.preboot({ loggingSystem: logger });
      const { roles } = service.start();

      expect(roles.backgroundTasks).toBe(true);
      expect(roles.ui).toBe(true);
    });

    it('returns correct roles when node is configured to `background_tasks`', async () => {
      configService = getMockedConfigService({ roles: ['background_tasks'] });
      coreContext = mockCoreContext.create({ logger, configService });

      service = new NodeService(coreContext);
      await service.preboot({ loggingSystem: logger });
      const { roles } = service.start();

      expect(roles.backgroundTasks).toBe(true);
      expect(roles.ui).toBe(false);
    });

    it('returns correct roles when node is configured to `ui`', async () => {
      configService = getMockedConfigService({ roles: ['ui'] });
      coreContext = mockCoreContext.create({ logger, configService });

      service = new NodeService(coreContext);
      await service.preboot({ loggingSystem: logger });
      const { roles } = service.start();

      expect(roles.backgroundTasks).toBe(false);
      expect(roles.ui).toBe(true);
    });

    it('returns correct roles when node is configured to both `background_tasks` and `ui`', async () => {
      configService = getMockedConfigService({ roles: ['background_tasks', 'ui'] });
      coreContext = mockCoreContext.create({ logger, configService });

      service = new NodeService(coreContext);
      await service.preboot({ loggingSystem: logger });
      const { roles } = service.start();

      expect(roles.backgroundTasks).toBe(true);
      expect(roles.ui).toBe(true);
    });
    it('throws if preboot has not been run', () => {
      configService = getMockedConfigService({ roles: ['background_tasks', 'ui'] });
      coreContext = mockCoreContext.create({ logger, configService });

      service = new NodeService(coreContext);
      expect(() => service.start()).toThrowErrorMatchingInlineSnapshot(
        `"NodeService#start() can only be called after NodeService#preboot()"`
      );
    });
  });
});
@@ -28,7 +28,20 @@ const containsWildcard = (roles: string[]) => roles.includes(NODE_WILDCARD_CHAR)
 */
export interface InternalNodeServicePreboot {
  /**
   * Retrieve the Kibana instance uuid.
   * The Kibana process can take on specialised roles via the `node.roles` config.
   *
   * The roles can be used by plugins to adjust their behavior based
   * on the way the Kibana process has been configured.
   */
  roles: NodeRoles;
}

export interface InternalNodeServiceStart {
  /**
   * The Kibana process can take on specialised roles via the `node.roles` config.
   *
   * The roles can be used by plugins to adjust their behavior based
   * on the way the Kibana process has been configured.
   */
  roles: NodeRoles;
}

@@ -41,6 +54,7 @@ export interface PrebootDeps {
export class NodeService {
  private readonly configService: IConfigService;
  private readonly log: Logger;
  private roles?: NodeRoles;

  constructor(core: CoreContext) {
    this.configService = core.configService;

@@ -52,13 +66,22 @@ export class NodeService {
    loggingSystem.setGlobalContext({ service: { node: { roles } } });
    this.log.info(`Kibana process configured with roles: [${roles.join(', ')}]`);

    this.roles = NODE_ACCEPTED_ROLES.reduce((acc, curr) => {
      return { ...acc, [camelCase(curr)]: roles.includes(curr) };
    }, {} as NodeRoles);

    return {
      roles: NODE_ACCEPTED_ROLES.reduce((acc, curr) => {
        return { ...acc, [camelCase(curr)]: roles.includes(curr) };
      }, {} as NodeRoles),
      roles: this.roles,
    };
  }

  public start(): InternalNodeServiceStart {
    if (this.roles == null) {
      throw new Error('NodeService#start() can only be called after NodeService#preboot()');
    }
    return { roles: this.roles };
  }

  public stop() {
    // nothing to do here yet
  }
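For illustration, the reduce in `preboot()` above maps the configured `node.roles` strings onto camel-cased `NodeRoles` flags, which is what the tests in this PR assert. A standalone sketch, assuming lodash's `camelCase` and a `NODE_ACCEPTED_ROLES` list matching the roles exercised in the tests (the wildcard `'*'` case is resolved earlier, see `containsWildcard` in the hunk header above):

```ts
import { camelCase } from 'lodash';

// Assumed to match the accepted role names exercised in the tests above.
const NODE_ACCEPTED_ROLES = ['background_tasks', 'ui'];

const toNodeRoles = (configuredRoles: string[]): Record<string, boolean> =>
  NODE_ACCEPTED_ROLES.reduce(
    (acc, curr) => ({ ...acc, [camelCase(curr)]: configuredRoles.includes(curr) }),
    {} as Record<string, boolean>
  );

toNodeRoles(['background_tasks']); // { backgroundTasks: true, ui: false }
toNodeRoles(['background_tasks', 'ui']); // { backgroundTasks: true, ui: true }
```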
@@ -7,7 +7,11 @@
 */

import type { PublicMethodsOf } from '@kbn/utility-types';
import type { NodeService, InternalNodeServicePreboot } from '@kbn/core-node-server-internal';
import type {
  NodeService,
  InternalNodeServicePreboot,
  InternalNodeServiceStart,
} from '@kbn/core-node-server-internal';

const createInternalPrebootContractMock = () => {
  const prebootContract: jest.Mocked<InternalNodeServicePreboot> = {

@@ -19,17 +23,38 @@ const createInternalPrebootContractMock = () => {
  return prebootContract;
};

const createInternalStartContractMock = (
  {
    ui,
    backgroundTasks,
  }: {
    ui: boolean;
    backgroundTasks: boolean;
  } = { ui: true, backgroundTasks: true }
) => {
  const startContract: jest.Mocked<InternalNodeServiceStart> = {
    roles: {
      backgroundTasks,
      ui,
    },
  };
  return startContract;
};

type NodeServiceContract = PublicMethodsOf<NodeService>;
const createMock = () => {
  const mocked: jest.Mocked<NodeServiceContract> = {
    preboot: jest.fn(),
    start: jest.fn(),
    stop: jest.fn(),
  };
  mocked.preboot.mockResolvedValue(createInternalPrebootContractMock());
  mocked.start.mockReturnValue(createInternalStartContractMock());
  return mocked;
};

export const nodeServiceMock = {
  create: createMock,
  createInternalPrebootContract: createInternalPrebootContractMock,
  createInternalStartContract: createInternalStartContractMock,
};
@@ -167,14 +167,19 @@ the same version could have plugins enabled at any time that would introduce
new transforms or mappings.
   → `OUTDATED_DOCUMENTS_SEARCH`

3. If the `.kibana` alias exists we’re migrating from either a v1 or v2 index
3. If `waitForMigrations` was set we're running on a background-tasks node and
   we should not participate in the migration but instead wait for the ui node(s)
   to complete the migration.
   → `WAIT_FOR_MIGRATION_COMPLETION`

4. If the `.kibana` alias exists we’re migrating from either a v1 or v2 index
   and the migration source index is the index the `.kibana` alias points to.
   → `WAIT_FOR_YELLOW_SOURCE`

4. If `.kibana` is a concrete index, we’re migrating from a legacy index
5. If `.kibana` is a concrete index, we’re migrating from a legacy index
   → `LEGACY_SET_WRITE_BLOCK`

5. If there are no `.kibana` indices, this is a fresh deployment. Initialize a
6. If there are no `.kibana` indices, this is a fresh deployment. Initialize a
   new saved objects index
   → `CREATE_NEW_TARGET`

@@ -259,6 +264,15 @@ new `.kibana` alias that points to `.kibana_pre6.5.0_001`.
`index_not_found_exception` another instance has already completed this step.
   → `SET_SOURCE_WRITE_BLOCK`

## WAIT_FOR_MIGRATION_COMPLETION
### Next action
`fetchIndices`
### New control state
1. If the ui node finished the migration
   → `DONE`
2. Otherwise wait 2s and check again
   → WAIT_FOR_MIGRATION_COMPLETION
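Pieced together from the unit tests later in this diff (not the verbatim `model.ts` source), the new transition behaves roughly like the following sketch: it looks at the fetched indices, checks whether the version alias already points at the target index, and otherwise stays in the same state with a 2 second retry delay.

```ts
import * as Either from 'fp-ts/lib/Either';
import { getAliases, versionMigrationCompleted } from './helpers';

// Sketch only; field names mirror the migration state used elsewhere in model.ts.
function nextFromWaitForMigrationCompletion(
  stateP: { controlState: string; currentAlias: string; versionAlias: string; retryDelay: number },
  fetchIndicesResponse: Parameters<typeof getAliases>[0]
) {
  const aliases = getAliases(fetchIndicesResponse);
  if (
    Either.isRight(aliases) &&
    versionMigrationCompleted(stateP.currentAlias, stateP.versionAlias, aliases.right)
  ) {
    // Another instance finished the migration and re-pointed the aliases.
    return { ...stateP, controlState: 'DONE' };
  }
  // Keep waiting: poll again in 2 seconds.
  return { ...stateP, controlState: 'WAIT_FOR_MIGRATION_COMPLETION', retryDelay: 2000 };
}
```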
|
||||
|
||||
## WAIT_FOR_YELLOW_SOURCE
|
||||
### Next action
|
||||
`waitForIndexStatus` (status='yellow')
|
||||
|
@ -417,6 +431,13 @@ update the mappings and then use an update_by_query to ensure that all fields ar
|
|||
|
||||
## UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK
|
||||
### Next action
|
||||
`waitForPickupUpdatedMappingsTask`
|
||||
|
||||
### New control state
|
||||
→ `MARK_VERSION_INDEX_READY`
|
||||
|
||||
## MARK_VERSION_INDEX_READY
|
||||
### Next action
|
||||
`updateAliases`
|
||||
|
||||
Atomically apply the `versionIndexReadyActions` using the _alias actions API. By performing the following actions we guarantee that if multiple versions of Kibana started the upgrade in parallel, only one version will succeed.
|
||||
|
|
|
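For context, the alias swap relies on the Elasticsearch `_aliases` API applying its action list atomically. The action list below is illustrative only (assuming an elasticsearch-js `Client`), not the exact `versionIndexReadyActions` the migrator builds:

```ts
import type { Client } from '@elastic/elasticsearch';

async function markVersionIndexReady(esClient: Client) {
  await esClient.indices.updateAliases({
    actions: [
      // must_exist makes the remove fail if another instance already re-pointed
      // `.kibana`, so only one parallel upgrade can win this step.
      { remove: { index: '.kibana_7.11.0_001', alias: '.kibana', must_exist: true } },
      { add: { index: '.kibana_7.12.0_001', alias: '.kibana' } },
      { add: { index: '.kibana_7.12.0_001', alias: '.kibana_7.12.0' } },
    ],
  });
}
```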
@ -178,6 +178,7 @@ Object {
|
|||
"transformedDocBatches": Array [],
|
||||
"versionAlias": ".my-so-index_7.11.0",
|
||||
"versionIndex": ".my-so-index_7.11.0_001",
|
||||
"waitForMigrationCompletion": false,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -362,6 +363,7 @@ Object {
|
|||
"transformedDocBatches": Array [],
|
||||
"versionAlias": ".my-so-index_7.11.0",
|
||||
"versionIndex": ".my-so-index_7.11.0_001",
|
||||
"waitForMigrationCompletion": false,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -550,6 +552,7 @@ Object {
|
|||
"transformedDocBatches": Array [],
|
||||
"versionAlias": ".my-so-index_7.11.0",
|
||||
"versionIndex": ".my-so-index_7.11.0_001",
|
||||
"waitForMigrationCompletion": false,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -742,6 +745,7 @@ Object {
|
|||
"transformedDocBatches": Array [],
|
||||
"versionAlias": ".my-so-index_7.11.0",
|
||||
"versionIndex": ".my-so-index_7.11.0_001",
|
||||
"waitForMigrationCompletion": false,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -971,6 +975,7 @@ Object {
|
|||
],
|
||||
"versionAlias": ".my-so-index_7.11.0",
|
||||
"versionIndex": ".my-so-index_7.11.0_001",
|
||||
"waitForMigrationCompletion": false,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -1166,6 +1171,7 @@ Object {
|
|||
],
|
||||
"versionAlias": ".my-so-index_7.11.0",
|
||||
"versionIndex": ".my-so-index_7.11.0_001",
|
||||
"waitForMigrationCompletion": false,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
|
@ -40,6 +40,7 @@ describe('createInitialState', () => {
|
|||
expect(
|
||||
createInitialState({
|
||||
kibanaVersion: '8.1.0',
|
||||
waitForMigrationCompletion: true,
|
||||
targetMappings: {
|
||||
dynamic: 'strict',
|
||||
properties: { my_type: { properties: { title: { type: 'text' } } } },
|
||||
|
@ -216,10 +217,32 @@ describe('createInitialState', () => {
|
|||
},
|
||||
"versionAlias": ".kibana_task_manager_8.1.0",
|
||||
"versionIndex": ".kibana_task_manager_8.1.0_001",
|
||||
"waitForMigrationCompletion": true,
|
||||
}
|
||||
`);
|
||||
});
|
||||
|
||||
it('creates the initial state for the model with waitForMigrationCompletion false,', () => {
|
||||
expect(
|
||||
createInitialState({
|
||||
kibanaVersion: '8.1.0',
|
||||
waitForMigrationCompletion: false,
|
||||
targetMappings: {
|
||||
dynamic: 'strict',
|
||||
properties: { my_type: { properties: { title: { type: 'text' } } } },
|
||||
},
|
||||
migrationVersionPerType: {},
|
||||
indexPrefix: '.kibana_task_manager',
|
||||
migrationsConfig,
|
||||
typeRegistry,
|
||||
docLinks,
|
||||
logger: mockLogger.get(),
|
||||
})
|
||||
).toMatchObject({
|
||||
waitForMigrationCompletion: false,
|
||||
});
|
||||
});
|
||||
|
||||
it('returns state with the correct `knownTypes`', () => {
|
||||
typeRegistry.registerType({
|
||||
name: 'foo',
|
||||
|
@ -236,6 +259,7 @@ describe('createInitialState', () => {
|
|||
|
||||
const initialState = createInitialState({
|
||||
kibanaVersion: '8.1.0',
|
||||
waitForMigrationCompletion: false,
|
||||
targetMappings: {
|
||||
dynamic: 'strict',
|
||||
properties: { my_type: { properties: { title: { type: 'text' } } } },
|
||||
|
@ -263,6 +287,7 @@ describe('createInitialState', () => {
|
|||
|
||||
const initialState = createInitialState({
|
||||
kibanaVersion: '8.1.0',
|
||||
waitForMigrationCompletion: false,
|
||||
targetMappings: {
|
||||
dynamic: 'strict',
|
||||
properties: { my_type: { properties: { title: { type: 'text' } } } },
|
||||
|
@ -282,6 +307,7 @@ describe('createInitialState', () => {
|
|||
const preMigrationScript = "ctx._id = ctx._source.type + ':' + ctx._id";
|
||||
const initialState = createInitialState({
|
||||
kibanaVersion: '8.1.0',
|
||||
waitForMigrationCompletion: false,
|
||||
targetMappings: {
|
||||
dynamic: 'strict',
|
||||
properties: { my_type: { properties: { title: { type: 'text' } } } },
|
||||
|
@ -305,6 +331,7 @@ describe('createInitialState', () => {
|
|||
Option.isNone(
|
||||
createInitialState({
|
||||
kibanaVersion: '8.1.0',
|
||||
waitForMigrationCompletion: false,
|
||||
targetMappings: {
|
||||
dynamic: 'strict',
|
||||
properties: { my_type: { properties: { title: { type: 'text' } } } },
|
||||
|
@ -324,6 +351,7 @@ describe('createInitialState', () => {
|
|||
expect(
|
||||
createInitialState({
|
||||
kibanaVersion: '8.1.0',
|
||||
waitForMigrationCompletion: false,
|
||||
targetMappings: {
|
||||
dynamic: 'strict',
|
||||
properties: { my_type: { properties: { title: { type: 'text' } } } },
|
||||
|
@ -378,6 +406,7 @@ describe('createInitialState', () => {
|
|||
const logger = mockLogger.get();
|
||||
const initialState = createInitialState({
|
||||
kibanaVersion: '8.1.0',
|
||||
waitForMigrationCompletion: false,
|
||||
targetMappings: {
|
||||
dynamic: 'strict',
|
||||
properties: { my_type: { properties: { title: { type: 'text' } } } },
|
||||
|
@ -398,6 +427,7 @@ describe('createInitialState', () => {
|
|||
const logger = mockLogger.get();
|
||||
const initialState = createInitialState({
|
||||
kibanaVersion: '8.1.0',
|
||||
waitForMigrationCompletion: false,
|
||||
targetMappings: {
|
||||
dynamic: 'strict',
|
||||
properties: { my_type: { properties: { title: { type: 'text' } } } },
|
||||
|
@ -423,6 +453,7 @@ describe('createInitialState', () => {
|
|||
it('initializes the `discardUnknownObjects` flag to true if the value provided in the config matches the current kibana version', () => {
|
||||
const initialState = createInitialState({
|
||||
kibanaVersion: '8.1.0',
|
||||
waitForMigrationCompletion: false,
|
||||
targetMappings: {
|
||||
dynamic: 'strict',
|
||||
properties: { my_type: { properties: { title: { type: 'text' } } } },
|
||||
|
@ -445,6 +476,7 @@ describe('createInitialState', () => {
|
|||
const logger = mockLogger.get();
|
||||
const initialState = createInitialState({
|
||||
kibanaVersion: '8.1.0',
|
||||
waitForMigrationCompletion: false,
|
||||
targetMappings: {
|
||||
dynamic: 'strict',
|
||||
properties: { my_type: { properties: { title: { type: 'text' } } } },
|
||||
|
@ -470,6 +502,7 @@ describe('createInitialState', () => {
|
|||
it('initializes the `discardCorruptObjects` flag to true if the value provided in the config matches the current kibana version', () => {
|
||||
const initialState = createInitialState({
|
||||
kibanaVersion: '8.1.0',
|
||||
waitForMigrationCompletion: false,
|
||||
targetMappings: {
|
||||
dynamic: 'strict',
|
||||
properties: { my_type: { properties: { title: { type: 'text' } } } },
|
||||
|
|
|
@@ -23,6 +23,7 @@ import { excludeUnusedTypesQuery } from './core';
 */
export const createInitialState = ({
  kibanaVersion,
  waitForMigrationCompletion,
  targetMappings,
  preMigrationScript,
  migrationVersionPerType,

@@ -33,6 +34,7 @@ export const createInitialState = ({
  logger,
}: {
  kibanaVersion: string;
  waitForMigrationCompletion: boolean;
  targetMappings: IndexMapping;
  preMigrationScript?: string;
  migrationVersionPerType: SavedObjectsMigrationVersion;

@@ -95,6 +97,7 @@ export const createInitialState = ({

  return {
    controlState: 'INIT',
    waitForMigrationCompletion,
    indexPrefix,
    legacyIndex: indexPrefix,
    currentAlias: indexPrefix,
@@ -253,6 +253,7 @@ const mockOptions = () => {
  const options: MockedOptions = {
    logger: loggingSystemMock.create().get(),
    kibanaVersion: '8.2.3',
    waitForMigrationCompletion: false,
    typeRegistry: createRegistry([
      {
        name: 'testtype',
@@ -45,6 +45,7 @@ export interface KibanaMigratorOptions {
  kibanaVersion: string;
  logger: Logger;
  docLinks: DocLinksServiceStart;
  waitForMigrationCompletion: boolean;
}

/**

@@ -65,7 +66,7 @@ export class KibanaMigrator implements IKibanaMigrator {
  private readonly activeMappings: IndexMapping;
  private readonly soMigrationsConfig: SavedObjectsMigrationConfigType;
  private readonly docLinks: DocLinksServiceStart;

  private readonly waitForMigrationCompletion: boolean;
  public readonly kibanaVersion: string;

  /**

@@ -79,6 +80,7 @@ export class KibanaMigrator implements IKibanaMigrator {
    kibanaVersion,
    logger,
    docLinks,
    waitForMigrationCompletion,
  }: KibanaMigratorOptions) {
    this.client = client;
    this.kibanaIndex = kibanaIndex;

@@ -93,6 +95,7 @@ export class KibanaMigrator implements IKibanaMigrator {
      typeRegistry,
      log: this.log,
    });
    this.waitForMigrationCompletion = waitForMigrationCompletion;
    // Building the active mappings (and associated md5sums) is an expensive
    // operation so we cache the result
    this.activeMappings = buildActiveMappings(this.mappingProperties);

@@ -148,6 +151,7 @@ export class KibanaMigrator implements IKibanaMigrator {
    return runResilientMigrator({
      client: this.client,
      kibanaVersion: this.kibanaVersion,
      waitForMigrationCompletion: this.waitForMigrationCompletion,
      targetMappings: buildActiveMappings(indexMap[index].typeMappings),
      logger: this.log,
      preMigrationScript: indexMap[index].script,
@@ -39,6 +39,7 @@ describe('migrationsStateActionMachine', () => {

  const initialState = createInitialState({
    kibanaVersion: '7.11.0',
    waitForMigrationCompletion: false,
    targetMappings: { properties: {} },
    migrationVersionPerType: {},
    indexPrefix: '.my-so-index',
@@ -12,6 +12,7 @@ import {
  addMustClausesToBoolQuery,
  addMustNotClausesToBoolQuery,
  getAliases,
  versionMigrationCompleted,
} from './helpers';

describe('addExcludedTypesToBoolQuery', () => {

@@ -230,3 +231,39 @@ describe('getAliases', () => {
    `);
  });
});

describe('versionMigrationCompleted', () => {
  it('returns true if the current and version alias points to the same index', () => {
    expect(
      versionMigrationCompleted('.current-alias', '.version-alias', {
        '.current-alias': 'myindex',
        '.version-alias': 'myindex',
      })
    ).toBe(true);
  });
  it('returns false if the current and version alias does not point to the same index', () => {
    expect(
      versionMigrationCompleted('.current-alias', '.version-alias', {
        '.current-alias': 'myindex',
        '.version-alias': 'anotherindex',
      })
    ).toBe(false);
  });
  it('returns false if the current alias does not exist', () => {
    expect(
      versionMigrationCompleted('.current-alias', '.version-alias', {
        '.version-alias': 'myindex',
      })
    ).toBe(false);
  });
  it('returns false if the version alias does not exist', () => {
    expect(
      versionMigrationCompleted('.current-alias', '.version-alias', {
        '.current-alias': 'myindex',
      })
    ).toBe(false);
  });
  it('returns false if neither the version or current alias exists', () => {
    expect(versionMigrationCompleted('.current-alias', '.version-alias', {})).toBe(false);
  });
});
@@ -68,6 +68,19 @@ export function mergeMigrationMappingPropertyHashes(
  };
}

/**
 * If `.kibana` and the version specific aliases both exists and
 * are pointing to the same index. This version's migration has already
 * been completed.
 */
export function versionMigrationCompleted(
  currentAlias: string,
  versionAlias: string,
  aliases: Record<string, string | undefined>
): boolean {
  return aliases[currentAlias] != null && aliases[currentAlias] === aliases[versionAlias];
}

export function indexBelongsToLaterVersion(indexName: string, kibanaVersion: string): boolean {
  const version = valid(indexVersion(indexName));
  return version != null ? gt(version, kibanaVersion) : false;

@@ -157,16 +170,17 @@ export function getAliases(
  indices: FetchIndexResponse
): Either.Either<
  { type: 'multiple_indices_per_alias'; alias: string; indices: string[] },
  Record<string, string>
  Record<string, string | undefined>
> {
  const aliases = {} as Record<string, string>;
  const aliases = {} as Record<string, string | undefined>;
  for (const index of Object.getOwnPropertyNames(indices)) {
    for (const alias of Object.getOwnPropertyNames(indices[index].aliases || {})) {
      if (aliases[alias] != null) {
      const secondIndexThisAliasPointsTo = aliases[alias];
      if (secondIndexThisAliasPointsTo != null) {
        return Either.left({
          type: 'multiple_indices_per_alias',
          alias,
          indices: [aliases[alias], index],
          indices: [secondIndexThisAliasPointsTo, index],
        });
      }
      aliases[alias] = index;
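A quick usage sketch of the helper added above, with an alias map of the looser `Record<string, string | undefined>` shape that `getAliases` now returns (values mirror the unit tests earlier in this diff):

```ts
import { versionMigrationCompleted } from './helpers';

// Alias lookup of the shape returned by getAliases(); entries may be missing.
const aliases: Record<string, string | undefined> = {
  '.kibana': '.kibana_7.11.0_001',
  '.kibana_7.11.0': '.kibana_7.11.0_001',
};

versionMigrationCompleted('.kibana', '.kibana_7.11.0', aliases); // true – both aliases point at the same index
versionMigrationCompleted('.kibana', '.kibana_7.12.0', aliases); // false – the version alias is missing
```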
@ -102,6 +102,7 @@ describe('migrations v2 model', () => {
|
|||
routingAllocationDisabled: 'routingAllocationDisabled',
|
||||
clusterShardLimitExceeded: 'clusterShardLimitExceeded',
|
||||
},
|
||||
waitForMigrationCompletion: false,
|
||||
};
|
||||
|
||||
describe('exponential retry delays for retryable_es_client_error', () => {
|
||||
|
@ -222,13 +223,14 @@ describe('migrations v2 model', () => {
|
|||
});
|
||||
|
||||
describe('INIT', () => {
|
||||
const initState: State = {
|
||||
const initBaseState: State = {
|
||||
...baseState,
|
||||
controlState: 'INIT',
|
||||
currentAlias: '.kibana',
|
||||
versionAlias: '.kibana_7.11.0',
|
||||
versionIndex: '.kibana_7.11.0_001',
|
||||
};
|
||||
|
||||
const mappingsWithUnknownType = {
|
||||
properties: {
|
||||
disabled_saved_object_type: {
|
||||
|
@ -244,110 +246,560 @@ describe('migrations v2 model', () => {
|
|||
},
|
||||
} as const;
|
||||
|
||||
test('INIT -> OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT if .kibana is already pointing to the target index', () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
'.kibana_7.11.0_001': {
|
||||
aliases: {
|
||||
'.kibana': {},
|
||||
'.kibana_7.11.0': {},
|
||||
},
|
||||
mappings: mappingsWithUnknownType,
|
||||
settings: {},
|
||||
},
|
||||
describe('if waitForMigrationCompletion=true', () => {
|
||||
const initState = Object.assign({}, initBaseState, {
|
||||
waitForMigrationCompletion: true,
|
||||
});
|
||||
const newState = model(initState, res);
|
||||
|
||||
expect(newState.controlState).toEqual('OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT');
|
||||
// This snapshot asserts that we merge the
|
||||
// migrationMappingPropertyHashes of the existing index, but we leave
|
||||
// the mappings for the disabled_saved_object_type untouched. There
|
||||
// might be another Kibana instance that knows about this type and
|
||||
// needs these mappings in place.
|
||||
expect(newState.targetIndexMappings).toMatchInlineSnapshot(`
|
||||
Object {
|
||||
"_meta": Object {
|
||||
"migrationMappingPropertyHashes": Object {
|
||||
"disabled_saved_object_type": "7997cf5a56cc02bdc9c93361bde732b0",
|
||||
"new_saved_object_type": "4a11183eee21e6fbad864f7a30b39ad0",
|
||||
test('INIT -> OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT if .kibana is already pointing to the target index', () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
'.kibana_7.11.0_001': {
|
||||
aliases: {
|
||||
'.kibana': {},
|
||||
'.kibana_7.11.0': {},
|
||||
},
|
||||
mappings: mappingsWithUnknownType,
|
||||
settings: {},
|
||||
},
|
||||
"properties": Object {
|
||||
"new_saved_object_type": Object {
|
||||
"properties": Object {
|
||||
"value": Object {
|
||||
"type": "text",
|
||||
});
|
||||
const newState = model(initState, res);
|
||||
|
||||
expect(newState.controlState).toEqual('OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT');
|
||||
// This snapshot asserts that we merge the
|
||||
// migrationMappingPropertyHashes of the existing index, but we leave
|
||||
// the mappings for the disabled_saved_object_type untouched. There
|
||||
// might be another Kibana instance that knows about this type and
|
||||
// needs these mappings in place.
|
||||
expect(newState.targetIndexMappings).toMatchInlineSnapshot(`
|
||||
Object {
|
||||
"_meta": Object {
|
||||
"migrationMappingPropertyHashes": Object {
|
||||
"disabled_saved_object_type": "7997cf5a56cc02bdc9c93361bde732b0",
|
||||
"new_saved_object_type": "4a11183eee21e6fbad864f7a30b39ad0",
|
||||
},
|
||||
},
|
||||
"properties": Object {
|
||||
"new_saved_object_type": Object {
|
||||
"properties": Object {
|
||||
"value": Object {
|
||||
"type": "text",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
`);
|
||||
expect(newState.retryCount).toEqual(0);
|
||||
expect(newState.retryDelay).toEqual(0);
|
||||
});
|
||||
test('INIT -> INIT when cluster routing allocation is incompatible', () => {
|
||||
const res: ResponseType<'INIT'> = Either.left({
|
||||
type: 'incompatible_cluster_routing_allocation',
|
||||
}
|
||||
`);
|
||||
expect(newState.retryCount).toEqual(0);
|
||||
expect(newState.retryDelay).toEqual(0);
|
||||
});
|
||||
const newState = model(initState, res) as FatalState;
|
||||
test('INIT -> INIT when cluster routing allocation is incompatible', () => {
|
||||
const res: ResponseType<'INIT'> = Either.left({
|
||||
type: 'incompatible_cluster_routing_allocation',
|
||||
});
|
||||
const newState = model(initState, res) as FatalState;
|
||||
|
||||
expect(newState.controlState).toEqual('INIT');
|
||||
expect(newState.retryCount).toEqual(1);
|
||||
expect(newState.retryDelay).toEqual(2000);
|
||||
expect(newState.logs[0]).toMatchInlineSnapshot(`
|
||||
Object {
|
||||
"level": "error",
|
||||
"message": "Action failed with '[incompatible_cluster_routing_allocation] Incompatible Elasticsearch cluster settings detected. Remove the persistent and transient Elasticsearch cluster setting 'cluster.routing.allocation.enable' or set it to a value of 'all' to allow migrations to proceed. Refer to routingAllocationDisabled for more information on how to resolve the issue.'. Retrying attempt 1 in 2 seconds.",
|
||||
}
|
||||
`);
|
||||
});
|
||||
test("INIT -> FATAL when .kibana points to newer version's index", () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
'.kibana_7.12.0_001': {
|
||||
aliases: {
|
||||
'.kibana': {},
|
||||
'.kibana_7.12.0': {},
|
||||
},
|
||||
mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
|
||||
settings: {},
|
||||
},
|
||||
'.kibana_7.11.0_001': {
|
||||
aliases: { '.kibana_7.11.0': {} },
|
||||
mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
|
||||
settings: {},
|
||||
},
|
||||
expect(newState.controlState).toEqual('INIT');
|
||||
expect(newState.retryCount).toEqual(1);
|
||||
expect(newState.retryDelay).toEqual(2000);
|
||||
expect(newState.logs[0]).toMatchInlineSnapshot(`
|
||||
Object {
|
||||
"level": "error",
|
||||
"message": "Action failed with '[incompatible_cluster_routing_allocation] Incompatible Elasticsearch cluster settings detected. Remove the persistent and transient Elasticsearch cluster setting 'cluster.routing.allocation.enable' or set it to a value of 'all' to allow migrations to proceed. Refer to routingAllocationDisabled for more information on how to resolve the issue.'. Retrying attempt 1 in 2 seconds.",
|
||||
}
|
||||
`);
|
||||
});
|
||||
const newState = model(initState, res) as FatalState;
|
||||
|
||||
expect(newState.controlState).toEqual('FATAL');
|
||||
expect(newState.reason).toMatchInlineSnapshot(
|
||||
`"The .kibana alias is pointing to a newer version of Kibana: v7.12.0"`
|
||||
);
|
||||
});
|
||||
test('INIT -> FATAL when .kibana points to multiple indices', () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
'.kibana_7.12.0_001': {
|
||||
aliases: {
|
||||
'.kibana': {},
|
||||
'.kibana_7.12.0': {},
|
||||
test("INIT -> FATAL when .kibana points to newer version's index", () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
'.kibana_7.12.0_001': {
|
||||
aliases: {
|
||||
'.kibana': {},
|
||||
'.kibana_7.12.0': {},
|
||||
},
|
||||
mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
|
||||
settings: {},
|
||||
},
|
||||
mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
|
||||
settings: {},
|
||||
},
|
||||
'.kibana_7.11.0_001': {
|
||||
aliases: { '.kibana': {}, '.kibana_7.11.0': {} },
|
||||
mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
|
||||
settings: {},
|
||||
},
|
||||
});
|
||||
const newState = model(initState, res) as FatalState;
|
||||
'.kibana_7.11.0_001': {
|
||||
aliases: { '.kibana_7.11.0': {} },
|
||||
mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
|
||||
settings: {},
|
||||
},
|
||||
});
|
||||
const newState = model(initState, res) as FatalState;
|
||||
|
||||
expect(newState.controlState).toEqual('FATAL');
|
||||
expect(newState.reason).toMatchInlineSnapshot(
|
||||
`"The .kibana alias is pointing to multiple indices: .kibana_7.12.0_001,.kibana_7.11.0_001."`
|
||||
);
|
||||
expect(newState.controlState).toEqual('FATAL');
|
||||
expect(newState.reason).toMatchInlineSnapshot(
|
||||
`"The .kibana alias is pointing to a newer version of Kibana: v7.12.0"`
|
||||
);
|
||||
});
|
||||
test('INIT -> FATAL when .kibana points to multiple indices', () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
'.kibana_7.12.0_001': {
|
||||
aliases: {
|
||||
'.kibana': {},
|
||||
'.kibana_7.12.0': {},
|
||||
},
|
||||
mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
|
||||
settings: {},
|
||||
},
|
||||
'.kibana_7.11.0_001': {
|
||||
aliases: { '.kibana': {}, '.kibana_7.11.0': {} },
|
||||
mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
|
||||
settings: {},
|
||||
},
|
||||
});
|
||||
const newState = model(initState, res) as FatalState;
|
||||
|
||||
expect(newState.controlState).toEqual('FATAL');
|
||||
expect(newState.reason).toMatchInlineSnapshot(
|
||||
`"The .kibana alias is pointing to multiple indices: .kibana_7.12.0_001,.kibana_7.11.0_001."`
|
||||
);
|
||||
});
|
||||
test('INIT -> WAIT_FOR_MIGRATION_COMPLETION when .kibana points to an index with an invalid version', () => {
|
||||
// If users tamper with our index version naming scheme we can no
|
||||
// longer accurately detect a newer version. Older Kibana versions
|
||||
// will have indices like `.kibana_10` and users might choose an
|
||||
// invalid name when restoring from a snapshot. So we try to be
|
||||
// lenient and assume it's an older index and perform a migration.
|
||||
// If the tampered index belonged to a newer version the migration
|
||||
// will fail when we start transforming documents.
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
'.kibana_7.invalid.0_001': {
|
||||
aliases: {
|
||||
'.kibana': {},
|
||||
'.kibana_7.12.0': {},
|
||||
},
|
||||
mappings: mappingsWithUnknownType,
|
||||
settings: {},
|
||||
},
|
||||
'.kibana_7.11.0_001': {
|
||||
aliases: { '.kibana_7.11.0': {} },
|
||||
mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
|
||||
settings: {},
|
||||
},
|
||||
});
|
||||
const newState = model(initState, res) as WaitForYellowSourceState;
|
||||
|
||||
expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
|
||||
expect(newState.retryDelay).toBe(2000);
|
||||
});
|
||||
test('INIT -> WAIT_FOR_MIGRATION_COMPLETION when migrating from a v2 migrations index (>= 7.11.0)', () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
'.kibana_7.11.0_001': {
|
||||
aliases: { '.kibana': {}, '.kibana_7.11.0': {} },
|
||||
mappings: mappingsWithUnknownType,
|
||||
settings: {},
|
||||
},
|
||||
'.kibana_3': {
|
||||
aliases: {},
|
||||
mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
|
||||
settings: {},
|
||||
},
|
||||
});
|
||||
const newState = model(
|
||||
{
|
||||
...initState,
|
||||
...{
|
||||
kibanaVersion: '7.12.0',
|
||||
versionAlias: '.kibana_7.12.0',
|
||||
versionIndex: '.kibana_7.12.0_001',
|
||||
},
|
||||
},
|
||||
res
|
||||
) as WaitForYellowSourceState;
|
||||
|
||||
expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
|
||||
expect(newState.retryDelay).toEqual(2000);
|
||||
});
|
||||
test('INIT -> WAIT_FOR_MIGRATION_COMPLETION when migrating from a v1 migrations index (>= 6.5 < 7.11.0)', () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
'.kibana_3': {
|
||||
aliases: {
|
||||
'.kibana': {},
|
||||
},
|
||||
mappings: mappingsWithUnknownType,
|
||||
settings: {},
|
||||
},
|
||||
});
|
||||
const newState = model(initState, res) as WaitForYellowSourceState;
|
||||
|
||||
expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
|
||||
expect(newState.retryDelay).toEqual(2000);
|
||||
});
|
||||
test('INIT -> WAIT_FOR_MIGRATION_COMPLETION when migrating from a legacy index (>= 6.0.0 < 6.5)', () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
'.kibana': {
|
||||
aliases: {},
|
||||
mappings: mappingsWithUnknownType,
|
||||
settings: {},
|
||||
},
|
||||
});
|
||||
const newState = model(initState, res);
|
||||
|
||||
expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
|
||||
expect(newState.retryDelay).toEqual(2000);
|
||||
});
|
||||
test('INIT -> WAIT_FOR_MIGRATION_COMPLETION when migrating from a custom kibana.index name (>= 6.5 < 7.11.0)', () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
'my-saved-objects_3': {
|
||||
aliases: {
|
||||
'my-saved-objects': {},
|
||||
},
|
||||
mappings: mappingsWithUnknownType,
|
||||
settings: {},
|
||||
},
|
||||
});
|
||||
const newState = model(
|
||||
{
|
||||
...initState,
|
||||
controlState: 'INIT',
|
||||
currentAlias: 'my-saved-objects',
|
||||
versionAlias: 'my-saved-objects_7.11.0',
|
||||
versionIndex: 'my-saved-objects_7.11.0_001',
|
||||
},
|
||||
res
|
||||
) as WaitForYellowSourceState;
|
||||
|
||||
expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
|
||||
expect(newState.retryDelay).toEqual(2000);
|
||||
});
|
||||
test('INIT -> WAIT_FOR_MIGRATION_COMPLETION when migrating from a custom kibana.index v2 migrations index (>= 7.11.0)', () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
'my-saved-objects_7.11.0': {
|
||||
aliases: {
|
||||
'my-saved-objects': {},
|
||||
},
|
||||
mappings: mappingsWithUnknownType,
|
||||
settings: {},
|
||||
},
|
||||
});
|
||||
const newState = model(
|
||||
{
|
||||
...initState,
|
||||
controlState: 'INIT',
|
||||
kibanaVersion: '7.12.0',
|
||||
currentAlias: 'my-saved-objects',
|
||||
versionAlias: 'my-saved-objects_7.12.0',
|
||||
versionIndex: 'my-saved-objects_7.12.0_001',
|
||||
},
|
||||
res
|
||||
) as WaitForYellowSourceState;
|
||||
|
||||
expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
|
||||
expect(newState.retryDelay).toEqual(2000);
|
||||
});
|
||||
test('INIT -> WAIT_FOR_MIGRATION_COMPLETION when no indices/aliases exist', () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({});
|
||||
const newState = model(initState, res);
|
||||
|
||||
expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
|
||||
expect(newState.retryDelay).toEqual(2000);
|
||||
});
|
||||
});
|
||||
test('INIT -> WAIT_FOR_YELLOW_SOURCE when .kibana points to an index with an invalid version', () => {
|
||||
describe('if waitForMigrationCompletion=false', () => {
|
||||
const initState = Object.assign({}, initBaseState, {
|
||||
waitForMigrationCompletion: false,
|
||||
});
|
||||
test('INIT -> OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT if .kibana is already pointing to the target index', () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
'.kibana_7.11.0_001': {
|
||||
aliases: {
|
||||
'.kibana': {},
|
||||
'.kibana_7.11.0': {},
|
||||
},
|
||||
mappings: mappingsWithUnknownType,
|
||||
settings: {},
|
||||
},
|
||||
});
|
||||
const newState = model(initState, res);
|
||||
|
||||
expect(newState.controlState).toEqual('OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT');
|
||||
// This snapshot asserts that we merge the
|
||||
// migrationMappingPropertyHashes of the existing index, but we leave
|
||||
// the mappings for the disabled_saved_object_type untouched. There
|
||||
// might be another Kibana instance that knows about this type and
|
||||
// needs these mappings in place.
|
||||
expect(newState.targetIndexMappings).toMatchInlineSnapshot(`
|
||||
Object {
|
||||
"_meta": Object {
|
||||
"migrationMappingPropertyHashes": Object {
|
||||
"disabled_saved_object_type": "7997cf5a56cc02bdc9c93361bde732b0",
|
||||
"new_saved_object_type": "4a11183eee21e6fbad864f7a30b39ad0",
|
||||
},
|
||||
},
|
||||
"properties": Object {
|
||||
"new_saved_object_type": Object {
|
||||
"properties": Object {
|
||||
"value": Object {
|
||||
"type": "text",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
`);
|
||||
expect(newState.retryCount).toEqual(0);
|
||||
expect(newState.retryDelay).toEqual(0);
|
||||
});
|
||||
test('INIT -> INIT when cluster routing allocation is incompatible', () => {
|
||||
const res: ResponseType<'INIT'> = Either.left({
|
||||
type: 'incompatible_cluster_routing_allocation',
|
||||
});
|
||||
const newState = model(initState, res) as FatalState;
|
||||
|
||||
expect(newState.controlState).toEqual('INIT');
|
||||
expect(newState.retryCount).toEqual(1);
|
||||
expect(newState.retryDelay).toEqual(2000);
|
||||
expect(newState.logs[0]).toMatchInlineSnapshot(`
|
||||
Object {
|
||||
"level": "error",
|
||||
"message": "Action failed with '[incompatible_cluster_routing_allocation] Incompatible Elasticsearch cluster settings detected. Remove the persistent and transient Elasticsearch cluster setting 'cluster.routing.allocation.enable' or set it to a value of 'all' to allow migrations to proceed. Refer to routingAllocationDisabled for more information on how to resolve the issue.'. Retrying attempt 1 in 2 seconds.",
|
||||
}
|
||||
`);
|
||||
});
|
||||
test("INIT -> FATAL when .kibana points to newer version's index", () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
'.kibana_7.12.0_001': {
|
||||
aliases: {
|
||||
'.kibana': {},
|
||||
'.kibana_7.12.0': {},
|
||||
},
|
||||
mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
|
||||
settings: {},
|
||||
},
|
||||
'.kibana_7.11.0_001': {
|
||||
aliases: { '.kibana_7.11.0': {} },
|
||||
mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
|
||||
settings: {},
|
||||
},
|
||||
});
|
||||
const newState = model(initState, res) as FatalState;
|
||||
|
||||
expect(newState.controlState).toEqual('FATAL');
|
||||
expect(newState.reason).toMatchInlineSnapshot(
|
||||
`"The .kibana alias is pointing to a newer version of Kibana: v7.12.0"`
|
||||
);
|
||||
});
|
||||
test('INIT -> FATAL when .kibana points to multiple indices', () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
'.kibana_7.12.0_001': {
|
||||
aliases: {
|
||||
'.kibana': {},
|
||||
'.kibana_7.12.0': {},
|
||||
},
|
||||
mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
|
||||
settings: {},
|
||||
},
|
||||
'.kibana_7.11.0_001': {
|
||||
aliases: { '.kibana': {}, '.kibana_7.11.0': {} },
|
||||
mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
|
||||
settings: {},
|
||||
},
|
||||
});
|
||||
const newState = model(initState, res) as FatalState;
|
||||
|
||||
expect(newState.controlState).toEqual('FATAL');
|
||||
expect(newState.reason).toMatchInlineSnapshot(
|
||||
`"The .kibana alias is pointing to multiple indices: .kibana_7.12.0_001,.kibana_7.11.0_001."`
|
||||
);
|
||||
});
|
||||
test('INIT -> WAIT_FOR_YELLOW_SOURCE when .kibana points to an index with an invalid version', () => {
|
||||
// If users tamper with our index version naming scheme we can no
|
||||
// longer accurately detect a newer version. Older Kibana versions
|
||||
// will have indices like `.kibana_10` and users might choose an
|
||||
// invalid name when restoring from a snapshot. So we try to be
|
||||
// lenient and assume it's an older index and perform a migration.
|
||||
// If the tampered index belonged to a newer version the migration
|
||||
// will fail when we start transforming documents.
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
'.kibana_7.invalid.0_001': {
|
||||
aliases: {
|
||||
'.kibana': {},
|
||||
'.kibana_7.12.0': {},
|
||||
},
|
||||
mappings: mappingsWithUnknownType,
|
||||
settings: {},
|
||||
},
|
||||
'.kibana_7.11.0_001': {
|
||||
aliases: { '.kibana_7.11.0': {} },
|
||||
mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
|
||||
settings: {},
|
||||
},
|
||||
});
|
||||
const newState = model(initState, res) as WaitForYellowSourceState;
|
||||
|
||||
expect(newState.controlState).toBe('WAIT_FOR_YELLOW_SOURCE');
|
||||
expect(newState.sourceIndex.value).toBe('.kibana_7.invalid.0_001');
|
||||
});
|
||||
|
||||
test('INIT -> WAIT_FOR_YELLOW_SOURCE when migrating from a v2 migrations index (>= 7.11.0)', () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
'.kibana_7.11.0_001': {
|
||||
aliases: { '.kibana': {}, '.kibana_7.11.0': {} },
|
||||
mappings: mappingsWithUnknownType,
|
||||
settings: {},
|
||||
},
|
||||
'.kibana_3': {
|
||||
aliases: {},
|
||||
mappings: { properties: {}, _meta: { migrationMappingPropertyHashes: {} } },
|
||||
settings: {},
|
||||
},
|
||||
});
|
||||
const newState = model(
|
||||
{
|
||||
...initState,
|
||||
...{
|
||||
kibanaVersion: '7.12.0',
|
||||
versionAlias: '.kibana_7.12.0',
|
||||
versionIndex: '.kibana_7.12.0_001',
|
||||
},
|
||||
},
|
||||
res
|
||||
) as WaitForYellowSourceState;
|
||||
|
||||
expect(newState.controlState).toBe('WAIT_FOR_YELLOW_SOURCE');
|
||||
expect(newState.sourceIndex.value).toBe('.kibana_7.11.0_001');
|
||||
expect(newState.retryCount).toEqual(0);
|
||||
expect(newState.retryDelay).toEqual(0);
|
||||
});
|
||||
|
||||
test('INIT -> WAIT_FOR_YELLOW_SOURCE when migrating from a v1 migrations index (>= 6.5 < 7.11.0)', () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
'.kibana_3': {
|
||||
aliases: {
|
||||
'.kibana': {},
|
||||
},
|
||||
mappings: mappingsWithUnknownType,
|
||||
settings: {},
|
||||
},
|
||||
});
|
||||
const newState = model(initState, res) as WaitForYellowSourceState;
|
||||
|
||||
expect(newState.controlState).toBe('WAIT_FOR_YELLOW_SOURCE');
|
||||
expect(newState.sourceIndex.value).toBe('.kibana_3');
|
||||
expect(newState.retryCount).toEqual(0);
|
||||
expect(newState.retryDelay).toEqual(0);
|
||||
});
|
||||
test('INIT -> LEGACY_SET_WRITE_BLOCK when migrating from a legacy index (>= 6.0.0 < 6.5)', () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
'.kibana': {
|
||||
aliases: {},
|
||||
mappings: mappingsWithUnknownType,
|
||||
settings: {},
|
||||
},
|
||||
});
|
||||
const newState = model(initState, res);
|
||||
|
||||
expect(newState).toMatchObject({
|
||||
controlState: 'LEGACY_SET_WRITE_BLOCK',
|
||||
sourceIndex: Option.some('.kibana_pre6.5.0_001'),
|
||||
targetIndex: '.kibana_7.11.0_001',
|
||||
});
|
||||
// This snapshot asserts that we disable the unknown saved object
|
||||
// type. Because it's mappings are disabled, we also don't copy the
|
||||
// `_meta.migrationMappingPropertyHashes` for the disabled type.
|
||||
expect(newState.targetIndexMappings).toMatchInlineSnapshot(`
|
||||
Object {
|
||||
"_meta": Object {
|
||||
"migrationMappingPropertyHashes": Object {
|
||||
"new_saved_object_type": "4a11183eee21e6fbad864f7a30b39ad0",
|
||||
},
|
||||
},
|
||||
"properties": Object {
|
||||
"disabled_saved_object_type": Object {
|
||||
"dynamic": false,
|
||||
"properties": Object {},
|
||||
},
|
||||
"new_saved_object_type": Object {
|
||||
"properties": Object {
|
||||
"value": Object {
|
||||
"type": "text",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
`);
|
||||
expect(newState.retryCount).toEqual(0);
|
||||
expect(newState.retryDelay).toEqual(0);
|
||||
});
|
||||
test('INIT -> WAIT_FOR_YELLOW_SOURCE when migrating from a custom kibana.index name (>= 6.5 < 7.11.0)', () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
'my-saved-objects_3': {
|
||||
aliases: {
|
||||
'my-saved-objects': {},
|
||||
},
|
||||
mappings: mappingsWithUnknownType,
|
||||
settings: {},
|
||||
},
|
||||
});
|
||||
const newState = model(
|
||||
{
|
||||
...baseState,
|
||||
controlState: 'INIT',
|
||||
currentAlias: 'my-saved-objects',
|
||||
versionAlias: 'my-saved-objects_7.11.0',
|
||||
versionIndex: 'my-saved-objects_7.11.0_001',
|
||||
},
|
||||
res
|
||||
) as WaitForYellowSourceState;
|
||||
|
||||
expect(newState.controlState).toBe('WAIT_FOR_YELLOW_SOURCE');
|
||||
expect(newState.sourceIndex.value).toBe('my-saved-objects_3');
|
||||
expect(newState.retryCount).toEqual(0);
|
||||
expect(newState.retryDelay).toEqual(0);
|
||||
});
|
||||
test('INIT -> WAIT_FOR_YELLOW_SOURCE when migrating from a custom kibana.index v2 migrations index (>= 7.11.0)', () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
'my-saved-objects_7.11.0': {
|
||||
aliases: {
|
||||
'my-saved-objects': {},
|
||||
},
|
||||
mappings: mappingsWithUnknownType,
|
||||
settings: {},
|
||||
},
|
||||
});
|
||||
const newState = model(
|
||||
{
|
||||
...baseState,
|
||||
controlState: 'INIT',
|
||||
kibanaVersion: '7.12.0',
|
||||
currentAlias: 'my-saved-objects',
|
||||
versionAlias: 'my-saved-objects_7.12.0',
|
||||
versionIndex: 'my-saved-objects_7.12.0_001',
|
||||
},
|
||||
res
|
||||
) as WaitForYellowSourceState;
|
||||
|
||||
expect(newState.controlState).toBe('WAIT_FOR_YELLOW_SOURCE');
|
||||
expect(newState.sourceIndex.value).toBe('my-saved-objects_7.11.0');
|
||||
|
||||
expect(newState.retryCount).toEqual(0);
|
||||
expect(newState.retryDelay).toEqual(0);
|
||||
});
|
||||
test('INIT -> CREATE_NEW_TARGET when no indices/aliases exist', () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({});
|
||||
const newState = model(initState, res);
|
||||
|
||||
expect(newState).toMatchObject({
|
||||
controlState: 'CREATE_NEW_TARGET',
|
||||
sourceIndex: Option.none,
|
||||
targetIndex: '.kibana_7.11.0_001',
|
||||
});
|
||||
expect(newState.retryCount).toEqual(0);
|
||||
expect(newState.retryDelay).toEqual(0);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('WAIT_FOR_MIGRATION_COMPLETION', () => {
|
||||
const waitForMState: State = {
|
||||
...baseState,
|
||||
controlState: 'WAIT_FOR_MIGRATION_COMPLETION',
|
||||
currentAlias: '.kibana',
|
||||
versionAlias: '.kibana_7.11.0',
|
||||
versionIndex: '.kibana_7.11.0_001',
|
||||
};
|
||||
|
||||
test('WAIT_FOR_MIGRATION_COMPLETION -> WAIT_FOR_MIGRATION_COMPLETION when .kibana points to an index with an invalid version', () => {
|
||||
// If users tamper with our index version naming scheme we can no
|
||||
// longer accurately detect a newer version. Older Kibana versions
|
||||
// will have indices like `.kibana_10` and users might choose an
|
||||
|
@ -355,13 +807,13 @@ describe('migrations v2 model', () => {
|
|||
// lenient and assume it's an older index and perform a migration.
|
||||
// If the tampered index belonged to a newer version the migration
|
||||
// will fail when we start transforming documents.
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
const res: ResponseType<'WAIT_FOR_MIGRATION_COMPLETION'> = Either.right({
|
||||
'.kibana_7.invalid.0_001': {
|
||||
aliases: {
|
||||
'.kibana': {},
|
||||
'.kibana_7.12.0': {},
|
||||
},
|
||||
mappings: mappingsWithUnknownType,
|
||||
mappings: { properties: {} },
|
||||
settings: {},
|
||||
},
|
||||
'.kibana_7.11.0_001': {
|
||||
|
@ -370,17 +822,16 @@ describe('migrations v2 model', () => {
|
|||
settings: {},
|
||||
},
|
||||
});
|
||||
const newState = model(initState, res) as WaitForYellowSourceState;
|
||||
const newState = model(waitForMState, res) as WaitForYellowSourceState;
|
||||
|
||||
expect(newState.controlState).toBe('WAIT_FOR_YELLOW_SOURCE');
|
||||
expect(newState.sourceIndex.value).toBe('.kibana_7.invalid.0_001');
|
||||
expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
|
||||
expect(newState.retryDelay).toBe(2000);
|
||||
});
|
||||
|
||||
test('INIT -> WAIT_FOR_YELLOW_SOURCE when migrating from a v2 migrations index (>= 7.11.0)', () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
test('WAIT_FOR_MIGRATION_COMPLETION -> WAIT_FOR_MIGRATION_COMPLETION when migrating from a v2 migrations index (>= 7.11.0)', () => {
|
||||
const res: ResponseType<'WAIT_FOR_MIGRATION_COMPLETION'> = Either.right({
|
||||
'.kibana_7.11.0_001': {
|
||||
aliases: { '.kibana': {}, '.kibana_7.11.0': {} },
|
||||
mappings: mappingsWithUnknownType,
|
||||
mappings: { properties: {} },
|
||||
settings: {},
|
||||
},
|
||||
'.kibana_3': {
|
||||
|
@ -391,7 +842,7 @@ describe('migrations v2 model', () => {
|
|||
});
|
||||
const newState = model(
|
||||
{
|
||||
...initState,
|
||||
...waitForMState,
|
||||
...{
|
||||
kibanaVersion: '7.12.0',
|
||||
versionAlias: '.kibana_7.12.0',
|
||||
|
@ -401,86 +852,50 @@ describe('migrations v2 model', () => {
|
|||
res
|
||||
) as WaitForYellowSourceState;
|
||||
|
||||
expect(newState.controlState).toBe('WAIT_FOR_YELLOW_SOURCE');
|
||||
expect(newState.sourceIndex.value).toBe('.kibana_7.11.0_001');
|
||||
expect(newState.retryCount).toEqual(0);
|
||||
expect(newState.retryDelay).toEqual(0);
|
||||
expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
|
||||
expect(newState.retryDelay).toEqual(2000);
|
||||
});
|
||||
|
||||
test('INIT -> WAIT_FOR_YELLOW_SOURCE when migrating from a v1 migrations index (>= 6.5 < 7.11.0)', () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
test('WAIT_FOR_MIGRATION_COMPLETION -> WAIT_FOR_MIGRATION_COMPLETION when migrating from a v1 migrations index (>= 6.5 < 7.11.0)', () => {
|
||||
const res: ResponseType<'WAIT_FOR_MIGRATION_COMPLETION'> = Either.right({
|
||||
'.kibana_3': {
|
||||
aliases: {
|
||||
'.kibana': {},
|
||||
},
|
||||
mappings: mappingsWithUnknownType,
|
||||
mappings: { properties: {} },
|
||||
settings: {},
|
||||
},
|
||||
});
|
||||
const newState = model(initState, res) as WaitForYellowSourceState;
|
||||
const newState = model(waitForMState, res) as WaitForYellowSourceState;
|
||||
|
||||
expect(newState.controlState).toBe('WAIT_FOR_YELLOW_SOURCE');
|
||||
expect(newState.sourceIndex.value).toBe('.kibana_3');
|
||||
expect(newState.retryCount).toEqual(0);
|
||||
expect(newState.retryDelay).toEqual(0);
|
||||
expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
|
||||
expect(newState.retryDelay).toEqual(2000);
|
||||
});
|
||||
test('INIT -> LEGACY_SET_WRITE_BLOCK when migrating from a legacy index (>= 6.0.0 < 6.5)', () => {
|
||||
const res: ResponseType<'INIT'> = Either.right({
|
||||
test('WAIT_FOR_MIGRATION_COMPLETION -> WAIT_FOR_MIGRATION_COMPLETION when migrating from a legacy index (>= 6.0.0 < 6.5)', () => {
|
||||
const res: ResponseType<'WAIT_FOR_MIGRATION_COMPLETION'> = Either.right({
|
||||
'.kibana': {
|
||||
aliases: {},
|
||||
mappings: mappingsWithUnknownType,
|
||||
mappings: { properties: {} },
|
||||
settings: {},
},
});
const newState = model(initState, res);
const newState = model(waitForMState, res);

expect(newState).toMatchObject({
controlState: 'LEGACY_SET_WRITE_BLOCK',
sourceIndex: Option.some('.kibana_pre6.5.0_001'),
targetIndex: '.kibana_7.11.0_001',
});
// This snapshot asserts that we disable the unknown saved object
// type. Because it's mappings are disabled, we also don't copy the
// `_meta.migrationMappingPropertyHashes` for the disabled type.
expect(newState.targetIndexMappings).toMatchInlineSnapshot(`
Object {
"_meta": Object {
"migrationMappingPropertyHashes": Object {
"new_saved_object_type": "4a11183eee21e6fbad864f7a30b39ad0",
},
},
"properties": Object {
"disabled_saved_object_type": Object {
"dynamic": false,
"properties": Object {},
},
"new_saved_object_type": Object {
"properties": Object {
"value": Object {
"type": "text",
},
},
},
},
}
`);
expect(newState.retryCount).toEqual(0);
expect(newState.retryDelay).toEqual(0);
expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
expect(newState.retryDelay).toEqual(2000);
});
test('INIT -> WAIT_FOR_YELLOW_SOURCE when migrating from a custom kibana.index name (>= 6.5 < 7.11.0)', () => {
const res: ResponseType<'INIT'> = Either.right({
test('WAIT_FOR_MIGRATION_COMPLETION -> WAIT_FOR_MIGRATION_COMPLETION when migrating from a custom kibana.index name (>= 6.5 < 7.11.0)', () => {
const res: ResponseType<'WAIT_FOR_MIGRATION_COMPLETION'> = Either.right({
'my-saved-objects_3': {
aliases: {
'my-saved-objects': {},
},
mappings: mappingsWithUnknownType,
mappings: { properties: {} },
settings: {},
},
});
const newState = model(
{
...baseState,
controlState: 'INIT',
...waitForMState,
currentAlias: 'my-saved-objects',
versionAlias: 'my-saved-objects_7.11.0',
versionIndex: 'my-saved-objects_7.11.0_001',
@@ -488,25 +903,22 @@ describe('migrations v2 model', () => {
res
) as WaitForYellowSourceState;

expect(newState.controlState).toBe('WAIT_FOR_YELLOW_SOURCE');
expect(newState.sourceIndex.value).toBe('my-saved-objects_3');
expect(newState.retryCount).toEqual(0);
expect(newState.retryDelay).toEqual(0);
expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
expect(newState.retryDelay).toEqual(2000);
});
test('INIT -> WAIT_FOR_YELLOW_SOURCE when migrating from a custom kibana.index v2 migrations index (>= 7.11.0)', () => {
const res: ResponseType<'INIT'> = Either.right({
test('WAIT_FOR_MIGRATION_COMPLETION -> WAIT_FOR_MIGRATION_COMPLETION when migrating from a custom kibana.index v2 migrations index (>= 7.11.0)', () => {
const res: ResponseType<'WAIT_FOR_MIGRATION_COMPLETION'> = Either.right({
'my-saved-objects_7.11.0': {
aliases: {
'my-saved-objects': {},
},
mappings: mappingsWithUnknownType,
mappings: { properties: {} },
settings: {},
},
});
const newState = model(
{
...baseState,
controlState: 'INIT',
...waitForMState,
kibanaVersion: '7.12.0',
currentAlias: 'my-saved-objects',
versionAlias: 'my-saved-objects_7.12.0',
@@ -515,23 +927,31 @@ describe('migrations v2 model', () => {
res
) as WaitForYellowSourceState;

expect(newState.controlState).toBe('WAIT_FOR_YELLOW_SOURCE');
expect(newState.sourceIndex.value).toBe('my-saved-objects_7.11.0');

expect(newState.retryCount).toEqual(0);
expect(newState.retryDelay).toEqual(0);
expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
expect(newState.retryDelay).toEqual(2000);
});
test('INIT -> CREATE_NEW_TARGET when no indices/aliases exist', () => {
const res: ResponseType<'INIT'> = Either.right({});
const newState = model(initState, res);
test('WAIT_FOR_MIGRATION_COMPLETION -> WAIT_FOR_MIGRATION_COMPLETION when no indices/aliases exist', () => {
const res: ResponseType<'WAIT_FOR_MIGRATION_COMPLETION'> = Either.right({});
const newState = model(waitForMState, res);

expect(newState).toMatchObject({
controlState: 'CREATE_NEW_TARGET',
sourceIndex: Option.none,
targetIndex: '.kibana_7.11.0_001',
expect(newState.controlState).toBe('WAIT_FOR_MIGRATION_COMPLETION');
expect(newState.retryDelay).toEqual(2000);
});

it('WAIT_FOR_MIGRATION_COMPLETION -> DONE when another instance finished the migration', () => {
const res: ResponseType<'WAIT_FOR_MIGRATION_COMPLETION'> = Either.right({
'.kibana_7.11.0_001': {
aliases: {
'.kibana': {},
'.kibana_7.11.0': {},
},
mappings: { properties: {} },
settings: {},
},
});
expect(newState.retryCount).toEqual(0);
expect(newState.retryDelay).toEqual(0);
const newState = model(waitForMState, res);

expect(newState.controlState).toEqual('DONE');
});
});
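
The rewritten tests above run the model against a `waitForMState` fixture that is defined earlier in model.test.ts and is not visible in these hunks. A minimal sketch of what those assertions imply the fixture looks like (a hypothetical reconstruction; the real fixture may carry more fields):

// Hypothetical fixture shape assumed by the tests above: the base test state
// with the control state and the new flag switched to the waiting behaviour.
const waitForMState = {
  ...baseState,
  controlState: 'WAIT_FOR_MIGRATION_COMPLETION',
  waitForMigrationCompletion: true,
};
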
@@ -38,6 +38,7 @@ import {
mergeMigrationMappingPropertyHashes,
throwBadControlState,
throwBadResponse,
versionMigrationCompleted,
} from './helpers';
import { createBatches } from './create_batches';
import type { MigrationLog } from '../types';
@@ -98,12 +99,8 @@ export const model = (currentState: State, resW: ResponseType<AllActionStates>):
const aliases = aliasesRes.right;

if (
// `.kibana` and the version specific aliases both exists and
// are pointing to the same index. This version's migration has already
// been completed.
aliases[stateP.currentAlias] != null &&
aliases[stateP.versionAlias] != null &&
aliases[stateP.currentAlias] === aliases[stateP.versionAlias]
// This version's migration has already been completed.
versionMigrationCompleted(stateP.currentAlias, stateP.versionAlias, aliases)
) {
return {
...stateP,
@@ -117,7 +114,7 @@ export const model = (currentState: State, resW: ResponseType<AllActionStates>):
targetIndex: `${stateP.indexPrefix}_${stateP.kibanaVersion}_001`,
targetIndexMappings: mergeMigrationMappingPropertyHashes(
stateP.targetIndexMappings,
indices[aliases[stateP.currentAlias]].mappings
indices[aliases[stateP.currentAlias]!].mappings
),
versionIndexReadyActions: Option.none,
};
@@ -125,7 +122,7 @@ export const model = (currentState: State, resW: ResponseType<AllActionStates>):
// `.kibana` is pointing to an index that belongs to a later
// version of Kibana .e.g. a 7.11.0 instance found the `.kibana` alias
// pointing to `.kibana_7.12.0_001`
indexBelongsToLaterVersion(aliases[stateP.currentAlias], stateP.kibanaVersion)
indexBelongsToLaterVersion(aliases[stateP.currentAlias]!, stateP.kibanaVersion)
) {
return {
...stateP,
@@ -136,12 +133,29 @@ export const model = (currentState: State, resW: ResponseType<AllActionStates>):
aliases[stateP.currentAlias]
)}`,
};
} else if (
// Don't actively participate in this migration but wait for another instance to complete it
stateP.waitForMigrationCompletion === true
) {
return {
...stateP,
controlState: 'WAIT_FOR_MIGRATION_COMPLETION',
// Wait for 2s before checking again if the migration has completed
retryDelay: 2000,
logs: [
...stateP.logs,
{
level: 'info',
message: `Migration required. Waiting until another Kibana instance completes the migration.`,
},
],
};
} else if (
// If the `.kibana` alias exists
aliases[stateP.currentAlias] != null
) {
// The source index is the index the `.kibana` alias points to
const source = aliases[stateP.currentAlias];
const source = aliases[stateP.currentAlias]!;
return {
...stateP,
controlState: 'WAIT_FOR_YELLOW_SOURCE',
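
The `versionMigrationCompleted` helper imported in the first hunk replaces the inline alias comparison that the old INIT branch performed (visible just above). Judging from that condition, a minimal sketch of the helper could look like the following; the actual implementation lives in ./helpers and may differ in signature and details:

// Sketch only: the migration for this version is complete when the `.kibana`
// alias and the version-specific alias both exist and point at the same index.
function versionMigrationCompleted(
  currentAlias: string,
  versionAlias: string,
  aliases: Record<string, string | undefined>
): boolean {
  return aliases[currentAlias] != null && aliases[currentAlias] === aliases[versionAlias];
}
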
@@ -219,6 +233,47 @@ export const model = (currentState: State, resW: ResponseType<AllActionStates>):
} else {
return throwBadResponse(stateP, res);
}
} else if (stateP.controlState === 'WAIT_FOR_MIGRATION_COMPLETION') {
const res = resW as ExcludeRetryableEsError<ResponseType<typeof stateP.controlState>>;
const indices = res.right;
const aliasesRes = getAliases(indices);
if (
// If this version's migration has already been completed we can proceed
Either.isRight(aliasesRes) &&
versionMigrationCompleted(stateP.currentAlias, stateP.versionAlias, aliasesRes.right)
) {
return {
...stateP,
// Proceed to 'DONE' and start serving traffic.
// Because WAIT_FOR_MIGRATION_COMPLETION can only be used by
// background-task nodes on Cloud, we can be confident that this node
// has exactly the same plugins enabled as the node that finished the
// migration. So we won't need to transform any old documents or update
// the mappings.
controlState: 'DONE',
// Source is a none because we didn't do any migration from a source
// index
sourceIndex: Option.none,
targetIndex: `${stateP.indexPrefix}_${stateP.kibanaVersion}_001`,
versionIndexReadyActions: Option.none,
};
} else {
// When getAliases returns a left 'multiple_indices_per_alias' error or
// the migration is not yet up to date just continue waiting
return {
...stateP,
controlState: 'WAIT_FOR_MIGRATION_COMPLETION',
// Wait for 2s before checking again if the migration has completed
retryDelay: 2000,
logs: [
...stateP.logs,
{
level: 'info',
message: `Migration required. Waiting until another Kibana instance completes the migration.`,
},
],
};
}
} else if (stateP.controlState === 'LEGACY_SET_WRITE_BLOCK') {
const res = resW as ExcludeRetryableEsError<ResponseType<typeof stateP.controlState>>;
// If the write block is successfully in place
@@ -938,27 +993,6 @@ export const model = (currentState: State, resW: ResponseType<AllActionStates>):
throwBadResponse(stateP, res.left);
}
}
} else if (stateP.controlState === 'UPDATE_TARGET_MAPPINGS') {
const res = resW as ExcludeRetryableEsError<ResponseType<typeof stateP.controlState>>;
if (Either.isRight(res)) {
return {
...stateP,
controlState: 'UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK',
updateTargetMappingsTaskId: res.right.taskId,
};
} else {
throwBadResponse(stateP, res as never);
}
} else if (stateP.controlState === 'OUTDATED_DOCUMENTS_REFRESH') {
const res = resW as ExcludeRetryableEsError<ResponseType<typeof stateP.controlState>>;
if (Either.isRight(res)) {
return {
...stateP,
controlState: 'UPDATE_TARGET_MAPPINGS',
};
} else {
throwBadResponse(stateP, res);
}
} else if (stateP.controlState === 'OUTDATED_DOCUMENTS_SEARCH_CLOSE_PIT') {
const res = resW as ExcludeRetryableEsError<ResponseType<typeof stateP.controlState>>;
if (Either.isRight(res)) {
@@ -976,6 +1010,27 @@ export const model = (currentState: State, resW: ResponseType<AllActionStates>):
} else {
throwBadResponse(stateP, res);
}
} else if (stateP.controlState === 'OUTDATED_DOCUMENTS_REFRESH') {
const res = resW as ExcludeRetryableEsError<ResponseType<typeof stateP.controlState>>;
if (Either.isRight(res)) {
return {
...stateP,
controlState: 'UPDATE_TARGET_MAPPINGS',
};
} else {
throwBadResponse(stateP, res);
}
} else if (stateP.controlState === 'UPDATE_TARGET_MAPPINGS') {
const res = resW as ExcludeRetryableEsError<ResponseType<typeof stateP.controlState>>;
if (Either.isRight(res)) {
return {
...stateP,
controlState: 'UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK',
updateTargetMappingsTaskId: res.right.taskId,
};
} else {
throwBadResponse(stateP, res as never);
}
} else if (stateP.controlState === 'UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK') {
const res = resW as ExcludeRetryableEsError<ResponseType<typeof stateP.controlState>>;
if (Either.isRight(res)) {

@@ -40,6 +40,7 @@ import type {
OutdatedDocumentsRefresh,
CheckUnknownDocumentsState,
CalculateExcludeFiltersState,
WaitForMigrationCompletionState,
} from './state';
import type { TransformRawDocs } from './types';
import * as Actions from './actions';
@@ -60,6 +61,8 @@ export const nextActionMap = (client: ElasticsearchClient, transformRawDocs: Tra
return {
INIT: (state: InitState) =>
Actions.initAction({ client, indices: [state.currentAlias, state.versionAlias] }),
WAIT_FOR_MIGRATION_COMPLETION: (state: WaitForMigrationCompletionState) =>
Actions.fetchIndices({ client, indices: [state.currentAlias, state.versionAlias] }),
WAIT_FOR_YELLOW_SOURCE: (state: WaitForYellowSourceState) =>
Actions.waitForIndexStatus({ client, index: state.sourceIndex.value, status: 'yellow' }),
CHECK_UNKNOWN_DOCUMENTS: (state: CheckUnknownDocumentsState) =>
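
For context, the action map above is what the migration state-action machine consults on every step: it runs the action registered for the current control state, feeds the response back through model(), and honours retryDelay before the next attempt. A much-simplified, hypothetical sketch of that loop (the real driver also handles logging, retry accounting and cleanup):

// Simplified driving loop, for illustration only. With the new
// WAIT_FOR_MIGRATION_COMPLETION state this amounts to re-fetching the indices
// every `retryDelay` ms (2000 in this PR) until another instance finishes.
async function runToCompletion(initialState: State, next: ReturnType<typeof nextActionMap>) {
  let state: State = initialState;
  while (state.controlState !== 'DONE' && state.controlState !== 'FATAL') {
    if (state.retryDelay > 0) {
      await new Promise((resolve) => setTimeout(resolve, state.retryDelay));
    }
    const action = (next as any)[state.controlState];
    const response = await action(state)();
    state = model(state, response);
  }
  return state;
}
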
@@ -44,6 +44,7 @@ export const MIGRATION_CLIENT_OPTIONS = { maxRetries: 0, requestTimeout: 120_000
export async function runResilientMigrator({
client,
kibanaVersion,
waitForMigrationCompletion,
targetMappings,
logger,
preMigrationScript,
@@ -56,6 +57,7 @@ export async function runResilientMigrator({
}: {
client: ElasticsearchClient;
kibanaVersion: string;
waitForMigrationCompletion: boolean;
targetMappings: IndexMapping;
preMigrationScript?: string;
logger: Logger;
@@ -68,6 +70,7 @@ export async function runResilientMigrator({
}): Promise<MigrationResult> {
const initialState = createInitialState({
kibanaVersion,
waitForMigrationCompletion,
targetMappings,
preMigrationScript,
migrationVersionPerType,
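
The only change to runResilientMigrator is threading the new flag through to createInitialState, which presumably copies it verbatim onto the BaseState shown in the next file. A hypothetical sketch of that hand-off:

// Illustration only: the flag travels unchanged from the node roles down to the
// state consumed by model(); the real createInitialState builds many more fields.
const initialStateSketch = {
  controlState: 'INIT',
  waitForMigrationCompletion, // true only for background-tasks-only nodes
  // ...remaining BaseState fields elided
};
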
@@ -149,12 +149,18 @@ export interface BaseState extends ControlState {
* DocLinks for savedObjects. to reference online documentation
*/
readonly migrationDocLinks: DocLinks['kibanaUpgradeSavedObjects'];
readonly waitForMigrationCompletion: boolean;
}

export interface InitState extends BaseState {
readonly controlState: 'INIT';
}

export interface WaitForMigrationCompletionState extends BaseState {
/** Wait until another instance completes the migration */
readonly controlState: 'WAIT_FOR_MIGRATION_COMPLETION';
}

export interface PostInitState extends BaseState {
/**
* The source index is the index from which the migration reads. If the
@@ -430,6 +436,7 @@ export interface LegacyDeleteState extends LegacyBaseState {
export type State = Readonly<
| FatalState
| InitState
| WaitForMigrationCompletionState
| DoneState
| WaitForYellowSourceState
| CheckUnknownDocumentsState

@@ -67,6 +67,7 @@ TYPES_DEPS = [
"//packages/core/saved-objects/core-saved-objects-import-export-server-internal:npm_module_types",
"//packages/core/usage-data/core-usage-data-base-server-internal:npm_module_types",
"//packages/core/deprecations/core-deprecations-server:npm_module_types",
"//packages/core/node/core-node-server:npm_module_types",
]

jsts_transpiler(

@@ -23,6 +23,7 @@ import { ByteSizeValue } from '@kbn/config-schema';
import { REPO_ROOT } from '@kbn/utils';
import { getEnvOptions } from '@kbn/config-mocks';
import { docLinksServiceMock } from '@kbn/core-doc-links-server-mocks';
import { nodeServiceMock } from '@kbn/core-node-server-mocks';
import { mockCoreContext } from '@kbn/core-base-server-mocks';
import { httpServiceMock, httpServerMock } from '@kbn/core-http-server-mocks';
import type { SavedObjectsClientFactoryProvider } from '@kbn/core-saved-objects-server';
@@ -84,6 +85,7 @@ describe('SavedObjectsService', () => {
pluginsInitialized,
elasticsearch: elasticsearchServiceMock.createInternalStart(),
docLinks: docLinksServiceMock.createStartContract(),
node: nodeServiceMock.createInternalStartContract(),
};
};

@@ -285,6 +287,81 @@ describe('SavedObjectsService', () => {
expect(KibanaMigratorMock).toHaveBeenCalledWith(expect.objectContaining({ kibanaVersion }));
});

it('calls KibanaMigrator with waitForMigrationCompletion=false for the default ui+background tasks role', async () => {
const pkg = loadJsonFile.sync(join(REPO_ROOT, 'package.json')) as RawPackageInfo;
const kibanaVersion = pkg.version;

const coreContext = createCoreContext({
env: Env.createDefault(REPO_ROOT, getEnvOptions(), {
...pkg,
version: `${kibanaVersion}-beta1`, // test behavior when release has a version qualifier
}),
});

const soService = new SavedObjectsService(coreContext);
await soService.setup(createSetupDeps());
const startDeps = createStartDeps();
startDeps.node = nodeServiceMock.createInternalStartContract({
ui: true,
backgroundTasks: true,
});
await soService.start(startDeps);

expect(KibanaMigratorMock).toHaveBeenCalledWith(
expect.objectContaining({ waitForMigrationCompletion: false })
);
});

it('calls KibanaMigrator with waitForMigrationCompletion=false for the ui only role', async () => {
const pkg = loadJsonFile.sync(join(REPO_ROOT, 'package.json')) as RawPackageInfo;
const kibanaVersion = pkg.version;

const coreContext = createCoreContext({
env: Env.createDefault(REPO_ROOT, getEnvOptions(), {
...pkg,
version: `${kibanaVersion}-beta1`, // test behavior when release has a version qualifier
}),
});

const soService = new SavedObjectsService(coreContext);
await soService.setup(createSetupDeps());
const startDeps = createStartDeps();
startDeps.node = nodeServiceMock.createInternalStartContract({
ui: true,
backgroundTasks: false,
});
await soService.start(startDeps);

expect(KibanaMigratorMock).toHaveBeenCalledWith(
expect.objectContaining({ waitForMigrationCompletion: false })
);
});

it('calls KibanaMigrator with waitForMigrationCompletion=true for the background tasks only role', async () => {
const pkg = loadJsonFile.sync(join(REPO_ROOT, 'package.json')) as RawPackageInfo;
const kibanaVersion = pkg.version;

const coreContext = createCoreContext({
env: Env.createDefault(REPO_ROOT, getEnvOptions(), {
...pkg,
version: `${kibanaVersion}-beta1`, // test behavior when release has a version qualifier
}),
});

const soService = new SavedObjectsService(coreContext);
await soService.setup(createSetupDeps());
const startDeps = createStartDeps();
startDeps.node = nodeServiceMock.createInternalStartContract({
ui: false,
backgroundTasks: true,
});
await soService.start(startDeps);

expect(KibanaMigratorMock).toHaveBeenCalledWith(
expect.objectContaining({ waitForMigrationCompletion: true })
);
});

it('waits for all es nodes to be compatible before running migrations', async () => {
expect.assertions(2);
const coreContext = createCoreContext({ skipMigration: false });

@@ -48,6 +48,7 @@ import {
} from '@kbn/core-saved-objects-import-export-server-internal';
import type { InternalCoreUsageDataSetup } from '@kbn/core-usage-data-base-server-internal';
import type { DeprecationRegistryProvider } from '@kbn/core-deprecations-server';
import type { NodeInfo } from '@kbn/core-node-server';
import { registerRoutes } from './routes';
import { calculateStatus$ } from './status';
import { registerCoreObjectTypes } from './object_types';
@@ -85,6 +86,7 @@ export interface SavedObjectsStartDeps {
elasticsearch: InternalElasticsearchServiceStart;
pluginsInitialized?: boolean;
docLinks: DocLinksServiceStart;
node: NodeInfo;
}

export class SavedObjectsService
@@ -185,6 +187,7 @@ export class SavedObjectsService
elasticsearch,
pluginsInitialized = true,
docLinks,
node,
}: SavedObjectsStartDeps): Promise<InternalSavedObjectsServiceStart> {
if (!this.setupDeps || !this.config) {
throw new Error('#setup() needs to be run first');
@@ -194,10 +197,12 @@ export class SavedObjectsService

const client = elasticsearch.client;

const waitForMigrationCompletion = node.roles.backgroundTasks && !node.roles.ui;
const migrator = this.createMigrator(
this.config.migration,
elasticsearch.client.asInternalUser,
docLinks
docLinks,
waitForMigrationCompletion
);

this.migrator$.next(migrator);
@@ -313,7 +318,8 @@ export class SavedObjectsService
private createMigrator(
soMigrationsConfig: SavedObjectsMigrationConfigType,
client: ElasticsearchClient,
docLinks: DocLinksServiceStart
docLinks: DocLinksServiceStart,
waitForMigrationCompletion: boolean
): IKibanaMigrator {
return new KibanaMigrator({
typeRegistry: this.typeRegistry,
@@ -323,6 +329,7 @@ export class SavedObjectsService
kibanaIndex,
client,
docLinks,
waitForMigrationCompletion,
});
}
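
The single expression `node.roles.backgroundTasks && !node.roles.ui` is what ties the node roles to the migration behaviour. Spelled out against the role combinations exercised in the unit tests above (derived from that expression and those tests, nothing else):

// node roles                      backgroundTasks  ui     waitForMigrationCompletion
// ['ui', 'background_tasks']      true             true   false -> participates in the migration
// ['ui']                          false            true   false -> participates in the migration
// ['background_tasks']            true             false  true  -> waits for another instance
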
@@ -0,0 +1,154 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/

import Path from 'path';
import fs from 'fs/promises';
import JSON5 from 'json5';
import { kibanaPackageJson as pkg } from '@kbn/utils';
import { retryAsync } from '@kbn/core-saved-objects-migration-server-mocks';
import * as kbnTestServer from '../../../../test_helpers/kbn_server';
import { Root } from '../../../root';

const logFilePath = Path.join(__dirname, 'wait_for_migration_completion.log');

async function removeLogFile() {
// ignore errors if it doesn't exist
await fs.unlink(logFilePath).catch(() => void 0);
}

describe('migration with waitForCompletion=true', () => {
let esServer: kbnTestServer.TestElasticsearchUtils;
let root: Root;

beforeAll(async () => {
await removeLogFile();
});

afterAll(async () => {
if (root) {
await root.shutdown();
}
if (esServer) {
await esServer.stop();
}

await new Promise((resolve) => setTimeout(resolve, 10000));
});

it('waits for another instance to complete the migration', async () => {
const { startES } = kbnTestServer.createTestServers({
adjustTimeout: (t: number) => jest.setTimeout(t),
settings: {
es: {
license: 'basic',
},
},
});

root = createRoot();

esServer = await startES();
await root.preboot();
await root.setup();

root.start();
const esClient = esServer.es.getClient();

await retryAsync(
async () => {
const logFileContent = await fs.readFile(logFilePath, 'utf-8');
const records = logFileContent
.split('\n')
.filter(Boolean)
.map((str) => JSON5.parse(str)) as any[];

expect(
records.find((rec) =>
rec.message.startsWith(
`[.kibana] Migration required. Waiting until another Kibana instance completes the migration.`
)
)
).toBeDefined();

expect(
records.find((rec) =>
rec.message.startsWith(`[.kibana] INIT -> WAIT_FOR_MIGRATION_COMPLETION`)
)
).toBeDefined();

expect(
records.find((rec) =>
rec.message.startsWith(
`[.kibana] WAIT_FOR_MIGRATION_COMPLETION -> WAIT_FOR_MIGRATION_COMPLETION`
)
)
).toBeDefined();
},
{ retryAttempts: 100, retryDelayMs: 200 }
);

const aliases: Record<string, {}> = { '.kibana': {} };
aliases[`.kibana_${pkg.version}`] = {};
await esClient.indices.create({ index: `.kibana_${pkg.version}_001`, aliases });

await retryAsync(
async () => {
const logFileContent = await fs.readFile(logFilePath, 'utf-8');
const records = logFileContent
.split('\n')
.filter(Boolean)
.map((str) => JSON5.parse(str)) as any[];

expect(
records.find((rec) =>
rec.message.startsWith(`[.kibana] WAIT_FOR_MIGRATION_COMPLETION -> DONE`)
)
).toBeDefined();

expect(
records.find((rec) => rec.message.startsWith(`[.kibana] Migration completed`))
).toBeDefined();
},
{ retryAttempts: 100, retryDelayMs: 200 }
);
});
});

function createRoot() {
return kbnTestServer.createRootWithCorePlugins(
{
migrations: {
skip: false,
},
node: {
roles: ['background_tasks'],
},
logging: {
appenders: {
file: {
type: 'file',
fileName: logFilePath,
layout: {
type: 'json',
},
},
},
loggers: [
{
name: 'root',
level: 'info',
appenders: ['file'],
},
],
},
},
{
oss: true,
}
);
}
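
The test above stands in for the "other" Kibana instance by creating the target index and its aliases directly through the Elasticsearch client. In a real deployment the migration would instead be completed by any node that is not background-tasks-only; a hypothetical companion root for this scenario, built with the same helper, might look like:

// Hypothetical second instance: a ui node that actually performs the migration
// while the background_tasks-only root above sits in WAIT_FOR_MIGRATION_COMPLETION.
function createUiRoot() {
  return kbnTestServer.createRootWithCorePlugins(
    {
      migrations: { skip: false },
      node: { roles: ['ui'] },
    },
    { oss: true }
  );
}
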
@@ -384,6 +384,7 @@ export class Server {
elasticsearch: elasticsearchStart,
pluginsInitialized: this.#pluginsInitialized,
docLinks: docLinkStart,
node: await this.node.start(),
});
await this.resolveSavedObjectsStartPromise!(savedObjectsStart);

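
The `node` start contract passed into the saved objects service here is what carries the role flags derived from the `node.roles` setting. A rough sketch of the shape consumed by SavedObjectsService.start(), assumed from the NodeInfo usage in this diff (the authoritative types live in @kbn/core-node-server):

// Assumed shape, for illustration only.
interface NodeRolesSketch {
  backgroundTasks: boolean;
  ui: boolean;
}
interface NodeInfoSketch {
  roles: NodeRolesSketch;
}
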
@@ -40,7 +40,7 @@ export default async function ({ readConfigFile }: FtrConfigProviderContext) {
'--usageCollection.usageCounters.bufferDuration=0',

`--plugin-path=${path.resolve(__dirname, 'plugins', 'core_plugin_initializer_context')}`,
'--node.roles=["background_tasks"]',
'--node.roles=["ui","background_tasks"]',
],
},
};

@@ -16,7 +16,7 @@ export default function ({ getService, getPageObjects }: PluginFunctionalProvide
it('passes node roles to server PluginInitializerContext', async () => {
await supertest.get('/core_plugin_initializer_context/node/roles').expect(200, {
backgroundTasks: true,
ui: false,
ui: true,
});
});
});