[SO migration] remove v1 implementation (#118000)

* remove v1 implementation

* fix type

* remove unused mock

* expose kibanaVersion again

* fix migrator mock

* move KibanaMigrator out of the kibana subfolder

* fix imports

* moves migrationsv2 into migrations

* fix test mocking
Pierre Gayvallet 2021-11-10 13:41:47 +01:00 committed by GitHub
parent 7ab3593fb7
commit f7163878c0
147 changed files with 714 additions and 3991 deletions

@@ -1,222 +1,504 @@
# Saved Object Migrations
- [Introduction](#introduction)
- [Algorithm steps](#algorithm-steps)
  - [INIT](#init)
    - [Next action](#next-action)
    - [New control state](#new-control-state)
  - [CREATE_NEW_TARGET](#create_new_target)
    - [Next action](#next-action-1)
    - [New control state](#new-control-state-1)
  - [LEGACY_SET_WRITE_BLOCK](#legacy_set_write_block)
    - [Next action](#next-action-2)
    - [New control state](#new-control-state-2)
  - [LEGACY_CREATE_REINDEX_TARGET](#legacy_create_reindex_target)
    - [Next action](#next-action-3)
    - [New control state](#new-control-state-3)
  - [LEGACY_REINDEX](#legacy_reindex)
    - [Next action](#next-action-4)
    - [New control state](#new-control-state-4)
  - [LEGACY_REINDEX_WAIT_FOR_TASK](#legacy_reindex_wait_for_task)
    - [Next action](#next-action-5)
    - [New control state](#new-control-state-5)
  - [LEGACY_DELETE](#legacy_delete)
    - [Next action](#next-action-6)
    - [New control state](#new-control-state-6)
  - [WAIT_FOR_YELLOW_SOURCE](#wait_for_yellow_source)
    - [Next action](#next-action-7)
    - [New control state](#new-control-state-7)
  - [SET_SOURCE_WRITE_BLOCK](#set_source_write_block)
    - [Next action](#next-action-8)
    - [New control state](#new-control-state-8)
  - [CREATE_REINDEX_TEMP](#create_reindex_temp)
    - [Next action](#next-action-9)
    - [New control state](#new-control-state-9)
  - [REINDEX_SOURCE_TO_TEMP_OPEN_PIT](#reindex_source_to_temp_open_pit)
    - [Next action](#next-action-10)
    - [New control state](#new-control-state-10)
  - [REINDEX_SOURCE_TO_TEMP_READ](#reindex_source_to_temp_read)
    - [Next action](#next-action-11)
    - [New control state](#new-control-state-11)
  - [REINDEX_SOURCE_TO_TEMP_TRANSFORM](#reindex_source_to_temp_transform)
    - [Next action](#next-action-12)
    - [New control state](#new-control-state-12)
  - [REINDEX_SOURCE_TO_TEMP_INDEX_BULK](#reindex_source_to_temp_index_bulk)
    - [Next action](#next-action-13)
    - [New control state](#new-control-state-13)
  - [REINDEX_SOURCE_TO_TEMP_CLOSE_PIT](#reindex_source_to_temp_close_pit)
    - [Next action](#next-action-14)
    - [New control state](#new-control-state-14)
  - [SET_TEMP_WRITE_BLOCK](#set_temp_write_block)
    - [Next action](#next-action-15)
    - [New control state](#new-control-state-15)
  - [CLONE_TEMP_TO_TARGET](#clone_temp_to_target)
    - [Next action](#next-action-16)
    - [New control state](#new-control-state-16)
  - [OUTDATED_DOCUMENTS_SEARCH](#outdated_documents_search)
    - [Next action](#next-action-17)
    - [New control state](#new-control-state-17)
  - [OUTDATED_DOCUMENTS_TRANSFORM](#outdated_documents_transform)
    - [Next action](#next-action-18)
    - [New control state](#new-control-state-18)
  - [UPDATE_TARGET_MAPPINGS](#update_target_mappings)
    - [Next action](#next-action-19)
    - [New control state](#new-control-state-19)
  - [UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK](#update_target_mappings_wait_for_task)
    - [Next action](#next-action-20)
    - [New control state](#new-control-state-20)
  - [MARK_VERSION_INDEX_READY_CONFLICT](#mark_version_index_ready_conflict)
    - [Next action](#next-action-21)
    - [New control state](#new-control-state-21)
- [Manual QA Test Plan](#manual-qa-test-plan)
  - [1. Legacy pre-migration](#1-legacy-pre-migration)
  - [2. Plugins enabled/disabled](#2-plugins-enableddisabled)
    - [Test scenario 1 (enable a plugin after migration):](#test-scenario-1-enable-a-plugin-after-migration)
    - [Test scenario 2 (disable a plugin after migration):](#test-scenario-2-disable-a-plugin-after-migration)
    - [Test scenario 3 (multiple instances, enable a plugin after migration):](#test-scenario-3-multiple-instances-enable-a-plugin-after-migration)
    - [Test scenario 4 (multiple instances, mixed plugin enabled configs):](#test-scenario-4-multiple-instances-mixed-plugin-enabled-configs)
Migrations are the mechanism by which saved object indices are kept up to date with the Kibana system. Plugin authors write their plugins to work with a certain set of mappings, and documents of a certain shape. Migrations ensure that the index actually conforms to those expectations.
# Introduction
In the past, the risk of downtime caused by Kibana's saved object upgrade
migrations has discouraged users from adopting the latest features. v2
migrations aim to solve this problem by minimizing the operational impact on
our users.
To achieve this, v2 migrations use a new migration algorithm where every step of the
algorithm is idempotent. No matter at which step a Kibana instance gets
interrupted, it can always restart the migration from the beginning and repeat
all the steps without requiring any user intervention. This doesn't mean
migrations will never fail, but when they fail for intermittent reasons, like
an Elasticsearch cluster running out of heap, Kibana will automatically be
able to successfully complete the migration once the cluster has enough heap.
For more background information on the problem see the [saved object
migrations
RFC](https://github.com/elastic/kibana/blob/main/rfcs/text/0013_saved_object_migrations.md).
## Migrating the index
When Kibana boots, prior to serving any requests, it performs a check to see if the Kibana index needs to be migrated.
- If there are out-of-date docs, or mapping changes, or the current index is not aliased, the index is migrated.
- If the Kibana index does not exist, it is created.
All of this happens prior to Kibana serving any HTTP requests.
Here is the gist of what happens if an index migration is necessary:
* If `.kibana` (or whatever the Kibana index is named) is not an alias, it will be converted to one:
  * Reindex `.kibana` into `.kibana_1`
  * Delete `.kibana`
  * Create an alias `.kibana` that points to `.kibana_1`
* Create a `.kibana_2` index
* Copy all documents from `.kibana_1` into `.kibana_2`, running them through any applicable migrations
* Point the `.kibana` alias to `.kibana_2`
## Migrating Kibana clusters
If Kibana is being run in a cluster, migrations will be coordinated so that they only run on one Kibana instance at a time. This is done in a fairly rudimentary way. Let's say we have two Kibana instances, kibana1 and kibana2.
* kibana1 and kibana2 both start simultaneously and detect that the index requires migration
* kibana1 begins the migration and creates index `.kibana_4`
* kibana2 tries to begin the migration, but fails with the error `.kibana_4 already exists`
* kibana2 logs that it failed to create the migration index, and instead begins polling
* Every few seconds, kibana2 instance checks the `.kibana` index to see if it is done migrating
* Once `.kibana` is determined to be up to date, the kibana2 instance continues booting
In this example, if the `.kibana_4` index existed prior to Kibana booting, the entire migration process will fail, as all Kibana instances will assume another instance is migrating to the `.kibana_4` index. This problem is only fixable by deleting the `.kibana_4` index.
## Import / export
If a user attempts to import FanciPlugin 1.0 documents into a Kibana system that is running FanciPlugin 2.0, those documents will be migrated prior to being persisted in the Kibana index. If a user attempts to import documents having a migration version that is _greater_ than the current Kibana version, the documents will fail to import.
## Validation
It might happen that a user modifies their FanciPlugin 1.0 export file to have documents with a migrationVersion of 2.0.0. In this scenario, Kibana will store those documents as if they are up to date, even though they are not, resulting in unknown and probably undesirable behavior.
Similarly, Kibana server APIs assume that they are sent up to date documents unless a document specifies a migrationVersion. This means that out-of-date callers of our APIs will send us out-of-date documents, and those documents will be accepted and stored as if they are up-to-date.
To prevent this from happening, migration authors should _always_ write a [validation](../validation) function that throws an error if a document is not up to date, and this validation function should always be updated any time a new migration is added for the relevant document types.
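For example (a sketch of the idea only; the function name and document shape are illustrative, not the exact validation API):
```ts
// Throws if a dashboard document has not been migrated to the latest
// dashboard migration version. Update `LATEST_DASHBOARD_VERSION` every
// time a new dashboard migration is added.
const LATEST_DASHBOARD_VERSION = '2.0.0';

function validateDashboard(doc: { id: string; migrationVersion?: Record<string, string> }): void {
  const version = doc.migrationVersion?.dashboard;
  if (version !== LATEST_DASHBOARD_VERSION) {
    throw new Error(
      `Document ${doc.id} is out of date: expected migrationVersion.dashboard ` +
        `${LATEST_DASHBOARD_VERSION}, got ${version}`
    );
  }
}
```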
## Document ownership
In the eyes of the migration system, only one plugin can own a saved object type, or a root-level property on a saved object.
So, let's say we have a document that looks like this:
```js
{
  type: 'dashboard',
  attributes: { title: 'whatever' },
  securityKey: '324234234kjlke2',
}
```
In this document, one plugin might own the `dashboard` type, and another plugin might own the `securityKey` property. If two or more plugins define `securityKey` migrations `{ migrations: { securityKey: { ... } } }`, Kibana will fail to start.
To write a migration for this document, the dashboard plugin might look something like this:
```js
uiExports: {
  migrations: {
    // This is whatever value your document's "type" field is
    dashboard: {
      // Takes a pre 1.9.0 dashboard doc, and converts it to 1.9.0
      '1.9.0': (doc) => {
        doc.attributes.title = doc.attributes.title.toUpperCase();
        return doc;
      },
      // Takes a 1.9.0 dashboard doc, and converts it to a 2.0.0
      '2.0.0': (doc) => {
        doc.attributes.title = doc.attributes.title + '!!!';
        return doc;
      },
    },
  },
  // ... normal uiExport stuff
}
```
After Kibana migrates the index, our example document would have `{ attributes: { title: 'WHATEVER!!!' } }`.
Each migration function only needs to be able to handle documents belonging to the previous version. The initial migration function (in this example, `1.9.0`) needs to be more flexible, as it may be passed documents of any pre `1.9.0` shape.
## Disabled plugins
If a plugin is disabled, all of its documents are retained in the Kibana index. They can be imported and exported. When the plugin is re-enabled, Kibana will migrate any out of date documents that were imported or retained while it was disabled.
## Configuration
Kibana index migrations expose a few config settings which might be tweaked:
* `migrations.scrollDuration` - The
[scroll](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html#scroll-search-context)
value used to read batches of documents from the source index. Defaults to
`15m`.
* `migrations.batchSize` - The number of documents to read / transform / write
at a time during index migrations.
* `migrations.pollInterval` - How often, in milliseconds, secondary Kibana
instances will poll to see if the primary Kibana instance has finished
migrating the index.
* `migrations.skip` - Skip running migrations on startup (defaults to false).
This should only be used for running integration tests without a running
Elasticsearch cluster. Note: even though migrations won't run on startup,
individual docs will still be migrated when read from ES.
## Example
To illustrate how migrations work, let's walk through an example, using a fictional plugin: `FanciPlugin`.
FanciPlugin 1.0 had a mapping that looked like this:
```js
{
  fanci: {
    properties: {
      fanciName: { type: 'keyword' },
    },
  },
}
```
But in 2.0, it was decided that `fanciName` should be renamed to `title`.
So, FanciPlugin 2.0 has a mapping that looks like this:
```js
{
  fanci: {
    properties: {
      title: { type: 'keyword' },
    },
  },
}
```
Note, the `fanciName` property is gone altogether. The problem is that lots of people have used FanciPlugin 1.0, and there are lots of documents out in the wild that have the `fanciName` property. FanciPlugin 2.0 won't know how to handle these documents, as it now expects that property to be called `title`.
To solve this problem, the FanciPlugin authors write a migration which will take all 1.0 documents and transform them into 2.0 documents.
FanciPlugin's uiExports is modified to have a migrations section that looks like this:
```js
uiExports: {
  migrations: {
    // This is whatever value your document's "type" field is
    fanci: {
      // This is the version of the plugin for which this migration was written, and
      // should follow semver conventions. Here, doc is a pre 2.0.0 document which this
      // function will modify to have the shape we expect in 2.0.0
      '2.0.0': (doc) => {
        const { fanciName } = doc.attributes;
        delete doc.attributes.fanciName;
        doc.attributes.title = fanciName;
        return doc;
      },
    },
  },
  // ... normal uiExport stuff
}
```
Now, whenever Kibana boots, if FanciPlugin is enabled, Kibana scans its index for any documents that have type 'fanci' and have a `migrationVersion.fanci` property that is anything other than `2.0.0`. If any such documents are found, the index is determined to be out of date (or at least of the wrong version), and Kibana attempts to migrate the index.
At the end of the migration, Kibana's fanci documents will look something like this:
```js
{
  id: 'someid',
  type: 'fanci',
  attributes: {
    title: 'Shazm!',
  },
  migrationVersion: { fanci: '2.0.0' },
}
```
Note, the migrationVersion property has been added, and it contains information about what migrations were applied to the document.
## Source code
The migrations source code is grouped into two folders:
* `core` - Contains index-agnostic, general migration logic, which could be reused for indices other than `.kibana`
* `kibana` - Contains a relatively light-weight wrapper around core, which provides `.kibana` index-specific logic
Generally, the code eschews classes in favor of functions and basic data structures. The publicly exported code is all class-based, however, in an attempt to conform to Kibana norms.
### Core
There are three core entry points.
* index_migrator - Logic for migrating an index
* document_migrator - Logic for migrating an individual document, used by index_migrator, but also by the saved object client to migrate docs during document creation
* build_active_mappings - Logic to convert mapping properties into a full index mapping object, including the core properties required by any saved object index
## Testing
Run Jest tests:
Documentation: https://www.elastic.co/guide/en/kibana/current/development-tests.html#_unit_testing
```
yarn test:jest src/core/server/saved_objects/migrations --watch
```
Run integration tests:
```
node scripts/functional_tests_server
node scripts/functional_test_runner --config test/api_integration/config.js --grep migration
```
# Algorithm steps
The design goals for the algorithm were to keep downtime below 10 minutes for
100k saved objects while guaranteeing no data loss and keeping steps as simple
and explicit as possible.
The algorithm is implemented as a state-action machine based on https://www.microsoft.com/en-us/research/uploads/prod/2016/12/Computation-and-State-Machines.pdf
The state-action machine defines its behaviour in steps. Each step is a
transition from a control state s_i to the control state s_i+1 caused by an
action a_i.
```
s_i -> a_i -> s_i+1
s_i+1 -> a_i+1 -> s_i+2
```
Given a control state s1, `next(s1)` returns the next action to execute.
Actions are asynchronous; once the action resolves, we can use the action
response to determine the next state to transition to, as defined by the
function `model(state, response)`.
We can then loosely define a step as:
```
s_i+1 = model(s_i, await next(s_i)())
```
When there are no more actions returned by `next`, the state-action machine
terminates, as in the DONE and FATAL control states.
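Expressed in TypeScript, the driving loop is roughly the following (a loose sketch with simplified, hypothetical signatures mirroring the formulas above):
```ts
type ControlState = { controlState: string };
type Action = () => Promise<unknown>;

declare function next(state: ControlState): Action | null;
declare function model(state: ControlState, response: unknown): ControlState;

async function run(initialState: ControlState): Promise<ControlState> {
  let state = initialState;
  let action = next(state);
  // `next` returns no action for terminal states like DONE and FATAL.
  while (action !== null) {
    const response = await action();
    state = model(state, response); // s_i+1 = model(s_i, await next(s_i)())
    action = next(state);
  }
  return state;
}
```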
What follows is a list of all control states. For each control state the
following is described:
- _next action_: the next action triggered by the current control state
- _new control state_: based on the action response, the possible new control states that the machine will transition to
Since the algorithm runs once for each saved object index the steps below
always reference a single saved object index `.kibana`. When Kibana starts up,
all the steps are also repeated for the `.kibana_task_manager` index but this
is left out of the description for brevity.
## INIT
### Next action
`fetchIndices`
Fetch the saved object indices, mappings and aliases to find the source index
and determine whether we're migrating from a legacy index or a v1 migrations
index.
### New control state
1. If the `.kibana` alias and the version-specific alias both exist and point
to the same index, this version's migration has already been completed. Since
the same version could have had plugins enabled at any time that would
introduce new transforms or mappings, we still need to check for outdated documents.
`OUTDATED_DOCUMENTS_SEARCH`
2. If `.kibana` is pointing to an index that belongs to a later version of
Kibana, e.g. a 7.11.0 instance found the `.kibana` alias pointing to
`.kibana_7.12.0_001`, fail the migration.
`FATAL`
3. If the `.kibana` alias exists, we're migrating from either a v1 or v2 index
and the migration source index is the index the `.kibana` alias points to.
`WAIT_FOR_YELLOW_SOURCE`
4. If `.kibana` is a concrete index, we're migrating from a legacy index.
`LEGACY_SET_WRITE_BLOCK`
5. If there are no `.kibana` indices, this is a fresh deployment. Initialize a
new saved objects index.
`CREATE_NEW_TARGET`
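Sketched as code, this decision might look like the following (the `FetchIndicesResult` shape and helper functions are hypothetical simplifications):
```ts
// Hypothetical, simplified shapes: `aliases` maps alias name -> concrete index.
interface FetchIndicesResult {
  aliases: Record<string, string>;
  concreteIndices: string[];
}

declare function indexVersion(indexName: string): string; // '.kibana_7.12.0_001' -> '7.12.0'
declare function semverGt(a: string, b: string): boolean;

function initNextControlState(res: FetchIndicesResult, kibanaVersion: string): string {
  const current = res.aliases['.kibana'];
  const versioned = res.aliases[`.kibana_${kibanaVersion}`];
  if (current !== undefined && current === versioned) return 'OUTDATED_DOCUMENTS_SEARCH'; // (1)
  if (current !== undefined && semverGt(indexVersion(current), kibanaVersion)) return 'FATAL'; // (2)
  if (current !== undefined) return 'WAIT_FOR_YELLOW_SOURCE'; // (3)
  if (res.concreteIndices.includes('.kibana')) return 'LEGACY_SET_WRITE_BLOCK'; // (4)
  return 'CREATE_NEW_TARGET'; // (5)
}
```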
## CREATE_NEW_TARGET
### Next action
`createIndex`
Create the target index. This operation is idempotent: if the index already exists, we wait until its status turns yellow.
### New control state
`MARK_VERSION_INDEX_READY`
## LEGACY_SET_WRITE_BLOCK
### Next action
`setWriteBlock`
Set a write block on the legacy index to prevent any older Kibana instances
from writing to the index while the migration is in progress which could cause
lost acknowledged writes.
This is the first of a series of `LEGACY_*` control states that will:
- reindex the concrete legacy `.kibana` index into a `.kibana_pre6.5.0_001` index
- delete the concrete `.kibana` _index_ so that we're able to create a `.kibana` _alias_
### New control state
1. If the write block was successfully added
`LEGACY_CREATE_REINDEX_TARGET`
2. If the write block failed because the index doesn't exist, it means another instance already completed the legacy pre-migration. Proceed to the next step.
`LEGACY_CREATE_REINDEX_TARGET`
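A sketch of `setWriteBlock` using the Elasticsearch JS client's `indices.addBlock` (error handling simplified):
```ts
import { Client } from '@elastic/elasticsearch';

async function setWriteBlock(client: Client, index: string): Promise<'set' | 'already_migrated'> {
  try {
    await client.indices.addBlock({ index, block: 'write' });
    return 'set';
  } catch (e: any) {
    // The index is gone: another instance already finished the legacy
    // pre-migration, so this counts as success.
    if (e?.body?.error?.type === 'index_not_found_exception') return 'already_migrated';
    throw e;
  }
}
```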
## LEGACY_CREATE_REINDEX_TARGET
### Next action
`createIndex`
Create a new `.kibana_pre6.5.0_001` index into which we can reindex the legacy
index. (Since the task manager index was converted from a data index into a
saved objects index in 7.4 it will be reindexed into `.kibana_pre7.4.0_001`)
### New control state
`LEGACY_REINDEX`
## LEGACY_REINDEX
### Next action
`reindex`
Let Elasticsearch reindex the legacy index into `.kibana_pre6.5.0_001`. (For
the task manager index we specify a `preMigrationScript` to convert the
original task manager documents into valid saved objects)
### New control state
`LEGACY_REINDEX_WAIT_FOR_TASK`
## LEGACY_REINDEX_WAIT_FOR_TASK
### Next action
`waitForReindexTask`
Wait for up to 60s for the reindex task to complete.
### New control state
1. If the reindex task completed
`LEGACY_DELETE`
2. If the reindex task failed with a `target_index_had_write_block` or
`index_not_found_exception` another instance already completed this step
`LEGACY_DELETE`
3. If the reindex task is still in progress
`LEGACY_REINDEX_WAIT_FOR_TASK`
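Together, the `reindex` and `waitForReindexTask` actions might be sketched like this (error handling for the "another instance already completed this step" responses is elided):
```ts
import { Client } from '@elastic/elasticsearch';

async function reindexAndWait(client: Client, source: string, dest: string, script?: string) {
  // Start the reindex as an Elasticsearch task instead of waiting inline.
  const { body: started } = await client.reindex({
    refresh: true,
    wait_for_completion: false,
    body: {
      source: { index: source },
      dest: { index: dest },
      ...(script ? { script: { source: script, lang: 'painless' } } : {}),
    },
  });
  // Wait up to 60s; if the task is still running, the machine simply
  // re-enters LEGACY_REINDEX_WAIT_FOR_TASK and waits again.
  const { body: task } = await client.tasks.get({
    task_id: String(started.task),
    wait_for_completion: true,
    timeout: '60s',
  });
  return task.completed === true;
}
```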
## LEGACY_DELETE
### Next action
`updateAliases`
Use the updateAliases API to atomically remove the legacy index and create a
new `.kibana` alias that points to `.kibana_pre6.5.0_001`.
### New control state
1. If the action succeeds
`SET_SOURCE_WRITE_BLOCK`
2. If the action fails with `remove_index_not_a_concrete_index` or
`index_not_found_exception` another instance has already completed this step.
`SET_SOURCE_WRITE_BLOCK`
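A sketch of the atomic swap (index names as in the description above):
```ts
import { Client } from '@elastic/elasticsearch';

// Delete the concrete legacy `.kibana` index and create the `.kibana`
// alias in a single atomic updateAliases call, so other instances see
// either the old world or the new one, never an in-between state.
async function legacyDelete(client: Client) {
  await client.indices.updateAliases({
    body: {
      actions: [
        { remove_index: { index: '.kibana' } },
        { add: { index: '.kibana_pre6.5.0_001', alias: '.kibana' } },
      ],
    },
  });
}
```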
## WAIT_FOR_YELLOW_SOURCE
### Next action
`waitForIndexStatusYellow`
Wait for the source index status to be "yellow". This means the index's primary shard is allocated and the index is ready for searching/indexing documents, but ES wasn't able to allocate the replicas.
We don't have as much data redundancy as we could have, but it's enough to start the migration.
### New control state
`SET_SOURCE_WRITE_BLOCK`
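For example (a sketch; scoping `cluster.health` to the index makes the check independent of the overall cluster status):
```ts
import { Client } from '@elastic/elasticsearch';

// Resolves once the source index has at least yellow status, i.e. its
// primary shards are allocated, and throws if the timeout is reached.
async function waitForIndexStatusYellow(client: Client, index: string) {
  await client.cluster.health({ index, wait_for_status: 'yellow', timeout: '30s' });
}
```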
## SET_SOURCE_WRITE_BLOCK
### Next action
`setWriteBlock`
Set a write block on the source index to prevent any older Kibana instances from writing to the index while the migration is in progress which could cause lost acknowledged writes.
### New control state
`CREATE_REINDEX_TEMP`
## CREATE_REINDEX_TEMP
### Next action
`createIndex`
Create the temporary index. This operation is idempotent: if the index already exists, we wait until its status turns yellow.
- Because we will be transforming documents before writing them into this index, we can already set the mappings to the target mappings for this version. The source index might contain documents belonging to a disabled plugin. So set `dynamic: false` mappings for any unknown saved object types.
- (Since we never query the temporary index we can potentially disable refresh to speed up indexing performance. Profile to see if gains justify complexity)
### New control state
`REINDEX_SOURCE_TO_TEMP_OPEN_PIT`
## REINDEX_SOURCE_TO_TEMP_OPEN_PIT
### Next action
`openPIT`
Open a PIT. Since there is a write block on the source index there is basically no overhead to keeping the PIT so we can lean towards a larger `keep_alive` value like 10 minutes.
### New control state
`REINDEX_SOURCE_TO_TEMP_READ`
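For example (a sketch with the Elasticsearch JS client):
```ts
import { Client } from '@elastic/elasticsearch';

// Open a point-in-time over the write-blocked source index. The
// generous keep_alive is cheap because the index can no longer change.
async function openPit(client: Client, index: string): Promise<string> {
  const { body } = await client.openPointInTime({ index, keep_alive: '10m' });
  return body.id;
}
```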
## REINDEX_SOURCE_TO_TEMP_READ
### Next action
`readNextBatchOfSourceDocuments`
Read the next batch of outdated documents from the source index by using `search_after` with our PIT.
### New control state
1. If the batch contained > 0 documents
`REINDEX_SOURCE_TO_TEMP_TRANSFORM`
2. If there are no more documents returned
`REINDEX_SOURCE_TO_TEMP_CLOSE_PIT`
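One way to implement the read (the `_shard_doc` sort and the batch bookkeeping are details of this sketch, not necessarily the exact implementation):
```ts
import { Client } from '@elastic/elasticsearch';

// Read one batch from the PIT. `searchAfter` is the sort key of the
// last hit of the previous batch (undefined on the first call).
async function readBatch(client: Client, pitId: string, batchSize: number, searchAfter?: unknown[]) {
  const { body } = await client.search({
    body: {
      size: batchSize,
      sort: [{ _shard_doc: 'asc' }], // cheapest total order for PIT pagination
      pit: { id: pitId, keep_alive: '10m' },
      ...(searchAfter ? { search_after: searchAfter } : {}),
    },
  });
  const hits = body.hits.hits;
  // An empty batch signals REINDEX_SOURCE_TO_TEMP_CLOSE_PIT.
  return { docs: hits, lastSort: hits.length ? hits[hits.length - 1].sort : undefined };
}
```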
## REINDEX_SOURCE_TO_TEMP_TRANSFORM
### Next action
`transformRawDocs`
Transform the current batch of documents.
In order to support sharing saved objects to multiple spaces in 8.0, the
transforms will also regenerate document `_id`s. To ensure that this step
remains idempotent, the new `_id` is deterministically generated using UUIDv5,
ensuring that each Kibana instance generates the same new `_id` for the same document.
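A sketch of the deterministic `_id` generation (the namespace UUID and the name format are illustrative, not the exact production scheme):
```ts
import { v5 as uuidv5 } from 'uuid';

// Any fixed, valid UUID works as a namespace; what matters is that every
// Kibana instance uses the same one.
const ID_NAMESPACE = '1f2e3d4c-0000-4000-8000-000000000000';

// The same (space, type, legacyId) triple always yields the same UUID,
// so re-running the transform can never duplicate a document.
function deterministicId(space: string, type: string, legacyId: string): string {
  return uuidv5(`${space}:${type}:${legacyId}`, ID_NAMESPACE);
}
```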
### New control state
`REINDEX_SOURCE_TO_TEMP_INDEX_BULK`
## REINDEX_SOURCE_TO_TEMP_INDEX_BULK
### Next action
`bulkIndexTransformedDocuments`
Use the bulk API create action to write a batch of up-to-date documents. The
create action ensures that there will be only one write per reindexed document
even if multiple Kibana instances are performing this step. Use
`refresh=false` to speed up the create actions; the `UPDATE_TARGET_MAPPINGS`
step will ensure that the index is refreshed before we start serving traffic.
The following errors are ignored because it means another instance already
completed this step:
- documents already exist in the temp index
- temp index has a write block
- temp index is not found
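Sketched with the Elasticsearch JS client (per-item handling of the ignored errors listed above is elided):
```ts
import { Client } from '@elastic/elasticsearch';

interface TransformedDoc {
  _id: string;
  _source: Record<string, unknown>;
}

// `create` (rather than `index`) makes each write happen at most once:
// a duplicate create from another instance fails per-item and is ignored.
async function bulkIndexTransformedDocuments(client: Client, index: string, docs: TransformedDoc[]) {
  const { body } = await client.bulk({
    refresh: false, // UPDATE_TARGET_MAPPINGS refreshes before traffic is served
    body: docs.flatMap((doc) => [{ create: { _index: index, _id: doc._id } }, doc._source]),
  });
  return body;
}
```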
### New control state
1. If `currentBatch` is the last batch in `transformedDocBatches`
`REINDEX_SOURCE_TO_TEMP_READ`
2. If there are more batches left in `transformedDocBatches`
`REINDEX_SOURCE_TO_TEMP_INDEX_BULK`
## REINDEX_SOURCE_TO_TEMP_CLOSE_PIT
### Next action
`closePIT`
Close the PIT that was opened in `REINDEX_SOURCE_TO_TEMP_OPEN_PIT`.
### New control state
`SET_TEMP_WRITE_BLOCK`
## SET_TEMP_WRITE_BLOCK
### Next action
`setWriteBlock`
Set a write block on the temporary index so that we can clone it.
### New control state
`CLONE_TEMP_TO_TARGET`
## CLONE_TEMP_TO_TARGET
### Next action
`cloneIndex`
Ask Elasticsearch to clone the temporary index into the target index. If the target index already exists (because another node already started the clone operation), wait until the clone is complete by waiting for a yellow index status.
We can't use the temporary index as our target index because one instance can complete the migration, delete a document, and then a second instance starts the reindex operation and re-creates the deleted document. By cloning the temporary index and only accepting writes/deletes from the cloned target index, we prevent lost acknowledged deletes.
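A sketch of `cloneIndex` covering both the happy path and the "another node got there first" path:
```ts
import { Client } from '@elastic/elasticsearch';

async function cloneTempToTarget(client: Client, tempIndex: string, targetIndex: string) {
  try {
    await client.indices.clone({ index: tempIndex, target: targetIndex });
  } catch (e: any) {
    // Another instance already started the clone; fall through and wait.
    if (e?.body?.error?.type !== 'resource_already_exists_exception') throw e;
  }
  // Yellow status means the clone's primary shards are allocated.
  await client.cluster.health({ index: targetIndex, wait_for_status: 'yellow' });
}
```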
### New control state
`OUTDATED_DOCUMENTS_SEARCH`
## OUTDATED_DOCUMENTS_SEARCH
### Next action
`searchForOutdatedDocuments`
Search for outdated saved object documents. Will return one batch of
documents.
If another instance has a disabled plugin it will reindex that plugin's
documents without transforming them. Because this instance doesn't know which
plugins were disabled by the instance that performed the
`REINDEX_SOURCE_TO_TEMP_TRANSFORM` step, we need to search for outdated documents
and transform them to ensure that everything is up to date.
### New control state
1. Found outdated documents?
`OUTDATED_DOCUMENTS_TRANSFORM`
2. All documents up to date
`UPDATE_TARGET_MAPPINGS`
## OUTDATED_DOCUMENTS_TRANSFORM
### Next action
`transformRawDocs` + `bulkOverwriteTransformedDocuments`
Once transformed, we use an index operation to overwrite the outdated document with the up-to-date version. Optimistic concurrency control ensures that we only overwrite the document once, so that any updates/writes by another instance which already completed the migration aren't overwritten and lost.
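For a single document, the overwrite might look like this sketch, using the `seq_no`/`primary_term` the document was read with:
```ts
import { Client } from '@elastic/elasticsearch';

// The write only succeeds if the document has not changed since we read
// it; a version_conflict_engine_exception means a newer write wins.
async function overwriteDoc(
  client: Client,
  index: string,
  doc: { _id: string; _seq_no: number; _primary_term: number; _source: Record<string, unknown> }
) {
  await client.index({
    index,
    id: doc._id,
    if_seq_no: doc._seq_no,
    if_primary_term: doc._primary_term,
    body: doc._source,
  });
}
```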
### New control state
`OUTDATED_DOCUMENTS_SEARCH`
## UPDATE_TARGET_MAPPINGS
### Next action
`updateAndPickupMappings`
If another instance has some plugins disabled it will disable the mappings of those plugins' types when creating the temporary index. This action will
update the mappings and then use an `update_by_query` to ensure that all fields are "picked up" and ready to be searched over.
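A rough sketch of `updateAndPickupMappings` (simplified):
```ts
import { Client } from '@elastic/elasticsearch';

// Push the new mappings, then run a no-op update_by_query as a task so
// that every existing document is re-indexed against them ("picked up").
async function updateAndPickupMappings(client: Client, index: string, mappings: Record<string, unknown>) {
  await client.indices.putMapping({ index, body: mappings });
  const { body } = await client.updateByQuery({
    index,
    conflicts: 'proceed',
    refresh: true,
    wait_for_completion: false,
  });
  return String(body.task);
}
```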
### New control state
`UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK`
## UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK
### Next action
`updateAliases`
Atomically apply the `versionIndexReadyActions` using the _alias actions API. By performing the following actions we guarantee that if multiple versions of Kibana started the upgrade in parallel, only one version will succeed.
1. Verify that the current alias is still pointing to the source index
2. Point the version alias and the current alias to the target index.
3. Remove the temporary index
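Concretely, the actions could look like this sketch (index and alias names are illustrative):
```ts
// The must_exist remove doubles as the check in (1): it fails with
// alias_not_found_exception if another instance already moved `.kibana`.
const versionIndexReadyActions = [
  { remove: { index: '.kibana_7.10.0_001', alias: '.kibana', must_exist: true } },
  { add: { index: '.kibana_7.11.0_001', alias: '.kibana' } },
  { add: { index: '.kibana_7.11.0_001', alias: '.kibana_7.11.0' } },
  { remove_index: { index: '.kibana_7.11.0_reindex_temp' } },
];
```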
### New control state
1. If all the actions succeed we're ready to serve traffic
`DONE`
2. If action (1) fails with `alias_not_found_exception` or action (3) fails with `index_not_found_exception`, another instance already completed the migration
`MARK_VERSION_INDEX_READY_CONFLICT`
## MARK_VERSION_INDEX_READY_CONFLICT
### Next action
`fetchIndices`
Fetch the saved object indices
### New control state
If another instance completed a migration from the same source we need to verify that it is running the same version.
1. If the current and version aliases are pointing to the same index, the instance that completed the migration was on the same version and it's safe to start serving traffic.
`DONE`
2. If the other instance was running a different version we fail the migration. Once we restart, one of two things can happen: the other instance is an older version and we will restart the migration, or it's a newer version and we will refuse to start up.
`FATAL`
# Manual QA Test Plan
## 1. Legacy pre-migration
When upgrading from a legacy index additional steps are required before the
regular migration process can start.
We have the following potential legacy indices:
- v5.x index that wasn't upgraded -> Kibana should refuse to start the migration
- v5.x index that was upgraded to v6.x: `.kibana-6` _index_ with `.kibana` _alias_
- < v6.5 `.kibana` _index_ (Saved Object Migrations were
introduced in v6.5 https://github.com/elastic/kibana/pull/20243)
- TODO: Test versions which introduced the `kibana_index_template` template?
- < v7.4 `.kibana_task_manager` _index_ (Task Manager started
using Saved Objects in v7.4 https://github.com/elastic/kibana/pull/39829)
Test plan:
1. Ensure that the different versions of Kibana listed above can successfully
upgrade to 7.11.
2. Ensure that multiple Kibana nodes can migrate a legacy index in parallel
(choose a representative legacy version to test with e.g. v6.4). Add a lot
of Saved Objects to Kibana to increase the time it takes for a migration to
complete which will make it easier to introduce failures.
1. If all instances are started in parallel the upgrade should succeed
2. If nodes are randomly restarted shortly after they start participating
in the migration the upgrade should either succeed or never complete.
However, if a fatal error occurs it should never result in permanent
failure.
1. Start one instance, wait 500 ms
2. Start a second instance
3. If an instance starts a saved object migration, wait X ms before
killing the process and restarting the migration.
4. Keep decreasing X until migrations are barely able to complete.
5. If a migration fails with a fatal error, start a Kibana that doesn't
get restarted. Given enough time, it should always be able to
successfully complete the migration.
For a successful migration the following behaviour should be observed:
1. The `.kibana` index should be reindexed into a `.kibana_pre6.5.0` index
2. The `.kibana` index should be deleted
3. The `.kibana_index_template` should be deleted
4. The `.kibana_pre6.5.0` index should have a write block applied
5. Documents from `.kibana_pre6.5.0` should be migrated into `.kibana_7.11.0_001`
6. Once migration has completed, the `.kibana_current` and `.kibana_7.11.0`
aliases should point to the `.kibana_7.11.0_001` index.
## 2. Plugins enabled/disabled
Kibana plugins can be disabled/enabled at any point in time. We need to ensure
that Saved Object documents are migrated for all the possible sequences of
enabling, disabling, before or after a version upgrade.
### Test scenario 1 (enable a plugin after migration):
1. Start an old version of Kibana (< 7.11)
2. Create a document that we know will be migrated in a later version (e.g.
create a `dashboard`)
3. Disable the plugin to which the document belongs (e.g. the `dashboard` plugin)
4. Upgrade Kibana to v7.11, making sure the plugin from step (3) is still disabled.
5. Enable the plugin from step (3)
6. Restart Kibana
7. Ensure that the document from step (2) has been migrated
(`migrationVersion` contains 7.11.0)
### Test scenario 2 (disable a plugin after migration):
1. Start an old version of Kibana (< 7.11)
2. Create a document that we know will be migrated in a later version (e.g.
create a `dashboard`)
3. Upgrade Kibana to v7.11, making sure the plugin to which the document belongs is enabled.
4. Disable the plugin to which the document belongs (e.g. the `dashboard` plugin)
5. Restart Kibana
6. Ensure that Kibana logs a warning, but continues to start even though there
are saved object documents which don't belong to an enabled plugin
### Test scenario 3 (multiple instances, enable a plugin after migration):
Follow the steps from 'Test scenario 1', but perform the migration with
multiple instances of Kibana
### Test scenario 4 (multiple instances, mixed plugin enabled configs):
We don't support this upgrade scenario, but it's worth making sure we don't
have data loss when there's a user error.
1. Start an old version of Kibana (< 7.11)
2. Create a document that we know will be migrated in a later version (e.g.
create a `dashboard`)
3. Disable the plugin to which the document belongs (e.g. the `dashboard` plugin)
4. Upgrade Kibana to v7.11 using multiple instances of Kibana. The plugin from
step (3) should be enabled on half of the instances and disabled on the
other half.
5. Ensure that the document from step (2) has been migrated
(`migrationVersion` contains 7.11.0)

@@ -39,7 +39,7 @@ import {
import * as Either from 'fp-ts/lib/Either';
import * as Option from 'fp-ts/lib/Option';
import { errors } from '@elastic/elasticsearch';
import { DocumentsTransformFailed, DocumentsTransformSuccess } from '../../../migrations/core';
import { DocumentsTransformFailed, DocumentsTransformSuccess } from '../../core';
import { TaskEither } from 'fp-ts/lib/TaskEither';
import Path from 'path';

@@ -1,40 +0,0 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP
exports[`ElasticIndex write writes documents in bulk to the index 1`] = `
Array [
  Object {
    "body": Array [
      Object {
        "index": Object {
          "_id": "niceguy:fredrogers",
          "_index": ".myalias",
        },
      },
      Object {
        "niceguy": Object {
          "aka": "Mr Rogers",
        },
        "quotes": Array [
          "The greatest gift you ever give is your honest self.",
        ],
        "type": "niceguy",
      },
      Object {
        "index": Object {
          "_id": "badguy:rickygervais",
          "_index": ".myalias",
        },
      },
      Object {
        "badguy": Object {
          "aka": "Dominic Badguy",
        },
        "migrationVersion": Object {
          "badguy": "2.3.4",
        },
        "type": "badguy",
      },
    ],
  },
]
`;

@@ -1,28 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

/**
 * This file is nothing more than type signatures for the subset of
 * elasticsearch.js that migrations use. There is no actual logic /
 * functionality contained here.
 */

import * as estypes from '@elastic/elasticsearch/lib/api/typesWithBodyKey';

export type AliasAction =
  | {
      remove_index: { index: string };
    }
  | { remove: { index: string; alias: string } }
  | { add: { index: string; alias: string } };

export interface RawDoc {
  _id: estypes.Id;
  _source: any;
  _type?: string;
}

@@ -6,7 +6,7 @@
* Side Public License, v 1.
*/
import { disableUnknownTypeMappingFields } from './migration_context';
import { disableUnknownTypeMappingFields } from './disable_unknown_type_mapping_fields';
describe('disableUnknownTypeMappingFields', () => {
const sourceMappings = {

@@ -0,0 +1,60 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { SavedObjectsMappingProperties, IndexMapping } from '../../mappings';

/**
 * Merges the active mappings and the source mappings while disabling the
 * fields of any unknown Saved Object types present in the source index's
 * mappings.
 *
 * Since the Saved Objects index has `dynamic: strict` defined at the
 * top-level, only Saved Object types for which a mapping exists can be
 * inserted into the index. To ensure that we can continue to store Saved
 * Object documents belonging to a disabled plugin we define a mapping for all
 * the unknown Saved Object types that were present in the source index's
 * mappings. To limit the field count as much as possible, these unknown
 * types' mappings are set to `dynamic: false`.
 *
 * (Since we're using the source index mappings instead of looking at actual
 * document types in the index, we potentially add more "unknown types" than
 * what would be necessary to support migrating all the data over to the
 * target index.)
 *
 * @param activeMappings The mappings compiled from all the Saved Object types
 * known to this Kibana node.
 * @param sourceMappings The mappings of the index used as the migration source.
 * @returns The mappings that should be applied to the target index.
 */
export function disableUnknownTypeMappingFields(
  activeMappings: IndexMapping,
  sourceMappings: IndexMapping
): IndexMapping {
  const targetTypes = Object.keys(activeMappings.properties);
  const disabledTypesProperties = Object.keys(sourceMappings.properties ?? {})
    .filter((sourceType) => {
      const isObjectType = 'properties' in sourceMappings.properties[sourceType];
      // Only Object/Nested datatypes can be excluded from the field count by
      // using `dynamic: false`.
      return !targetTypes.includes(sourceType) && isObjectType;
    })
    .reduce((disabledTypesAcc, sourceType) => {
      disabledTypesAcc[sourceType] = { dynamic: false, properties: {} };
      return disabledTypesAcc;
    }, {} as SavedObjectsMappingProperties);
  return {
    ...activeMappings,
    properties: {
      ...sourceMappings.properties,
      ...disabledTypesProperties,
      ...activeMappings.properties,
    },
  };
}
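For illustration, given a registered `dashboard` type and an `old_type` that only exists in the source index, the merge keeps `old_type` but disables it (hypothetical mappings):
```ts
const target = disableUnknownTypeMappingFields(
  { dynamic: 'strict', properties: { dashboard: { properties: { title: { type: 'text' } } } } },
  { dynamic: 'strict', properties: { old_type: { properties: { field: { type: 'keyword' } } } } }
);
// target.properties.dashboard comes from the active mappings, while
// target.properties.old_type === { dynamic: false, properties: {} }
```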

@@ -1,702 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
import * as estypes from '@elastic/elasticsearch/lib/api/typesWithBodyKey';
import _ from 'lodash';
import { elasticsearchClientMock } from '../../../elasticsearch/client/mocks';
import * as Index from './elastic_index';
describe('ElasticIndex', () => {
let client: ReturnType<typeof elasticsearchClientMock.createElasticsearchClient>;
beforeEach(() => {
client = elasticsearchClientMock.createElasticsearchClient();
});
describe('fetchInfo', () => {
test('it handles 404', async () => {
client.indices.get.mockResolvedValue(
elasticsearchClientMock.createSuccessTransportRequestPromise({}, { statusCode: 404 })
);
const info = await Index.fetchInfo(client, '.kibana-test');
expect(info).toEqual({
aliases: {},
exists: false,
indexName: '.kibana-test',
mappings: { dynamic: 'strict', properties: {} },
});
expect(client.indices.get).toHaveBeenCalledWith({ index: '.kibana-test' }, { ignore: [404] });
});
test('decorates index info with exists and indexName', async () => {
client.indices.get.mockImplementation((params) => {
const index = params!.index as string;
return elasticsearchClientMock.createSuccessTransportRequestPromise({
[index]: {
aliases: { foo: index },
mappings: { dynamic: 'strict', properties: { a: 'b' } as any },
settings: {},
},
} as estypes.IndicesGetResponse);
});
const info = await Index.fetchInfo(client, '.baz');
expect(info).toEqual({
aliases: { foo: '.baz' },
mappings: { dynamic: 'strict', properties: { a: 'b' } },
exists: true,
indexName: '.baz',
settings: {},
});
});
});
describe('createIndex', () => {
test('calls indices.create', async () => {
await Index.createIndex(client, '.abcd', { foo: 'bar' } as any);
expect(client.indices.create).toHaveBeenCalledTimes(1);
expect(client.indices.create).toHaveBeenCalledWith({
body: {
mappings: { foo: 'bar' },
settings: {
auto_expand_replicas: '0-1',
number_of_shards: 1,
},
},
index: '.abcd',
});
});
});
describe('claimAlias', () => {
test('handles unaliased indices', async () => {
client.indices.getAlias.mockResolvedValue(
elasticsearchClientMock.createSuccessTransportRequestPromise({}, { statusCode: 404 })
);
await Index.claimAlias(client, '.hola-42', '.hola');
expect(client.indices.getAlias).toHaveBeenCalledWith(
{
name: '.hola',
},
{ ignore: [404] }
);
expect(client.indices.updateAliases).toHaveBeenCalledWith({
body: {
actions: [{ add: { index: '.hola-42', alias: '.hola' } }],
},
});
expect(client.indices.refresh).toHaveBeenCalledWith({
index: '.hola-42',
});
});
test('removes existing alias', async () => {
client.indices.getAlias.mockResolvedValue(
elasticsearchClientMock.createSuccessTransportRequestPromise({
'.my-fanci-index': { aliases: { '.muchacha': {} } },
})
);
await Index.claimAlias(client, '.ze-index', '.muchacha');
expect(client.indices.getAlias).toHaveBeenCalledTimes(1);
expect(client.indices.updateAliases).toHaveBeenCalledWith({
body: {
actions: [
{ remove: { index: '.my-fanci-index', alias: '.muchacha' } },
{ add: { index: '.ze-index', alias: '.muchacha' } },
],
},
});
expect(client.indices.refresh).toHaveBeenCalledWith({
index: '.ze-index',
});
});
test('allows custom alias actions', async () => {
client.indices.getAlias.mockResolvedValue(
elasticsearchClientMock.createSuccessTransportRequestPromise({
'.my-fanci-index': { aliases: { '.muchacha': {} } },
})
);
await Index.claimAlias(client, '.ze-index', '.muchacha', [
{ remove_index: { index: 'awww-snap!' } },
]);
expect(client.indices.getAlias).toHaveBeenCalledTimes(1);
expect(client.indices.updateAliases).toHaveBeenCalledWith({
body: {
actions: [
{ remove_index: { index: 'awww-snap!' } },
{ remove: { index: '.my-fanci-index', alias: '.muchacha' } },
{ add: { index: '.ze-index', alias: '.muchacha' } },
],
},
});
expect(client.indices.refresh).toHaveBeenCalledWith({
index: '.ze-index',
});
});
});
describe('convertToAlias', () => {
test('it creates the destination index, then reindexes to it', async () => {
client.indices.getAlias.mockResolvedValue(
elasticsearchClientMock.createSuccessTransportRequestPromise({
'.my-fanci-index': { aliases: { '.muchacha': {} } },
})
);
client.reindex.mockResolvedValue(
elasticsearchClientMock.createSuccessTransportRequestPromise({
task: 'abc',
} as estypes.ReindexResponse)
);
client.tasks.get.mockResolvedValue(
elasticsearchClientMock.createSuccessTransportRequestPromise({
completed: true,
} as estypes.TasksGetResponse)
);
const info = {
aliases: {},
exists: true,
indexName: '.ze-index',
mappings: {
dynamic: 'strict' as const,
properties: { foo: { type: 'keyword' } },
},
} as const;
await Index.convertToAlias(
client,
info,
'.muchacha',
10,
`ctx._id = ctx._source.type + ':' + ctx._id`
);
expect(client.indices.create).toHaveBeenCalledWith({
body: {
mappings: {
dynamic: 'strict',
properties: { foo: { type: 'keyword' } },
},
settings: { auto_expand_replicas: '0-1', number_of_shards: 1 },
},
index: '.ze-index',
});
expect(client.reindex).toHaveBeenCalledWith({
body: {
dest: { index: '.ze-index' },
source: { index: '.muchacha', size: 10 },
script: {
source: `ctx._id = ctx._source.type + ':' + ctx._id`,
lang: 'painless',
},
},
refresh: true,
wait_for_completion: false,
});
expect(client.tasks.get).toHaveBeenCalledWith({
task_id: 'abc',
});
expect(client.indices.updateAliases).toHaveBeenCalledWith({
body: {
actions: [
{ remove_index: { index: '.muchacha' } },
{ remove: { alias: '.muchacha', index: '.my-fanci-index' } },
{ add: { index: '.ze-index', alias: '.muchacha' } },
],
},
});
expect(client.indices.refresh).toHaveBeenCalledWith({
index: '.ze-index',
});
});
test('throws error if re-index task fails', async () => {
client.indices.getAlias.mockResolvedValue(
elasticsearchClientMock.createSuccessTransportRequestPromise({
'.my-fanci-index': { aliases: { '.muchacha': {} } },
})
);
client.reindex.mockResolvedValue(
elasticsearchClientMock.createSuccessTransportRequestPromise({
task: 'abc',
} as estypes.ReindexResponse)
);
client.tasks.get.mockResolvedValue(
// @ts-expect-error @elastic/elasticsearch GetTaskResponse requires a `task` property even on errors
elasticsearchClientMock.createSuccessTransportRequestPromise({
completed: true,
error: {
type: 'search_phase_execution_exception',
reason: 'all shards failed',
failed_shards: [],
},
} as estypes.TasksGetResponse)
);
const info = {
aliases: {},
exists: true,
indexName: '.ze-index',
mappings: {
dynamic: 'strict',
properties: { foo: { type: 'keyword' } },
},
};
// @ts-expect-error
await expect(Index.convertToAlias(client, info, '.muchacha', 10)).rejects.toThrow(
/Re-index failed \[search_phase_execution_exception\] all shards failed/
);
expect(client.indices.create).toHaveBeenCalledWith({
body: {
mappings: {
dynamic: 'strict',
properties: { foo: { type: 'keyword' } },
},
settings: { auto_expand_replicas: '0-1', number_of_shards: 1 },
},
index: '.ze-index',
});
expect(client.reindex).toHaveBeenCalledWith({
body: {
dest: { index: '.ze-index' },
source: { index: '.muchacha', size: 10 },
},
refresh: true,
wait_for_completion: false,
});
expect(client.tasks.get).toHaveBeenCalledWith({
task_id: 'abc',
});
});
});
describe('write', () => {
test('writes documents in bulk to the index', async () => {
client.bulk.mockResolvedValue(
elasticsearchClientMock.createSuccessTransportRequestPromise({
items: [] as any[],
} as estypes.BulkResponse)
);
const index = '.myalias';
const docs = [
{
_id: 'niceguy:fredrogers',
_source: {
type: 'niceguy',
niceguy: {
aka: 'Mr Rogers',
},
quotes: ['The greatest gift you ever give is your honest self.'],
},
},
{
_id: 'badguy:rickygervais',
_source: {
type: 'badguy',
badguy: {
aka: 'Dominic Badguy',
},
migrationVersion: { badguy: '2.3.4' },
},
},
];
await Index.write(client, index, docs);
expect(client.bulk).toHaveBeenCalled();
expect(client.bulk.mock.calls[0]).toMatchSnapshot();
});
test('fails if any document fails', async () => {
client.bulk.mockResolvedValue(
elasticsearchClientMock.createSuccessTransportRequestPromise({
items: [{ index: { error: { type: 'shazm', reason: 'dern' } } }],
} as estypes.BulkResponse)
);
const index = '.myalias';
const docs = [
{
_id: 'niceguy:fredrogers',
_source: {
type: 'niceguy',
niceguy: {
aka: 'Mr Rogers',
},
},
},
];
await expect(Index.write(client as any, index, docs)).rejects.toThrow(/dern/);
expect(client.bulk).toHaveBeenCalledTimes(1);
});
});
describe('reader', () => {
test('returns docs in batches', async () => {
const index = '.myalias';
const batch1 = [
{
_id: 'such:1',
_source: { type: 'such', such: { num: 1 } },
},
];
const batch2 = [
{
_id: 'aaa:2',
_source: { type: 'aaa', aaa: { num: 2 } },
},
{
_id: 'bbb:3',
_source: {
bbb: { num: 3 },
migrationVersion: { bbb: '3.2.5' },
type: 'bbb',
},
},
];
client.search = jest.fn().mockReturnValue(
elasticsearchClientMock.createSuccessTransportRequestPromise({
_scroll_id: 'x',
_shards: { success: 1, total: 1 },
hits: { hits: _.cloneDeep(batch1) },
})
);
client.scroll = jest
.fn()
.mockReturnValueOnce(
elasticsearchClientMock.createSuccessTransportRequestPromise({
_scroll_id: 'y',
_shards: { success: 1, total: 1 },
hits: { hits: _.cloneDeep(batch2) },
})
)
.mockReturnValueOnce(
elasticsearchClientMock.createSuccessTransportRequestPromise({
_scroll_id: 'z',
_shards: { success: 1, total: 1 },
hits: { hits: [] },
})
);
const read = Index.reader(client, index, { batchSize: 100, scrollDuration: '5m' });
expect(await read()).toEqual(batch1);
expect(await read()).toEqual(batch2);
expect(await read()).toEqual([]);
expect(client.search).toHaveBeenCalledWith({
body: {
size: 100,
query: Index.excludeUnusedTypesQuery,
},
index,
scroll: '5m',
});
expect(client.scroll).toHaveBeenCalledWith({
scroll: '5m',
scroll_id: 'x',
});
expect(client.scroll).toHaveBeenCalledWith({
scroll: '5m',
scroll_id: 'y',
});
expect(client.clearScroll).toHaveBeenCalledWith({
scroll_id: 'z',
});
});
test('returns all root-level properties', async () => {
const index = '.myalias';
const batch = [
{
_id: 'such:1',
_source: {
acls: '3230a',
foos: { is: 'fun' },
such: { num: 1 },
type: 'such',
},
},
];
client.search = jest.fn().mockReturnValueOnce(
elasticsearchClientMock.createSuccessTransportRequestPromise({
_scroll_id: 'x',
_shards: { success: 1, total: 1 },
hits: { hits: _.cloneDeep(batch) },
})
);
client.scroll = jest.fn().mockReturnValueOnce(
elasticsearchClientMock.createSuccessTransportRequestPromise({
_scroll_id: 'z',
_shards: { success: 1, total: 1 },
hits: { hits: [] },
})
);
const read = Index.reader(client, index, {
batchSize: 100,
scrollDuration: '5m',
});
expect(await read()).toEqual(batch);
});
test('fails if not all shards were successful', async () => {
const index = '.myalias';
client.search = jest.fn().mockReturnValueOnce(
elasticsearchClientMock.createSuccessTransportRequestPromise({
_shards: { successful: 1, total: 2 },
})
);
const read = Index.reader(client, index, {
batchSize: 100,
scrollDuration: '5m',
});
await expect(read()).rejects.toThrow(/shards failed/);
});
test('handles shards not being returned', async () => {
const index = '.myalias';
const batch = [
{
_id: 'such:1',
_source: {
acls: '3230a',
foos: { is: 'fun' },
such: { num: 1 },
type: 'such',
},
},
];
client.search = jest.fn().mockReturnValueOnce(
elasticsearchClientMock.createSuccessTransportRequestPromise({
_scroll_id: 'x',
hits: { hits: _.cloneDeep(batch) },
})
);
client.scroll = jest.fn().mockReturnValueOnce(
elasticsearchClientMock.createSuccessTransportRequestPromise({
_scroll_id: 'z',
hits: { hits: [] },
})
);
const read = Index.reader(client, index, {
batchSize: 100,
scrollDuration: '5m',
});
expect(await read()).toEqual(batch);
});
});
describe('migrationsUpToDate', () => {
// A helper to reduce boilerplate in the hasMigration tests that follow.
async function testMigrationsUpToDate({
index = '.myindex',
mappings,
count,
migrations,
kibanaVersion,
}: any) {
client.indices.get = jest.fn().mockReturnValueOnce(
elasticsearchClientMock.createSuccessTransportRequestPromise({
[index]: { mappings },
})
);
client.count = jest.fn().mockReturnValueOnce(
elasticsearchClientMock.createSuccessTransportRequestPromise({
count,
_shards: { success: 1, total: 1 },
})
);
const hasMigrations = await Index.migrationsUpToDate(
client,
index,
migrations,
kibanaVersion
);
return { hasMigrations };
}
test('is false if the index mappings do not contain migrationVersion', async () => {
const { hasMigrations } = await testMigrationsUpToDate({
index: '.myalias',
mappings: {
properties: {
dashboard: { type: 'text' },
},
},
count: 0,
migrations: { dashy: '2.3.4' },
kibanaVersion: '7.10.0',
});
expect(hasMigrations).toBeFalsy();
expect(client.indices.get).toHaveBeenCalledWith(
{
index: '.myalias',
},
{
ignore: [404],
}
);
});
test('is true if there are no migrations defined', async () => {
const { hasMigrations } = await testMigrationsUpToDate({
index: '.myalias',
mappings: {
properties: {
migrationVersion: {
dynamic: 'true',
type: 'object',
},
dashboard: { type: 'text' },
},
},
count: 2,
migrations: {},
kibanaVersion: '7.10.0',
});
expect(hasMigrations).toBeTruthy();
expect(client.indices.get).toHaveBeenCalledTimes(1);
});
test('is true if there are no documents out of date', async () => {
const { hasMigrations } = await testMigrationsUpToDate({
index: '.myalias',
mappings: {
properties: {
migrationVersion: {
dynamic: 'true',
type: 'object',
},
dashboard: { type: 'text' },
},
},
count: 0,
migrations: { dashy: '23.2.5' },
});
expect(hasMigrations).toBeTruthy();
expect(client.indices.get).toHaveBeenCalledTimes(1);
expect(client.count).toHaveBeenCalledTimes(1);
});
test('is false if there are documents out of date', async () => {
const { hasMigrations } = await testMigrationsUpToDate({
index: '.myalias',
mappings: {
properties: {
migrationVersion: {
dynamic: 'true',
type: 'object',
},
dashboard: { type: 'text' },
},
},
count: 3,
migrations: { dashy: '23.2.5' },
kibanaVersion: '7.10.0',
});
expect(hasMigrations).toBeFalsy();
expect(client.indices.get).toHaveBeenCalledTimes(1);
expect(client.count).toHaveBeenCalledTimes(1);
});
test('counts docs that are out of date', async () => {
await testMigrationsUpToDate({
index: '.myalias',
mappings: {
properties: {
migrationVersion: {
dynamic: 'true',
type: 'object',
},
dashboard: { type: 'text' },
},
},
count: 0,
migrations: {
dashy: '23.2.5',
bashy: '99.9.3',
flashy: '3.4.5',
},
kibanaVersion: '7.10.0',
});
function shouldClause(type: string, version: string) {
return {
bool: {
must: [
{ exists: { field: type } },
{
bool: {
must_not: { term: { [`migrationVersion.${type}`]: version } },
},
},
],
},
};
}
expect(client.count).toHaveBeenCalledWith({
body: {
query: {
bool: {
should: [
shouldClause('dashy', '23.2.5'),
shouldClause('bashy', '99.9.3'),
shouldClause('flashy', '3.4.5'),
{
bool: {
must_not: {
term: {
coreMigrationVersion: '7.10.0',
},
},
},
},
],
},
},
},
index: '.myalias',
});
});
});
});

@@ -1,425 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
/*
* This module contains various functions for querying and manipulating
* elasticsearch indices.
*/
import _ from 'lodash';
import * as estypes from '@elastic/elasticsearch/lib/api/typesWithBodyKey';
import { MigrationEsClient } from './migration_es_client';
import { IndexMapping } from '../../mappings';
import { SavedObjectsMigrationVersion } from '../../types';
import { AliasAction, RawDoc } from './call_cluster';
import { SavedObjectsRawDocSource } from '../../serialization';
const settings = { number_of_shards: 1, auto_expand_replicas: '0-1' };
export interface FullIndexInfo {
aliases: { [name: string]: object };
exists: boolean;
indexName: string;
mappings: IndexMapping;
}
/**
* Types that are no longer registered and need to be removed
*/
export const REMOVED_TYPES: string[] = [
'apm-services-telemetry',
'background-session',
'cases-sub-case',
'file-upload-telemetry',
// https://github.com/elastic/kibana/issues/91869
'fleet-agent-events',
// Was removed in 7.12
'ml-telemetry',
'server',
// https://github.com/elastic/kibana/issues/95617
'tsvb-validation-telemetry',
// replaced by osquery-manager-usage-metric
'osquery-usage-metric',
// Was removed in 7.16
'timelion-sheet',
].sort();
// When migrating from the outdated index we use a read query which excludes
// saved objects which are no longer used. These saved objects will still be
// kept in the outdated index for backup purposes, but won't be available in
// the upgraded index.
export const excludeUnusedTypesQuery: estypes.QueryDslQueryContainer = {
bool: {
must_not: [
...REMOVED_TYPES.map((typeName) => ({
term: {
type: typeName,
},
})),
// https://github.com/elastic/kibana/issues/96131
{
bool: {
must: [
{
match: {
type: 'search-session',
},
},
{
match: {
'search-session.persisted': false,
},
},
],
},
},
],
},
};
/**
* A slight enhancement to indices.get, that adds indexName, and validates that the
* index mappings are somewhat what we expect.
*/
export async function fetchInfo(client: MigrationEsClient, index: string): Promise<FullIndexInfo> {
const { body, statusCode } = await client.indices.get({ index }, { ignore: [404] });
if (statusCode === 404) {
return {
aliases: {},
exists: false,
indexName: index,
mappings: { dynamic: 'strict', properties: {} },
};
}
const [indexName, indexInfo] = Object.entries(body)[0];
// @ts-expect-error @elastic/elasticsearch IndexState.alias and IndexState.mappings should be required
return assertIsSupportedIndex({ ...indexInfo, exists: true, indexName });
}
/**
* Creates a reader function that serves up batches of documents from the index. We aren't using
* an async generator, as that feature currently breaks Kibana's tooling.
*
* @param client - The elastic search connection
* @param index - The index to be read from
* @param {opts}
* @prop batchSize - The number of documents to read at a time
* @prop scrollDuration - The scroll duration used for scrolling through the index
*/
export function reader(
client: MigrationEsClient,
index: string,
{ batchSize = 10, scrollDuration = '15m' }: { batchSize: number; scrollDuration: string }
) {
const scroll = scrollDuration;
let scrollId: string | undefined;
const nextBatch = () =>
scrollId !== undefined
? client.scroll<SavedObjectsRawDocSource>({
scroll,
scroll_id: scrollId,
})
: client.search<SavedObjectsRawDocSource>({
body: {
size: batchSize,
query: excludeUnusedTypesQuery,
},
index,
scroll,
});
const close = async () => scrollId && (await client.clearScroll({ scroll_id: scrollId }));
return async function read() {
const result = await nextBatch();
assertResponseIncludeAllShards(result.body);
scrollId = result.body._scroll_id;
const docs = result.body.hits.hits;
if (!docs.length) {
await close();
}
return docs;
};
}
/**
* Writes the specified documents to the index, throws an exception
* if any of the documents fail to save.
*/
export async function write(client: MigrationEsClient, index: string, docs: RawDoc[]) {
const { body } = await client.bulk({
body: docs.reduce((acc: object[], doc: RawDoc) => {
acc.push({
index: {
_id: doc._id,
_index: index,
},
});
acc.push(doc._source);
return acc;
}, []),
});
const err = _.find(body.items, 'index.error.reason');
if (!err) {
return;
}
const exception: any = new Error(err.index!.error!.reason);
exception.detail = err;
throw exception;
}
/**
* Checks to see if the specified index is up to date. It does this by checking
* that the index has the expected mappings and by counting
* the number of documents that have a property which has migrations defined for it,
* but which has not had those migrations applied. We don't want to cache the
* results of this function (e.g. in context or somewhere), as it is important that
* it performs the check *each* time it is called, rather than memoizing itself,
* as this is used to determine if migrations are complete.
*
* @param client - The connection to ElasticSearch
* @param index
* @param migrationVersion - The latest versions of the migrations
*/
export async function migrationsUpToDate(
client: MigrationEsClient,
index: string,
migrationVersion: SavedObjectsMigrationVersion,
kibanaVersion: string,
retryCount: number = 10
): Promise<boolean> {
try {
const indexInfo = await fetchInfo(client, index);
if (!indexInfo.mappings.properties?.migrationVersion) {
return false;
}
// If no migrations are actually defined, we're up to date!
if (Object.keys(migrationVersion).length <= 0) {
return true;
}
const { body } = await client.count({
body: {
query: {
bool: {
should: [
...Object.entries(migrationVersion).map(([type, latestVersion]) => ({
bool: {
must: [
{ exists: { field: type } },
{
bool: {
must_not: { term: { [`migrationVersion.${type}`]: latestVersion } },
},
},
],
},
})),
{
bool: {
must_not: {
term: {
coreMigrationVersion: kibanaVersion,
},
},
},
},
],
},
},
},
index,
});
assertResponseIncludeAllShards(body);
return body.count === 0;
} catch (e) {
// retry for Service Unavailable
if (e.status !== 503 || retryCount === 0) {
throw e;
}
await new Promise((r) => setTimeout(r, 1000));
return await migrationsUpToDate(client, index, migrationVersion, kibanaVersion, retryCount - 1);
}
}
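// A worked example of the count query above: with
// migrationVersion = { dashboard: '7.10.0' } and kibanaVersion = '7.10.0',
// a document is counted as outdated when it has a `dashboard` property but
// `migrationVersion.dashboard` is not '7.10.0', or when its
// `coreMigrationVersion` is anything other than '7.10.0' (including missing).
// The index is only up to date when that count is zero.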
export async function createIndex(
client: MigrationEsClient,
index: string,
mappings?: IndexMapping
) {
await client.indices.create({
body: { mappings, settings },
index,
});
}
/**
 * Converts an index to an alias. The `alias` parameter is the desired alias name,
 * which is currently a concrete index. This function will reindex `alias` into a
 * new index, delete the `alias` index, and then create an alias `alias` that
 * points to the new index.
 *
 * @param client - The Elasticsearch connection
 * @param info - Information about the mappings and name of the new index
 * @param alias - The name of the index being converted to an alias
 */
export async function convertToAlias(
client: MigrationEsClient,
info: FullIndexInfo,
alias: string,
batchSize: number,
script?: string
) {
await client.indices.create({
body: { mappings: info.mappings, settings },
index: info.indexName,
});
await reindex(client, alias, info.indexName, batchSize, script);
await claimAlias(client, info.indexName, alias, [{ remove_index: { index: alias } }]);
}
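// Example flow (index names are illustrative): if `.kibana` is still a concrete
// index, convertToAlias reindexes `.kibana` into the new `info.indexName`
// (say `.kibana_1`), then atomically removes the `.kibana` index and adds a
// `.kibana` alias pointing at `.kibana_1` via the `remove_index` alias action.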
/**
* Points the specified alias to the specified index. This is an exclusive
* alias, meaning that it will only point to one index at a time, so we
* remove any other indices from the alias.
*
* @param {MigrationEsClient} client
* @param {string} index
* @param {string} alias
* @param {AliasAction[]} aliasActions - Optional actions to be added to the updateAliases call
*/
export async function claimAlias(
client: MigrationEsClient,
index: string,
alias: string,
aliasActions: AliasAction[] = []
) {
const { body, statusCode } = await client.indices.getAlias({ name: alias }, { ignore: [404] });
const aliasInfo = statusCode === 404 ? {} : body;
const removeActions = Object.keys(aliasInfo).map((key) => ({ remove: { index: key, alias } }));
await client.indices.updateAliases({
body: {
actions: aliasActions.concat(removeActions).concat({ add: { index, alias } }),
},
});
await client.indices.refresh({ index });
}
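// For example, if `.kibana` currently points at `.kibana_1` and we claim it for
// `.kibana_2`, the resulting updateAliases call looks like this (the same shape
// the IndexMigrator tests further down assert):
//
//   { body: { actions: [
//       { remove: { alias: '.kibana', index: '.kibana_1' } },
//       { add: { alias: '.kibana', index: '.kibana_2' } },
//   ] } }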
/**
 * This is a rough check to ensure that the index being migrated satisfies at least
 * some rudimentary expectations. Past Kibana indices had multiple root document
 * types, which the migration system does not (yet?) handle; such indices need to
 * be upgraded via the v5 -> v6 upgrade tools first. This file contains
 * index-agnostic logic, and this check is itself index-agnostic, though the error
 * hint is a bit Kibana specific.
 *
 * @param {FullIndexInfo} indexInfo
 */
function assertIsSupportedIndex(indexInfo: FullIndexInfo) {
const mappings = indexInfo.mappings as any;
const isV7Index = !!mappings.properties;
if (!isV7Index) {
throw new Error(
`Index ${indexInfo.indexName} belongs to a version of Kibana ` +
`that cannot be automatically migrated. Reset it or use the X-Pack upgrade assistant.`
);
}
return indexInfo;
}
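// For example, a pre-7.0 index whose mappings are keyed by document type, such
// as `{ doc: { properties: { ... } } }`, has no top-level `properties` key and
// is rejected, while a 7.x index with `{ properties: { ... } }` passes.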
/**
 * Provides protection against reading from or re-indexing an index with missing
 * shards, which could result in data loss. This shouldn't be common, as the Saved
 * Object indices should only ever have a single shard. This is more to handle
 * instances where customers manually expand the shards of an index.
 */
function assertResponseIncludeAllShards({ _shards }: { _shards: estypes.ShardStatistics }) {
if (!_.has(_shards, 'total') || !_.has(_shards, 'successful')) {
return;
}
const failed = _shards.total - _shards.successful;
if (failed > 0) {
throw new Error(
`Re-index failed :: ${failed} of ${_shards.total} shards failed. ` +
`Check Elasticsearch cluster health for more information.`
);
}
}
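// For example, a response with `_shards: { total: 2, successful: 1 }` throws
// `Re-index failed :: 1 of 2 shards failed. ...`, while responses that omit
// the shard statistics are let through unchecked.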
/**
* Reindexes from source to dest, polling for the reindex completion.
*/
async function reindex(
client: MigrationEsClient,
source: string,
dest: string,
batchSize: number,
script?: string
) {
// We poll instead of having the request wait for completion, as for large indices,
// the request times out on the Elasticsearch side of things. We have a relatively tight
// polling interval, as the request is fairly efficient, and we don't
// want to block index migrations for too long on this.
const pollInterval = 250;
const { body: reindexBody } = await client.reindex({
body: {
dest: { index: dest },
source: { index: source, size: batchSize },
script: script
? {
source: script,
lang: 'painless',
}
: undefined,
},
refresh: true,
wait_for_completion: false,
});
const task = reindexBody.task;
let completed = false;
while (!completed) {
await new Promise((r) => setTimeout(r, pollInterval));
const { body } = await client.tasks.get({
task_id: String(task),
});
const e = body.error;
if (e) {
throw new Error(`Re-index failed [${e.type}] ${e.reason} :: ${JSON.stringify(e)}`);
}
completed = body.completed;
}
}


@ -7,16 +7,14 @@
*/
export { DocumentMigrator } from './document_migrator';
export { IndexMigrator } from './index_migrator';
export { buildActiveMappings } from './build_active_mappings';
export type { LogFn, SavedObjectsMigrationLogger } from './migration_logger';
export type { MigrationResult, MigrationStatus } from './migration_coordinator';
export { createMigrationEsClient } from './migration_es_client';
export type { MigrationEsClient } from './migration_es_client';
export { excludeUnusedTypesQuery, REMOVED_TYPES } from './elastic_index';
export { excludeUnusedTypesQuery, REMOVED_TYPES } from './unused_types';
export { TransformSavedObjectDocumentError } from './transform_saved_object_document_error';
export type {
DocumentsTransformFailed,
DocumentsTransformSuccess,
TransformErrorObjects,
} from './migrate_raw_docs';
export { disableUnknownTypeMappingFields } from './disable_unknown_type_mapping_fields';
export type { MigrationResult, MigrationStatus } from './types';


@ -1,478 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
import _ from 'lodash';
import * as estypes from '@elastic/elasticsearch/lib/api/typesWithBodyKey';
import { elasticsearchClientMock } from '../../../elasticsearch/client/mocks';
import { SavedObjectUnsanitizedDoc, SavedObjectsSerializer } from '../../serialization';
import { SavedObjectTypeRegistry } from '../../saved_objects_type_registry';
import { IndexMigrator } from './index_migrator';
import { MigrationOpts } from './migration_context';
import { loggingSystemMock } from '../../../logging/logging_system.mock';
describe('IndexMigrator', () => {
let testOpts: jest.Mocked<MigrationOpts> & {
client: ReturnType<typeof elasticsearchClientMock.createElasticsearchClient>;
};
beforeEach(() => {
testOpts = {
batchSize: 10,
client: elasticsearchClientMock.createElasticsearchClient(),
index: '.kibana',
kibanaVersion: '7.10.0',
log: loggingSystemMock.create().get(),
setStatus: jest.fn(),
mappingProperties: {},
pollInterval: 1,
scrollDuration: '1m',
documentMigrator: {
migrationVersion: {},
migrate: _.identity,
migrateAndConvert: _.identity,
prepareMigrations: jest.fn(),
},
serializer: new SavedObjectsSerializer(new SavedObjectTypeRegistry()),
};
});
test('creates the index if it does not exist', async () => {
const { client } = testOpts;
testOpts.mappingProperties = { foo: { type: 'long' } as any };
withIndex(client, { index: { statusCode: 404 }, alias: { statusCode: 404 } });
await new IndexMigrator(testOpts).migrate();
expect(client.indices.create).toHaveBeenCalledWith({
body: {
mappings: {
dynamic: 'strict',
_meta: {
migrationMappingPropertyHashes: {
foo: '18c78c995965207ed3f6e7fc5c6e55fe',
migrationVersion: '4a1746014a75ade3a714e1db5763276f',
namespace: '2f4316de49999235636386fe51dc06c1',
namespaces: '2f4316de49999235636386fe51dc06c1',
originId: '2f4316de49999235636386fe51dc06c1',
references: '7997cf5a56cc02bdc9c93361bde732b0',
coreMigrationVersion: '2f4316de49999235636386fe51dc06c1',
type: '2f4316de49999235636386fe51dc06c1',
updated_at: '00da57df13e94e9d98437d13ace4bfe0',
},
},
properties: {
foo: { type: 'long' },
migrationVersion: { dynamic: 'true', type: 'object' },
namespace: { type: 'keyword' },
namespaces: { type: 'keyword' },
originId: { type: 'keyword' },
type: { type: 'keyword' },
updated_at: { type: 'date' },
references: {
type: 'nested',
properties: {
name: { type: 'keyword' },
type: { type: 'keyword' },
id: { type: 'keyword' },
},
},
coreMigrationVersion: { type: 'keyword' },
},
},
settings: { number_of_shards: 1, auto_expand_replicas: '0-1' },
},
index: '.kibana_1',
});
});
test('returns stats about the migration', async () => {
const { client } = testOpts;
withIndex(client, { index: { statusCode: 404 }, alias: { statusCode: 404 } });
const result = await new IndexMigrator(testOpts).migrate();
expect(result).toMatchObject({
destIndex: '.kibana_1',
sourceIndex: '.kibana',
status: 'migrated',
});
});
test('fails if there are multiple root doc types', async () => {
const { client } = testOpts;
withIndex(client, {
index: {
'.kibana_1': {
aliases: {},
mappings: {
foo: { properties: {} },
doc: {
properties: {
author: { type: 'text' },
},
},
},
},
},
});
await expect(new IndexMigrator(testOpts).migrate()).rejects.toThrow(
/use the X-Pack upgrade assistant/
);
});
test('fails if root doc type is not "doc"', async () => {
const { client } = testOpts;
withIndex(client, {
index: {
'.kibana_1': {
aliases: {},
mappings: {
poc: {
properties: {
author: { type: 'text' },
},
},
},
},
},
});
await expect(new IndexMigrator(testOpts).migrate()).rejects.toThrow(
/use the X-Pack upgrade assistant/
);
});
test('retains unknown core field mappings from the previous index', async () => {
const { client } = testOpts;
testOpts.mappingProperties = { foo: { type: 'text' } as any };
withIndex(client, {
index: {
'.kibana_1': {
aliases: {},
mappings: {
properties: {
unknown_core_field: { type: 'text' },
},
},
},
},
});
await new IndexMigrator(testOpts).migrate();
expect(client.indices.create).toHaveBeenCalledWith({
body: {
mappings: {
dynamic: 'strict',
_meta: {
migrationMappingPropertyHashes: {
foo: '625b32086eb1d1203564cf85062dd22e',
migrationVersion: '4a1746014a75ade3a714e1db5763276f',
namespace: '2f4316de49999235636386fe51dc06c1',
namespaces: '2f4316de49999235636386fe51dc06c1',
originId: '2f4316de49999235636386fe51dc06c1',
references: '7997cf5a56cc02bdc9c93361bde732b0',
coreMigrationVersion: '2f4316de49999235636386fe51dc06c1',
type: '2f4316de49999235636386fe51dc06c1',
updated_at: '00da57df13e94e9d98437d13ace4bfe0',
},
},
properties: {
unknown_core_field: { type: 'text' },
foo: { type: 'text' },
migrationVersion: { dynamic: 'true', type: 'object' },
namespace: { type: 'keyword' },
namespaces: { type: 'keyword' },
originId: { type: 'keyword' },
type: { type: 'keyword' },
updated_at: { type: 'date' },
references: {
type: 'nested',
properties: {
name: { type: 'keyword' },
type: { type: 'keyword' },
id: { type: 'keyword' },
},
},
coreMigrationVersion: { type: 'keyword' },
},
},
settings: { number_of_shards: 1, auto_expand_replicas: '0-1' },
},
index: '.kibana_2',
});
});
test('disables complex field mappings from unknown types in the previous index', async () => {
const { client } = testOpts;
testOpts.mappingProperties = { foo: { type: 'text' } as any };
withIndex(client, {
index: {
'.kibana_1': {
aliases: {},
mappings: {
properties: {
unknown_complex_field: { properties: { description: { type: 'text' } } },
},
},
},
},
});
await new IndexMigrator(testOpts).migrate();
expect(client.indices.create).toHaveBeenCalledWith({
body: {
mappings: {
dynamic: 'strict',
_meta: {
migrationMappingPropertyHashes: {
foo: '625b32086eb1d1203564cf85062dd22e',
migrationVersion: '4a1746014a75ade3a714e1db5763276f',
namespace: '2f4316de49999235636386fe51dc06c1',
namespaces: '2f4316de49999235636386fe51dc06c1',
originId: '2f4316de49999235636386fe51dc06c1',
references: '7997cf5a56cc02bdc9c93361bde732b0',
coreMigrationVersion: '2f4316de49999235636386fe51dc06c1',
type: '2f4316de49999235636386fe51dc06c1',
updated_at: '00da57df13e94e9d98437d13ace4bfe0',
},
},
properties: {
unknown_complex_field: { dynamic: false, properties: {} },
foo: { type: 'text' },
migrationVersion: { dynamic: 'true', type: 'object' },
namespace: { type: 'keyword' },
namespaces: { type: 'keyword' },
originId: { type: 'keyword' },
type: { type: 'keyword' },
updated_at: { type: 'date' },
references: {
type: 'nested',
properties: {
name: { type: 'keyword' },
type: { type: 'keyword' },
id: { type: 'keyword' },
},
},
coreMigrationVersion: { type: 'keyword' },
},
},
settings: { number_of_shards: 1, auto_expand_replicas: '0-1' },
},
index: '.kibana_2',
});
});
test('points the alias at the dest index', async () => {
const { client } = testOpts;
withIndex(client, { index: { statusCode: 404 }, alias: { statusCode: 404 } });
await new IndexMigrator(testOpts).migrate();
expect(client.indices.create).toHaveBeenCalledWith(expect.any(Object));
expect(client.indices.updateAliases).toHaveBeenCalledWith({
body: { actions: [{ add: { alias: '.kibana', index: '.kibana_1' } }] },
});
});
test('removes previous indices from the alias', async () => {
const { client } = testOpts;
testOpts.documentMigrator.migrationVersion = {
dashboard: '2.4.5',
};
withIndex(client, { numOutOfDate: 1 });
await new IndexMigrator(testOpts).migrate();
expect(client.indices.create).toHaveBeenCalledWith(expect.any(Object));
expect(client.indices.updateAliases).toHaveBeenCalledWith({
body: {
actions: [
{ remove: { alias: '.kibana', index: '.kibana_1' } },
{ add: { alias: '.kibana', index: '.kibana_2' } },
],
},
});
});
test('transforms all docs from the original index', async () => {
let count = 0;
const { client } = testOpts;
const migrateAndConvertDoc = jest.fn((doc: SavedObjectUnsanitizedDoc) => {
return [{ ...doc, attributes: { name: ++count } }];
});
testOpts.documentMigrator = {
migrationVersion: { foo: '1.2.3' },
migrate: jest.fn(),
migrateAndConvert: migrateAndConvertDoc,
prepareMigrations: jest.fn(),
};
withIndex(client, {
numOutOfDate: 1,
docs: [
[{ _id: 'foo:1', _source: { type: 'foo', foo: { name: 'Bar' } } }],
[{ _id: 'foo:2', _source: { type: 'foo', foo: { name: 'Baz' } } }],
],
});
await new IndexMigrator(testOpts).migrate();
expect(count).toEqual(2);
expect(migrateAndConvertDoc).toHaveBeenNthCalledWith(1, {
id: '1',
type: 'foo',
attributes: { name: 'Bar' },
migrationVersion: {},
references: [],
});
expect(migrateAndConvertDoc).toHaveBeenNthCalledWith(2, {
id: '2',
type: 'foo',
attributes: { name: 'Baz' },
migrationVersion: {},
references: [],
});
expect(client.bulk).toHaveBeenCalledTimes(2);
expect(client.bulk).toHaveBeenNthCalledWith(1, {
body: [
{ index: { _id: 'foo:1', _index: '.kibana_2' } },
{ foo: { name: 1 }, type: 'foo', migrationVersion: {}, references: [] },
],
});
expect(client.bulk).toHaveBeenNthCalledWith(2, {
body: [
{ index: { _id: 'foo:2', _index: '.kibana_2' } },
{ foo: { name: 2 }, type: 'foo', migrationVersion: {}, references: [] },
],
});
});
test('rejects when the migration function throws an error', async () => {
const { client } = testOpts;
const migrateAndConvertDoc = jest.fn((doc: SavedObjectUnsanitizedDoc) => {
throw new Error('error migrating document');
});
testOpts.documentMigrator = {
migrationVersion: { foo: '1.2.3' },
migrate: jest.fn(),
migrateAndConvert: migrateAndConvertDoc,
prepareMigrations: jest.fn(),
};
withIndex(client, {
numOutOfDate: 1,
docs: [
[{ _id: 'foo:1', _source: { type: 'foo', foo: { name: 'Bar' } } }],
[{ _id: 'foo:2', _source: { type: 'foo', foo: { name: 'Baz' } } }],
],
});
await expect(new IndexMigrator(testOpts).migrate()).rejects.toThrowErrorMatchingInlineSnapshot(
`"error migrating document"`
);
});
});
function withIndex(
client: ReturnType<typeof elasticsearchClientMock.createElasticsearchClient>,
opts: any = {}
) {
const defaultIndex = {
'.kibana_1': {
aliases: { '.kibana': {} },
mappings: {
dynamic: 'strict',
properties: {
migrationVersion: { dynamic: 'true', type: 'object' },
},
},
},
};
const defaultAlias = {
'.kibana_1': {},
};
const { numOutOfDate = 0 } = opts;
const { alias = defaultAlias } = opts;
const { index = defaultIndex } = opts;
const { docs = [] } = opts;
const searchResult = (i: number) => ({
_scroll_id: i,
_shards: {
successful: 1,
total: 1,
},
hits: {
hits: docs[i] || [],
},
});
let scrollCallCounter = 1;
client.indices.get.mockReturnValue(
elasticsearchClientMock.createSuccessTransportRequestPromise(index, {
statusCode: index.statusCode,
})
);
client.indices.getAlias.mockReturnValue(
elasticsearchClientMock.createSuccessTransportRequestPromise(alias, {
statusCode: index.statusCode,
})
);
client.reindex.mockReturnValue(
elasticsearchClientMock.createSuccessTransportRequestPromise({
task: 'zeid',
_shards: { successful: 1, total: 1 },
} as estypes.ReindexResponse)
);
client.tasks.get.mockReturnValue(
elasticsearchClientMock.createSuccessTransportRequestPromise({
completed: true,
} as estypes.TasksGetResponse)
);
client.search.mockReturnValue(
elasticsearchClientMock.createSuccessTransportRequestPromise(searchResult(0) as any)
);
client.bulk.mockReturnValue(
elasticsearchClientMock.createSuccessTransportRequestPromise({
items: [] as any[],
} as estypes.BulkResponse)
);
client.count.mockReturnValue(
elasticsearchClientMock.createSuccessTransportRequestPromise({
count: numOutOfDate,
_shards: { successful: 1, total: 1 },
} as estypes.CountResponse)
);
// @ts-expect-error
client.scroll.mockImplementation(() => {
if (scrollCallCounter <= docs.length) {
const result = searchResult(scrollCallCounter);
scrollCallCounter++;
return elasticsearchClientMock.createSuccessTransportRequestPromise(result);
}
return elasticsearchClientMock.createSuccessTransportRequestPromise({});
});
}


@ -1,194 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
import { diffMappings } from './build_active_mappings';
import * as Index from './elastic_index';
import { migrateRawDocs } from './migrate_raw_docs';
import { Context, migrationContext, MigrationOpts } from './migration_context';
import { coordinateMigration, MigrationResult } from './migration_coordinator';
/*
* Core logic for migrating the mappings and documents in an index.
*/
export class IndexMigrator {
private opts: MigrationOpts;
/**
* Creates an instance of IndexMigrator.
*
* @param {MigrationOpts} opts
*/
constructor(opts: MigrationOpts) {
this.opts = opts;
}
/**
* Migrates the index, or, if another Kibana instance appears to be running the migration,
* waits for the migration to complete.
*
* @returns {Promise<MigrationResult>}
*/
public async migrate(): Promise<MigrationResult> {
const context = await migrationContext(this.opts);
return coordinateMigration({
log: context.log,
pollInterval: context.pollInterval,
setStatus: context.setStatus,
async isMigrated() {
return !(await requiresMigration(context));
},
async runMigration() {
if (await requiresMigration(context)) {
return migrateIndex(context);
}
return { status: 'skipped' };
},
});
}
}
/**
 * Determines whether the migration system needs to take action (patch or
 * migrate) on the index.
 */
async function requiresMigration(context: Context): Promise<boolean> {
const { client, alias, documentMigrator, dest, kibanaVersion, log } = context;
// Have all of our known migrations been run against the index?
const hasMigrations = await Index.migrationsUpToDate(
client,
alias,
documentMigrator.migrationVersion,
kibanaVersion
);
if (!hasMigrations) {
return true;
}
// Is our index aliased?
const refreshedSource = await Index.fetchInfo(client, alias);
if (!refreshedSource.aliases[alias]) {
return true;
}
// Do the actual index mappings match our expectations?
const diffResult = diffMappings(refreshedSource.mappings, dest.mappings);
if (diffResult) {
log.info(`Detected mapping change in "${diffResult.changedProp}"`);
return true;
}
return false;
}
/**
* Performs an index migration if the source index exists, otherwise
* this simply creates the dest index with the proper mappings.
*/
async function migrateIndex(context: Context): Promise<MigrationResult> {
const startTime = Date.now();
const { client, alias, source, dest, log } = context;
await deleteIndexTemplates(context);
log.info(`Creating index ${dest.indexName}.`);
await Index.createIndex(client, dest.indexName, dest.mappings);
await migrateSourceToDest(context);
log.info(`Pointing alias ${alias} to ${dest.indexName}.`);
await Index.claimAlias(client, dest.indexName, alias);
const result: MigrationResult = {
status: 'migrated',
destIndex: dest.indexName,
sourceIndex: source.indexName,
elapsedMs: Date.now() - startTime,
};
log.info(`Finished in ${result.elapsedMs}ms.`);
return result;
}
/**
* If the obsoleteIndexTemplatePattern option is specified, this will delete any index templates
* that match it.
*/
async function deleteIndexTemplates({ client, log, obsoleteIndexTemplatePattern }: Context) {
if (!obsoleteIndexTemplatePattern) {
return;
}
const { body: templates } = await client.cat.templates({
format: 'json',
name: obsoleteIndexTemplatePattern,
});
if (!templates.length) {
return;
}
const templateNames = templates.map((t) => t.name);
log.info(`Removing index templates: ${templateNames}`);
return Promise.all(templateNames.map((name) => client.indices.deleteTemplate({ name: name! })));
}
/**
* Moves all docs from sourceIndex to destIndex, migrating each as necessary.
* This moves documents from the concrete index, rather than the alias, to prevent
* a situation where the alias moves out from under us as we're migrating docs.
*/
async function migrateSourceToDest(context: Context) {
const { client, alias, dest, source, batchSize } = context;
const { scrollDuration, documentMigrator, log, serializer } = context;
if (!source.exists) {
return;
}
if (!source.aliases[alias]) {
log.info(`Reindexing ${alias} to ${source.indexName}`);
await Index.convertToAlias(client, source, alias, batchSize, context.convertToAliasScript);
}
const read = Index.reader(client, source.indexName, { batchSize, scrollDuration });
log.info(`Migrating ${source.indexName} saved objects to ${dest.indexName}`);
while (true) {
const docs = await read();
if (!docs || !docs.length) {
return;
}
log.debug(`Migrating saved objects ${docs.map((d) => d._id).join(', ')}`);
await Index.write(
client,
dest.indexName,
// @ts-expect-error @elastic/elasticsearch _source is optional
await migrateRawDocs(serializer, documentMigrator.migrateAndConvert, docs)
);
}
}


@ -1,188 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
/**
 * The MigrationOpts interface defines the minimum set of data required
 * in order to properly migrate an index. MigrationContext expands this
 * with computed values and values from the index being migrated, and
 * serves as a central blueprint for what migrations will end up doing.
 */
import { Logger } from '../../../logging';
import { MigrationEsClient } from './migration_es_client';
import { SavedObjectsSerializer } from '../../serialization';
import {
SavedObjectsTypeMappingDefinitions,
SavedObjectsMappingProperties,
IndexMapping,
} from '../../mappings';
import { buildActiveMappings } from './build_active_mappings';
import { VersionedTransformer } from './document_migrator';
import * as Index from './elastic_index';
import { SavedObjectsMigrationLogger, MigrationLogger } from './migration_logger';
import { KibanaMigratorStatus } from '../kibana';
export interface MigrationOpts {
batchSize: number;
pollInterval: number;
scrollDuration: string;
client: MigrationEsClient;
index: string;
kibanaVersion: string;
log: Logger;
setStatus: (status: KibanaMigratorStatus) => void;
mappingProperties: SavedObjectsTypeMappingDefinitions;
documentMigrator: VersionedTransformer;
serializer: SavedObjectsSerializer;
convertToAliasScript?: string;
/**
* If specified, templates matching the specified pattern will be removed
* prior to running migrations. For example: 'kibana_index_template*'
*/
obsoleteIndexTemplatePattern?: string;
}
/**
* @internal
*/
export interface Context {
client: MigrationEsClient;
alias: string;
source: Index.FullIndexInfo;
dest: Index.FullIndexInfo;
documentMigrator: VersionedTransformer;
kibanaVersion: string;
log: SavedObjectsMigrationLogger;
setStatus: (status: KibanaMigratorStatus) => void;
batchSize: number;
pollInterval: number;
scrollDuration: string;
serializer: SavedObjectsSerializer;
obsoleteIndexTemplatePattern?: string;
convertToAliasScript?: string;
}
/**
* Builds up an uber object which has all of the config options, settings,
* and various info needed to migrate the source index.
*/
export async function migrationContext(opts: MigrationOpts): Promise<Context> {
const { log, client, setStatus } = opts;
const alias = opts.index;
const source = createSourceContext(await Index.fetchInfo(client, alias), alias);
const dest = createDestContext(source, alias, opts.mappingProperties);
return {
client,
alias,
source,
dest,
kibanaVersion: opts.kibanaVersion,
log: new MigrationLogger(log),
setStatus,
batchSize: opts.batchSize,
documentMigrator: opts.documentMigrator,
pollInterval: opts.pollInterval,
scrollDuration: opts.scrollDuration,
serializer: opts.serializer,
obsoleteIndexTemplatePattern: opts.obsoleteIndexTemplatePattern,
convertToAliasScript: opts.convertToAliasScript,
};
}
function createSourceContext(source: Index.FullIndexInfo, alias: string) {
if (source.exists && source.indexName === alias) {
return {
...source,
indexName: nextIndexName(alias, alias),
};
}
return source;
}
function createDestContext(
source: Index.FullIndexInfo,
alias: string,
typeMappingDefinitions: SavedObjectsTypeMappingDefinitions
): Index.FullIndexInfo {
const targetMappings = disableUnknownTypeMappingFields(
buildActiveMappings(typeMappingDefinitions),
source.mappings
);
return {
aliases: {},
exists: false,
indexName: nextIndexName(source.indexName, alias),
mappings: targetMappings,
};
}
/**
 * Merges the active mappings and the source mappings while disabling the
 * fields of any unknown Saved Object types present in the source index's
 * mappings.
 *
 * Since the Saved Objects index has `dynamic: strict` defined at the
 * top-level, only Saved Object types for which a mapping exists can be
 * inserted into the index. To ensure that we can continue to store Saved
 * Object documents belonging to a disabled plugin we define a mapping for all
 * the unknown Saved Object types that were present in the source index's
 * mappings. To limit the field count as much as possible, these unknown
 * types' mappings are set to `dynamic: false`.
 *
 * (Since we're using the source index mappings instead of looking at actual
 * document types in the index, we potentially add more "unknown types" than
 * what would be necessary to support migrating all the data over to the
 * target index.)
 *
 * @param activeMappings The mappings compiled from all the Saved Object types
 * known to this Kibana node.
 * @param sourceMappings The mappings of the index used as the migration source.
 * @returns The mappings that should be applied to the target index.
 */
export function disableUnknownTypeMappingFields(
activeMappings: IndexMapping,
sourceMappings: IndexMapping
): IndexMapping {
const targetTypes = Object.keys(activeMappings.properties);
const disabledTypesProperties = Object.keys(sourceMappings.properties ?? {})
.filter((sourceType) => {
const isObjectType = 'properties' in sourceMappings.properties[sourceType];
// Only Object/Nested datatypes can be excluded from the field count by
// using `dynamic: false`.
return !targetTypes.includes(sourceType) && isObjectType;
})
.reduce((disabledTypesAcc, sourceType) => {
disabledTypesAcc[sourceType] = { dynamic: false, properties: {} };
return disabledTypesAcc;
}, {} as SavedObjectsMappingProperties);
return {
...activeMappings,
properties: {
...sourceMappings.properties,
...disabledTypesProperties,
...activeMappings.properties,
},
};
}
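// A worked example (field names are illustrative): with
//   activeMappings.properties = { dashboard: { properties: { title: { type: 'text' } } } }
//   sourceMappings.properties = { dashboard: { properties: { title: { type: 'keyword' } } },
//                                 old_plugin_type: { properties: { description: { type: 'text' } } },
//                                 unknown_core_field: { type: 'text' } }
// the result keeps the active `dashboard` mapping, disables the unknown object
// type (`old_plugin_type` becomes `{ dynamic: false, properties: {} }`), and
// retains the unknown non-object field as-is, mirroring the IndexMigrator
// tests above.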
/**
 * Gets the next index name in a sequence, based on the specified current index's info.
 * We're using a numeric counter to create new indices (e.g. `.kibana_1`, `.kibana_2`, etc.).
 * There are downsides to this, but it seemed like a simple enough approach.
 */
function nextIndexName(indexName: string, alias: string) {
const indexSuffix = (indexName.match(/[\d]+$/) || [])[0];
const indexNum = parseInt(indexSuffix, 10) || 0;
return `${alias}_${indexNum + 1}`;
}
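// For example: nextIndexName('.kibana', '.kibana') returns '.kibana_1' (an
// index without a numeric suffix parses to 0), and
// nextIndexName('.kibana_1', '.kibana') returns '.kibana_2'.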


@ -1,75 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
import { coordinateMigration } from './migration_coordinator';
import { createSavedObjectsMigrationLoggerMock } from '../mocks';
describe('coordinateMigration', () => {
const log = createSavedObjectsMigrationLoggerMock();
test('waits for isMigrated, if there is an index conflict', async () => {
const pollInterval = 1;
const runMigration = jest.fn(() => {
// eslint-disable-next-line no-throw-literal
throw { body: { error: { index: '.foo', type: 'resource_already_exists_exception' } } };
});
const isMigrated = jest.fn();
const setStatus = jest.fn();
isMigrated.mockResolvedValueOnce(false).mockResolvedValueOnce(true);
await coordinateMigration({
log,
runMigration,
pollInterval,
isMigrated,
setStatus,
});
expect(runMigration).toHaveBeenCalledTimes(1);
expect(isMigrated).toHaveBeenCalledTimes(2);
const warnings = log.warning.mock.calls.filter((msg: any) => /deleting index \.foo/.test(msg));
expect(warnings.length).toEqual(1);
});
test('does not poll if the runMigration succeeds', async () => {
const pollInterval = 1;
const runMigration = jest.fn<any, any>(() => Promise.resolve());
const isMigrated = jest.fn(() => Promise.resolve(true));
const setStatus = jest.fn();
await coordinateMigration({
log,
runMigration,
pollInterval,
isMigrated,
setStatus,
});
expect(isMigrated).not.toHaveBeenCalled();
});
test('does not swallow exceptions', async () => {
const pollInterval = 1;
const runMigration = jest.fn(() => {
throw new Error('Doh');
});
const isMigrated = jest.fn(() => Promise.resolve(true));
const setStatus = jest.fn();
await expect(
coordinateMigration({
log,
runMigration,
pollInterval,
isMigrated,
setStatus,
})
).rejects.toThrow(/Doh/);
expect(isMigrated).not.toHaveBeenCalled();
});
});


@ -1,124 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
/*
* This provides a mechanism for preventing multiple Kibana instances from
* simultaneously running migrations on the same index. It synchronizes this
* by handling index creation conflicts, and putting this instance into a
* poll loop that periodically checks to see if the index is migrated.
*
* The reason we have to coordinate this, rather than letting each Kibana instance
* perform duplicate work, is that if we allowed each Kibana to simply run migrations in
* parallel, they would each try to reindex and each try to create the destination index.
* If those indices already exist, it may be due to contention between multiple Kibana
* instances (which is safe to ignore), but it may be due to a partially completed migration,
* or someone tampering with the Kibana alias. In these cases, it's not clear that we should
* just migrate data into an existing index. Such an action could result in data loss. Instead,
* we should probably fail, and the Kibana sys-admin should clean things up before relaunching
* Kibana.
*/
import _ from 'lodash';
import { KibanaMigratorStatus } from '../kibana';
import { SavedObjectsMigrationLogger } from './migration_logger';
const DEFAULT_POLL_INTERVAL = 15000;
export type MigrationStatus =
| 'waiting_to_start'
| 'waiting_for_other_nodes'
| 'running'
| 'completed';
export type MigrationResult =
| { status: 'skipped' }
| { status: 'patched' }
| {
status: 'migrated';
destIndex: string;
sourceIndex: string;
elapsedMs: number;
};
interface Opts {
runMigration: () => Promise<MigrationResult>;
isMigrated: () => Promise<boolean>;
setStatus: (status: KibanaMigratorStatus) => void;
log: SavedObjectsMigrationLogger;
pollInterval?: number;
}
/**
 * Runs the migration specified by opts. If the migration fails due to an index
 * creation conflict, this falls into a polling loop, checking every pollInterval
 * milliseconds if the index is migrated.
 *
 * @export
 * @param {Opts} opts
 * @prop {Migration} runMigration - A function that runs the index migration
 * @prop {IsMigrated} isMigrated - A function which checks if the index is already migrated
 * @prop {Logger} log - The migration logger
 * @prop {number} pollInterval - How often, in ms, to check that the index is migrated
 * @returns {Promise<MigrationResult>} The migration result, or `{ status: 'skipped' }`
 * if another instance performed the migration
 */
export async function coordinateMigration(opts: Opts): Promise<MigrationResult> {
try {
return await opts.runMigration();
} catch (error) {
const waitingIndex = handleIndexExists(error, opts.log);
if (waitingIndex) {
opts.setStatus({ status: 'waiting_for_other_nodes', waitingIndex });
await waitForMigration(opts.isMigrated, opts.pollInterval);
return { status: 'skipped' };
}
throw error;
}
}
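// A timeline sketch for two Kibana instances, A and B (hypothetical, but
// consistent with the mechanism described above):
//   t0: A and B both call runMigration()
//   t1: A wins the index-creation race; B receives resource_already_exists_exception
//   t2: B reports 'waiting_for_other_nodes' and polls isMigrated() every pollInterval ms
//   t3: A completes the migration; B's next isMigrated() returns true and B
//       resolves with { status: 'skipped' }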
/**
* If the specified error is an index exists error, this logs a warning,
* and is the cue for us to fall into a polling loop, waiting for some
* other Kibana instance to complete the migration.
*/
function handleIndexExists(error: any, log: SavedObjectsMigrationLogger): string | undefined {
const isIndexExistsError =
_.get(error, 'body.error.type') === 'resource_already_exists_exception';
if (!isIndexExistsError) {
return undefined;
}
const index = _.get(error, 'body.error.index');
log.warning(
`Another Kibana instance appears to be migrating the index. Waiting for ` +
`that migration to complete. If no other Kibana instance is attempting ` +
`migrations, you can get past this message by deleting index ${index} and ` +
`restarting Kibana.`
);
return index;
}
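// Example (the same error shape the coordinateMigration tests above construct):
//   handleIndexExists({ body: { error: { index: '.foo', type: 'resource_already_exists_exception' } } }, log)
// logs the warning and returns '.foo'; any other error returns undefined.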
/**
* Polls isMigrated every pollInterval milliseconds until it returns true.
*/
async function waitForMigration(
isMigrated: () => Promise<boolean>,
pollInterval = DEFAULT_POLL_INTERVAL
) {
while (true) {
if (await isMigrated()) {
return;
}
await sleep(pollInterval);
}
}
function sleep(ms: number) {
return new Promise((r) => setTimeout(r, ms));
}


@ -1,12 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
export const migrationRetryCallClusterMock = jest.fn((fn) => fn());
jest.doMock('../../../elasticsearch/client/retry_call_cluster', () => ({
migrationRetryCallCluster: migrationRetryCallClusterMock,
}));


@ -1,55 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
import { migrationRetryCallClusterMock } from './migration_es_client.test.mock';
import { createMigrationEsClient, MigrationEsClient } from './migration_es_client';
import { elasticsearchClientMock } from '../../../elasticsearch/client/mocks';
import { loggerMock } from '../../../logging/logger.mock';
import { SavedObjectsErrorHelpers } from '../../service/lib/errors';
describe('MigrationEsClient', () => {
let client: ReturnType<typeof elasticsearchClientMock.createElasticsearchClient>;
let migrationEsClient: MigrationEsClient;
beforeEach(() => {
client = elasticsearchClientMock.createElasticsearchClient();
migrationEsClient = createMigrationEsClient(client, loggerMock.create());
migrationRetryCallClusterMock.mockClear();
});
it('delegates call to ES client method', async () => {
expect(migrationEsClient.bulk).toStrictEqual(expect.any(Function));
await migrationEsClient.bulk({ body: [] });
expect(client.bulk).toHaveBeenCalledTimes(1);
});
it('wraps a method call in migrationRetryCallClusterMock', async () => {
await migrationEsClient.bulk({ body: [] });
expect(migrationRetryCallClusterMock).toHaveBeenCalledTimes(1);
});
it('sets maxRetries: 0 to delegate retry logic to migrationRetryCallCluster', async () => {
expect(migrationEsClient.bulk).toStrictEqual(expect.any(Function));
await migrationEsClient.bulk({ body: [] });
expect(client.bulk).toHaveBeenCalledWith(
expect.any(Object),
expect.objectContaining({ maxRetries: 0 })
);
});
  it('does not transform elasticsearch errors into saved objects errors', async () => {
expect.assertions(1);
client.bulk = jest.fn().mockRejectedValue(new Error('reason'));
try {
await migrationEsClient.bulk({ body: [] });
} catch (e) {
expect(SavedObjectsErrorHelpers.isSavedObjectsClientError(e)).toBe(false);
}
});
});


@ -1,78 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
import type { Client, TransportRequestOptions } from '@elastic/elasticsearch';
import { get } from 'lodash';
import { set } from '@elastic/safer-lodash-set';
import { ElasticsearchClient } from '../../../elasticsearch';
import { migrationRetryCallCluster } from '../../../elasticsearch/client/retry_call_cluster';
import { Logger } from '../../../logging';
const methods = [
'bulk',
'cat.templates',
'clearScroll',
'count',
'indices.create',
'indices.deleteTemplate',
'indices.get',
'indices.getAlias',
'indices.refresh',
'indices.updateAliases',
'reindex',
'search',
'scroll',
'tasks.get',
] as const;
type MethodName = typeof methods[number];
export interface MigrationEsClient {
bulk: ElasticsearchClient['bulk'];
cat: {
templates: ElasticsearchClient['cat']['templates'];
};
clearScroll: ElasticsearchClient['clearScroll'];
count: ElasticsearchClient['count'];
indices: {
create: ElasticsearchClient['indices']['create'];
delete: ElasticsearchClient['indices']['delete'];
deleteTemplate: ElasticsearchClient['indices']['deleteTemplate'];
get: ElasticsearchClient['indices']['get'];
getAlias: ElasticsearchClient['indices']['getAlias'];
refresh: ElasticsearchClient['indices']['refresh'];
updateAliases: ElasticsearchClient['indices']['updateAliases'];
};
reindex: ElasticsearchClient['reindex'];
search: ElasticsearchClient['search'];
scroll: ElasticsearchClient['scroll'];
tasks: {
get: ElasticsearchClient['tasks']['get'];
};
}
export function createMigrationEsClient(
client: ElasticsearchClient | Client,
log: Logger,
delay?: number
): MigrationEsClient {
return methods.reduce((acc: MigrationEsClient, key: MethodName) => {
set(acc, key, async (params?: unknown, options?: TransportRequestOptions) => {
const fn = get(client, key);
if (!fn) {
throw new Error(`unknown ElasticsearchClient client method [${key}]`);
}
return await migrationRetryCallCluster(
() => fn.call(client, params, { maxRetries: 0, meta: true, ...options }),
log,
delay
);
});
return acc;
}, {} as MigrationEsClient);
}
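// A minimal usage sketch (the wiring is hypothetical): each listed method is
// wrapped so that retries are handled by migrationRetryCallCluster rather than
// by the transport itself, which is why every call pins maxRetries: 0.
//
//   const migrationClient = createMigrationEsClient(esClient, logger);
//   await migrationClient.indices.refresh({ index: '.kibana_1' });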


@ -6,10 +6,18 @@
* Side Public License, v 1.
*/
import { mockKibanaMigrator } from '../kibana_migrator.mock';
export type MigrationStatus =
| 'waiting_to_start'
| 'waiting_for_other_nodes'
| 'running'
| 'completed';
export const mockKibanaMigratorInstance = mockKibanaMigrator.create();
const mockConstructor = jest.fn().mockImplementation(() => mockKibanaMigratorInstance);
export const KibanaMigrator = mockConstructor;
export type MigrationResult =
| { status: 'skipped' }
| { status: 'patched' }
| {
status: 'migrated';
destIndex: string;
sourceIndex: string;
elapsedMs: number;
};


@ -0,0 +1,63 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
import * as estypes from '@elastic/elasticsearch/lib/api/typesWithBodyKey';
/**
* Types that are no longer registered and need to be removed
*/
export const REMOVED_TYPES: string[] = [
'apm-services-telemetry',
'background-session',
'cases-sub-case',
'file-upload-telemetry',
// https://github.com/elastic/kibana/issues/91869
'fleet-agent-events',
// Was removed in 7.12
'ml-telemetry',
'server',
// https://github.com/elastic/kibana/issues/95617
'tsvb-validation-telemetry',
// replaced by osquery-manager-usage-metric
'osquery-usage-metric',
// Was removed in 7.16
'timelion-sheet',
].sort();
// When migrating from the outdated index we use a read query which excludes
// saved objects which are no longer used. These saved objects will still be
// kept in the outdated index for backup purposes, but won't be available in
// the upgraded index.
export const excludeUnusedTypesQuery: estypes.QueryDslQueryContainer = {
bool: {
must_not: [
...REMOVED_TYPES.map((typeName) => ({
term: {
type: typeName,
},
})),
// https://github.com/elastic/kibana/issues/96131
{
bool: {
must: [
{
match: {
type: 'search-session',
},
},
{
match: {
'search-session.persisted': false,
},
},
],
},
},
],
},
};
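// For example, documents with `type: 'fleet-agent-events'` (a removed type) or
// search-session documents with `search-session.persisted: false` are excluded
// from the reindex read, while persisted search-sessions and all other
// registered types are copied over.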


@ -7,8 +7,8 @@
*/
export type { MigrationResult } from './core';
export { KibanaMigrator } from './kibana';
export type { IKibanaMigrator } from './kibana';
export { KibanaMigrator } from './kibana_migrator';
export type { IKibanaMigrator, KibanaMigratorStatus } from './kibana_migrator';
export type {
SavedObjectMigrationFn,
SavedObjectMigrationMap,


@ -11,7 +11,7 @@ import { IndexMapping } from '../mappings';
import { SavedObjectsMigrationVersion } from '../../../types';
import { SavedObjectsMigrationConfigType } from '../saved_objects_config';
import type { ISavedObjectTypeRegistry } from '../saved_objects_type_registry';
import { InitState } from './types';
import { InitState } from './state';
import { excludeUnusedTypesQuery } from '../migrations/core';
/**
