Mirror of https://github.com/elastic/kibana.git (synced 2025-04-24 09:48:58 -04:00)
[esArchiver] fetch kibana plugin ids before mucking with .kibana

* only clean when x-pack in use
* continue to limit clean to once per archive
* actually delete kibana index if using a pre-7 mapping
* when loading into a cleaned index, reroute docs to .kibana
* continue adding default space when building index from scratch
* only delete kibana indices when using pre K7 mappings
* cleaning kibana index on load doesn't work unless we force all archives to use current mapping
* move once- helper out of index handler
* continue casting to a boolean
* only create default space after migrations are complete
parent ece4709a45
commit a694623048

11 changed files with 126 additions and 71 deletions
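
The thrust of the change: every esArchiver action now asks Kibana for its enabled plugin list once, up front, and threads that list through the index helpers instead of passing kibanaUrl around and re-probing the status API mid-operation. A minimal sketch of the resulting flow, composed from the functions in this diff (not a verbatim excerpt of any one file):

    // fetch the plugin list while Kibana is still healthy...
    const kibanaPluginIds = await getEnabledKibanaPluginIds(kibanaUrl);

    // ...then mutate the index freely, handing the cached list down
    await deleteKibanaIndices({ client, stats });
    await migrateKibanaIndex({ client, log, stats, kibanaPluginIds });

    // the default space is only created after migrations are complete
    if (kibanaPluginIds.includes('spaces')) {
      await createDefaultSpace({ client, index: '.kibana' });
    }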
@@ -16,11 +16,18 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-import { migrateKibanaIndex, deleteKibanaIndices, createStats } from '../lib';
+import {
+  migrateKibanaIndex,
+  deleteKibanaIndices,
+  createStats,
+  getEnabledKibanaPluginIds
+} from '../lib';
 
 export async function emptyKibanaIndexAction({ client, log, kibanaUrl }) {
   const stats = createStats('emptyKibanaIndex', log);
+  const kibanaPluginIds = await getEnabledKibanaPluginIds(kibanaUrl);
 
   await deleteKibanaIndices({ client, stats });
-  await migrateKibanaIndex({ client, log, stats, kibanaUrl });
+  await migrateKibanaIndex({ client, log, stats, kibanaPluginIds });
   return stats;
 }
@@ -35,6 +35,8 @@ import {
   createIndexDocRecordsStream,
   migrateKibanaIndex,
   Progress,
+  getEnabledKibanaPluginIds,
+  createDefaultSpace,
 } from '../lib';
 
 // pipe a series of streams into each other so that data and errors
@@ -51,6 +53,7 @@ export async function loadAction({ name, skipExisting, client, dataDir, log, kibanaUrl }) {
   const inputDir = resolve(dataDir, name);
   const stats = createStats(name, log);
   const files = prioritizeMappings(await readDirectory(inputDir));
+  const kibanaPluginIds = await getEnabledKibanaPluginIds(kibanaUrl);
 
   // a single stream that emits records from all archive files, in
   // order, so that createIndexStream can track the state of indexes
@@ -72,7 +75,7 @@ export async function loadAction({ name, skipExisting, client, dataDir, log, kibanaUrl }) {
 
   await createPromiseFromStreams([
     recordStream,
-    createCreateIndexStream({ client, stats, skipExisting, log, kibanaUrl }),
+    createCreateIndexStream({ client, stats, skipExisting, log, kibanaPluginIds }),
     createIndexDocRecordsStream(client, stats, progress),
   ]);
 
@@ -92,7 +95,11 @@ export async function loadAction({ name, skipExisting, client, dataDir, log, kibanaUrl }) {
 
   // If we affected the Kibana index, we need to ensure it's migrated...
   if (Object.keys(result).some(k => k.startsWith('.kibana'))) {
-    await migrateKibanaIndex({ client, log, kibanaUrl });
+    await migrateKibanaIndex({ client, log, kibanaPluginIds });
+
+    if (kibanaPluginIds.includes('spaces')) {
+      await createDefaultSpace({ client, index: '.kibana' });
+    }
   }
 
   return result;
@@ -31,12 +31,14 @@ import {
   readDirectory,
   createParseArchiveStreams,
   createFilterRecordsStream,
-  createDeleteIndexStream
+  createDeleteIndexStream,
+  getEnabledKibanaPluginIds,
 } from '../lib';
 
 export async function unloadAction({ name, client, dataDir, log, kibanaUrl }) {
   const inputDir = resolve(dataDir, name);
   const stats = createStats(name, log);
+  const kibanaPluginIds = await getEnabledKibanaPluginIds(kibanaUrl);
 
   const files = prioritizeMappings(await readDirectory(inputDir));
   for (const filename of files) {
@@ -46,7 +48,7 @@ export async function unloadAction({ name, client, dataDir, log, kibanaUrl }) {
       createReadStream(resolve(inputDir, filename)),
       ...createParseArchiveStreams({ gzip: isGzip(filename) }),
       createFilterRecordsStream('index'),
-      createDeleteIndexStream(client, stats, log, kibanaUrl)
+      createDeleteIndexStream(client, stats, log, kibanaPluginIds)
     ]);
   }
 
@@ -28,6 +28,7 @@ export {
   createGenerateIndexRecordsStream,
   deleteKibanaIndices,
   migrateKibanaIndex,
+  createDefaultSpace,
 } from './indices';
 
 export {
@@ -52,3 +53,7 @@ export {
 export {
   Progress
 } from './progress';
+
+export {
+  getEnabledKibanaPluginIds,
+} from './kibana_plugins';
@@ -20,18 +20,16 @@
 import { Transform } from 'stream';
 
 import { get, once } from 'lodash';
-import { deleteKibanaIndices, isSpacesEnabled, createDefaultSpace } from './kibana_index';
+
+import { deleteKibanaIndices } from './kibana_index';
 import { deleteIndex } from './delete_index';
 
-export function createCreateIndexStream({ client, stats, skipExisting, log, kibanaUrl }) {
+export function createCreateIndexStream({ client, stats, skipExisting, log }) {
   const skipDocsFromIndices = new Set();
 
   // If we're trying to import Kibana index docs, we need to ensure that
   // previous indices are removed so we're starting w/ a clean slate for
   // migrations. This only needs to be done once per archive load operation.
-  const clearKibanaIndices = once(async () => await deleteKibanaIndices({ client, stats }));
+  // For the '.kibana' index, we will ignore 'skipExisting' and always load.
+  const deleteKibanaIndicesOnce = once(deleteKibanaIndices);
 
   async function handleDoc(stream, record) {
     if (skipDocsFromIndices.has(record.value.index)) {
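
Two details worth noting in this hunk: the new comment documents that '.kibana' ignores 'skipExisting', and the inline closure is replaced by once(deleteKibanaIndices), which keeps the once-per-archive-load guarantee while letting each call site pass its own { client, stats, log } arguments. Lodash's once caches the return value of the first invocation, and for an async function that value is the promise itself, so later callers await the same deletion. A small sketch of that behavior with a hypothetical wipe helper:

    import { once } from 'lodash';

    const wipe = async ({ label }: { label: string }) => {
      console.log(`wiping ${label}`);
    };
    const wipeOnce = once(wipe);

    await wipeOnce({ label: 'a' }); // logs "wiping a"
    await wipeOnce({ label: 'b' }); // no log; resolves with the first call's result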
@@ -46,24 +44,25 @@ export function createCreateIndexStream({ client, stats, skipExisting, log, kibanaUrl }) {
 
       // Determine if the mapping belongs to a pre-7.0 instance, for BWC tests, mainly
       const isPre7Mapping = !!mappings && Object.keys(mappings).length > 0 && !mappings.properties;
+      const isKibana = index.startsWith('.kibana');
 
       async function attemptToCreate(attemptNumber = 1) {
         try {
-          if (index.startsWith('.kibana')) {
-            await clearKibanaIndices();
+          if (isKibana) {
+            await deleteKibanaIndicesOnce({ client, stats, log });
           }
 
           await client.indices.create({
             method: 'PUT',
             index,
             include_type_name: isPre7Mapping,
-            body: { settings, mappings, aliases },
+            body: {
+              settings,
+              mappings,
+              aliases
+            },
           });
 
-          if (index.startsWith('.kibana') && await isSpacesEnabled({ kibanaUrl })) {
-            await createDefaultSpace({ index, client });
-          }
-
           stats.createdIndex(index, { settings });
         } catch (err) {
           if (get(err, 'body.error.type') !== 'resource_already_exists_exception' || attemptNumber >= 3) {
@@ -22,7 +22,7 @@ import { Transform } from 'stream';
 import { deleteIndex } from './delete_index';
 import { cleanKibanaIndices } from './kibana_index';
 
-export function createDeleteIndexStream(client, stats, log, kibanaUrl) {
+export function createDeleteIndexStream(client, stats, log, kibanaPluginIds) {
   return new Transform({
     readableObjectMode: true,
     writableObjectMode: true,
@@ -30,8 +30,9 @@ export function createDeleteIndexStream(client, stats, log, kibanaUrl) {
       try {
         if (!record || record.type === 'index') {
           const { index } = record.value;
 
           if (index.startsWith('.kibana')) {
-            await cleanKibanaIndices({ client, stats, log, kibanaUrl });
+            await cleanKibanaIndices({ client, stats, log, kibanaPluginIds });
           } else {
             await deleteIndex({ client, stats, log, index });
           }
@@ -20,4 +20,4 @@
 export { createCreateIndexStream } from './create_index_stream';
 export { createDeleteIndexStream } from './delete_index_stream';
 export { createGenerateIndexRecordsStream } from './generate_index_records_stream';
-export { migrateKibanaIndex, deleteKibanaIndices } from './kibana_index';
+export { migrateKibanaIndex, deleteKibanaIndices, createDefaultSpace } from './kibana_index';
@@ -22,7 +22,6 @@ import fs from 'fs';
 import path from 'path';
 import { promisify } from 'util';
 import { toArray } from 'rxjs/operators';
-import wreck from '@hapi/wreck';
 
 import { deleteIndex } from './delete_index';
 import { collectUiExports } from '../../../legacy/ui/ui_exports';
@@ -33,11 +32,8 @@ import { findPluginSpecs } from '../../../legacy/plugin_discovery';
  * Load the uiExports for a Kibana instance, only load uiExports from xpack if
  * it is enabled in the Kibana server.
  */
-const getUiExports = async kibanaUrl => {
-  const xpackEnabled = await getKibanaPluginEnabled({
-    kibanaUrl,
-    pluginId: 'xpack_main',
-  });
+const getUiExports = async (kibanaPluginIds) => {
+  const xpackEnabled = kibanaPluginIds.includes('xpack_main');
 
   const { spec$ } = await findPluginSpecs({
     plugins: {
@@ -79,8 +75,8 @@ export async function deleteKibanaIndices({ client, stats, log }) {
  * builds up an object that implements just enough of the kbnMigrations interface
  * as is required by migrations.
  */
-export async function migrateKibanaIndex({ client, log, kibanaUrl }) {
-  const uiExports = await getUiExports(kibanaUrl);
+export async function migrateKibanaIndex({ client, log, kibanaPluginIds }) {
+  const uiExports = await getUiExports(kibanaPluginIds);
   const version = await loadElasticVersion();
   const config = {
     'kibana.index': '.kibana',
@@ -118,46 +114,6 @@ async function loadElasticVersion() {
   return JSON.parse(packageJson).version;
 }
 
-export async function isSpacesEnabled({ kibanaUrl }) {
-  return await getKibanaPluginEnabled({
-    kibanaUrl,
-    pluginId: 'spaces',
-  });
-}
-
-async function getKibanaPluginEnabled({ pluginId, kibanaUrl }) {
-  try {
-    const { payload } = await wreck.get('/api/status', {
-      baseUrl: kibanaUrl,
-      json: true,
-    });
-
-    return payload.status.statuses.some(({ id }) => id.includes(`plugin:${pluginId}@`));
-  } catch (error) {
-    throw new Error(
-      `Unable to fetch Kibana status API response from Kibana at ${kibanaUrl}: ${error}`
-    );
-  }
-}
-
-export async function createDefaultSpace({ index, client }) {
-  await client.index({
-    index,
-    type: '_doc',
-    id: 'space:default',
-    body: {
-      type: 'space',
-      updated_at: new Date().toISOString(),
-      space: {
-        name: 'Default Space',
-        description: 'This is the default space',
-        disabledFeatures: [],
-        _reserved: true,
-      },
-    },
-  });
-}
-
 /**
  * Migrations mean that the Kibana index will look something like:
  * .kibana, .kibana_1, .kibana_323, etc. This finds all indices starting
@@ -172,8 +128,8 @@ async function fetchKibanaIndices(client) {
   return kibanaIndices.map(x => x.index).filter(isKibanaIndex);
 }
 
-export async function cleanKibanaIndices({ client, stats, log, kibanaUrl }) {
-  if (!(await isSpacesEnabled({ kibanaUrl }))) {
+export async function cleanKibanaIndices({ client, stats, log, kibanaPluginIds }) {
+  if (!kibanaPluginIds.includes('spaces')) {
     return await deleteKibanaIndices({
       client,
       stats,
@@ -204,3 +160,22 @@ export async function cleanKibanaIndices({ client, stats, log, kibanaUrl }) {
 
   stats.deletedIndex('.kibana');
 }
+
+export async function createDefaultSpace({ index, client }) {
+  await client.create({
+    index,
+    type: '_doc',
+    id: 'space:default',
+    ignore: 409,
+    body: {
+      type: 'space',
+      updated_at: new Date().toISOString(),
+      space: {
+        name: 'Default Space',
+        description: 'This is the default space',
+        disabledFeatures: [],
+        _reserved: true,
+      },
+    },
+  });
+}
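
createDefaultSpace also moves from client.index() to client.create() with ignore: 409: the create API returns a 409 version conflict when a document with id space:default already exists, and ignoring that status makes the helper idempotent instead of silently overwriting a default space a test may have modified. Repeated calls are therefore safe:

    // first call inserts the doc; repeats are no-ops rather than overwrites
    await createDefaultSpace({ index: '.kibana', client });
    await createDefaultSpace({ index: '.kibana', client }); // 409 ignored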
src/es_archiver/lib/kibana_plugins.ts (new file, 55 lines)
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import Axios from 'axios';
+
+const PLUGIN_STATUS_ID = /^plugin:(.+?)@/;
+const isString = (v: any): v is string => typeof v === 'string';
+
+/**
+ * Get the list of enabled plugins from Kibana, used to determine which
+ * uiExports to collect, whether we should clean or delete the kibana index,
+ * and if we need to inject the default space document in new versions of
+ * the index.
+ *
+ * This must be called before touching the Kibana index as Kibana becomes
+ * unstable when the .kibana index is deleted/cleaned and the status API
+ * will fail in situations where status.allowAnonymous=false and security
+ * is enabled.
+ */
+export async function getEnabledKibanaPluginIds(kibanaUrl: string): Promise<string[]> {
+  try {
+    const { data } = await Axios.get('/api/status', {
+      baseURL: kibanaUrl,
+    });
+
+    return (data.status.statuses as Array<{ id: string }>)
+      .map(({ id }) => {
+        const match = id.match(PLUGIN_STATUS_ID);
+        if (match) {
+          return match[1];
+        }
+      })
+      .filter(isString);
+  } catch (error) {
+    throw new Error(
+      `Unable to fetch Kibana status API response from Kibana at ${kibanaUrl}: ${error}`
+    );
+  }
+}
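
The /api/status payload contains one status entry per subsystem, and plugin entries use ids of the form plugin:<id>@<version>, so PLUGIN_STATUS_ID captures everything between the prefix and the first '@'. A sketch of the extraction against a hand-written payload (the payload shape is inferred from the parsing code above):

    const statuses = [
      { id: 'ui settings' },             // not a plugin entry, filtered out
      { id: 'plugin:spaces@8.0.0' },     // -> 'spaces'
      { id: 'plugin:xpack_main@8.0.0' }, // -> 'xpack_main'
    ];

    const ids = statuses
      .map(({ id }) => {
        const match = id.match(/^plugin:(.+?)@/);
        return match ? match[1] : undefined;
      })
      .filter((v): v is string => typeof v === 'string');

    // ids === ['spaces', 'xpack_main']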
@@ -18,6 +18,8 @@
  */
 
 import { constant, once, compact, flatten } from 'lodash';
+
+import { isWorker } from 'cluster';
 import { fromRoot, pkg } from '../utils';
 import { Config } from './config';
@@ -63,6 +63,8 @@ export function createTestConfig(name: string, options: CreateTestConfigOptions) {
       serverArgs: [
         ...config.xpack.api.get('kbnTestServer.serverArgs'),
         '--optimize.enabled=false',
+        // disable anonymous access so that we're testing both on and off in different suites
+        '--status.allowAnonymous=false',
         '--server.xsrf.disableProtection=true',
         ...disabledPlugins.map(key => `--xpack.${key}.enabled=false`),
       ],
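
Setting --status.allowAnonymous=false in this suite exercises exactly the failure mode called out in kibana_plugins.ts: with security enabled and anonymous status access disabled, an unauthenticated request to the status API is rejected (typically with a 401) rather than returning the plugin list. A hypothetical probe, not part of this diff, assuming credentials are embedded in kibanaUrl:

    try {
      await Axios.get('/api/status', { baseURL: kibanaUrl });
    } catch (error) {
      // axios rejects on non-2xx responses; anonymous access was refused,
      // which is why plugin ids are fetched before the .kibana index is touched
    }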