[6.x] Implement `LegacyService`. Use `core` to start legacy Kibana. (#22769)

* Implement `LegacyService`. Use `core` to start legacy Kibana.
* Fix Worker tests.
* Do not rely on kbnServer when testing mixins.
This commit is contained in:
parent 31ad5530b3
commit b0767168af
70 changed files with 2123 additions and 1696 deletions
@@ -256,7 +256,7 @@
    "@types/redux-actions": "^2.2.1",
    "@types/sinon": "^5.0.0",
    "@types/strip-ansi": "^3.0.0",
    "@types/supertest": "^2.0.4",
    "@types/supertest": "^2.0.5",
    "@types/type-detect": "^4.0.1",
    "angular-mocks": "1.4.7",
    "babel-eslint": "8.1.2",

@@ -335,8 +335,8 @@
    "simple-git": "1.37.0",
    "sinon": "^5.0.7",
    "strip-ansi": "^3.0.1",
    "supertest": "3.0.0",
    "supertest-as-promised": "4.0.2",
    "supertest": "^3.1.0",
    "supertest-as-promised": "^4.0.2",
    "tree-kill": "^1.1.0",
    "ts-jest": "^22.4.6",
    "ts-loader": "^3.5.0",
@@ -16,15 +16,14 @@
 * specific language governing permissions and limitations
 * under the License.
 */
/* eslint-env jest */

import EventEmitter from 'events';
import { assign, random } from 'lodash';
import sinon from 'sinon';
import cluster from 'cluster';
import { delay } from 'bluebird';

export default class MockClusterFork extends EventEmitter {
  constructor() {
class MockClusterFork extends EventEmitter {
  constructor(cluster) {
    super();

    let dead = true;

@@ -35,7 +34,7 @@ export default class MockClusterFork extends EventEmitter {

    assign(this, {
      process: {
        kill: sinon.spy(() => {
        kill: jest.fn(() => {
          (async () => {
            await wait();
            this.emit('disconnect');

@@ -46,13 +45,13 @@ export default class MockClusterFork extends EventEmitter {
          })();
        }),
      },
      isDead: sinon.spy(() => dead),
      send: sinon.stub()
      isDead: jest.fn(() => dead),
      send: jest.fn()
    });

    sinon.spy(this, 'on');
    sinon.spy(this, 'removeListener');
    sinon.spy(this, 'emit');
    jest.spyOn(this, 'on');
    jest.spyOn(this, 'removeListener');
    jest.spyOn(this, 'emit');

    (async () => {
      await wait();

@@ -61,3 +60,12 @@ export default class MockClusterFork extends EventEmitter {
    })();
  }
}

class MockCluster extends EventEmitter {
  fork = jest.fn(() => new MockClusterFork(this));
  setupMaster = jest.fn();
}

export function mockCluster() {
  return new MockCluster();
}
@@ -19,30 +19,29 @@

import { resolve } from 'path';
import { debounce, invoke, bindAll, once, uniq } from 'lodash';
import { fromEvent, race } from 'rxjs';
import { first } from 'rxjs/operators';

import Log from '../log';
import Worker from './worker';
import { Config } from '../../server/config/config';
import { transformDeprecations } from '../../server/config/transform_deprecations';
import { configureBasePathProxy } from './configure_base_path_proxy';

process.env.kbnWorkerType = 'managr';

export default class ClusterManager {
  static async create(opts = {}, settings = {}) {
    const transformedSettings = transformDeprecations(settings);
    const config = Config.withDefaultSchema(transformedSettings);

    const basePathProxy = opts.basePath
      ? await configureBasePathProxy(config)
      : undefined;

    return new ClusterManager(opts, config, basePathProxy);
  static create(opts, settings = {}, basePathProxy) {
    return new ClusterManager(
      opts,
      Config.withDefaultSchema(transformDeprecations(settings)),
      basePathProxy
    );
  }

  constructor(opts, config, basePathProxy) {
    this.log = new Log(opts.quiet, opts.silent);
    this.addedCount = 0;
    this.basePathProxy = basePathProxy;

    const serverArgv = [];
    const optimizerArgv = [

@@ -50,17 +49,15 @@ export default class ClusterManager {
      '--server.autoListen=false',
    ];

    if (basePathProxy) {
      this.basePathProxy = basePathProxy;

    if (this.basePathProxy) {
      optimizerArgv.push(
        `--server.basePath=${this.basePathProxy.getBasePath()}`,
        `--server.basePath=${this.basePathProxy.basePath}`,
        '--server.rewriteBasePath=true',
      );

      serverArgv.push(
        `--server.port=${this.basePathProxy.getTargetPort()}`,
        `--server.basePath=${this.basePathProxy.getBasePath()}`,
        `--server.port=${this.basePathProxy.targetPort}`,
        `--server.basePath=${this.basePathProxy.basePath}`,
        '--server.rewriteBasePath=true',
      );
    }

@@ -81,12 +78,6 @@ export default class ClusterManager {
      })
    ];

    if (basePathProxy) {
      // Pass server worker to the basepath proxy so that it can hold off the
      // proxying until server worker is ready.
      this.basePathProxy.serverWorker = this.server;
    }

    // broker messages between workers
    this.workers.forEach((worker) => {
      worker.on('broadcast', (msg) => {

@@ -129,7 +120,10 @@ export default class ClusterManager {
    this.setupManualRestart();
    invoke(this.workers, 'start');
    if (this.basePathProxy) {
      this.basePathProxy.start();
      this.basePathProxy.start({
        blockUntil: this.blockUntil.bind(this),
        shouldRedirectFromOldBasePath: this.shouldRedirectFromOldBasePath.bind(this),
      });
    }
  }

@@ -215,4 +209,23 @@ export default class ClusterManager {
    this.log.bad('failed to watch files!\n', err.stack);
    process.exit(1); // eslint-disable-line no-process-exit
  }

  shouldRedirectFromOldBasePath(path) {
    const isApp = path.startsWith('app/');
    const isKnownShortPath = ['login', 'logout', 'status'].includes(path);

    return isApp || isKnownShortPath;
  }

  blockUntil() {
    // Wait until `server` worker either crashes or starts to listen.
    if (this.server.listening || this.server.crashed) {
      return Promise.resolve();
    }

    return race(
      fromEvent(this.server, 'listening'),
      fromEvent(this.server, 'crashed')
    ).pipe(first()).toPromise();
  }
}
@ -17,36 +17,43 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
import sinon from 'sinon';
|
||||
import { mockCluster } from './__mocks__/cluster';
|
||||
jest.mock('cluster', () => mockCluster());
|
||||
jest.mock('readline', () => ({
|
||||
createInterface: jest.fn(() => ({
|
||||
on: jest.fn(),
|
||||
prompt: jest.fn(),
|
||||
setPrompt: jest.fn(),
|
||||
})),
|
||||
}));
|
||||
|
||||
import cluster from 'cluster';
|
||||
import { sample } from 'lodash';
|
||||
|
||||
import ClusterManager from './cluster_manager';
|
||||
import Worker from './worker';
|
||||
|
||||
describe('CLI cluster manager', function () {
|
||||
const sandbox = sinon.createSandbox();
|
||||
|
||||
beforeEach(function () {
|
||||
sandbox.stub(cluster, 'fork').callsFake(() => {
|
||||
describe('CLI cluster manager', () => {
|
||||
beforeEach(() => {
|
||||
cluster.fork.mockImplementation(() => {
|
||||
return {
|
||||
process: {
|
||||
kill: sinon.stub(),
|
||||
kill: jest.fn(),
|
||||
},
|
||||
isDead: sinon.stub().returns(false),
|
||||
removeListener: sinon.stub(),
|
||||
on: sinon.stub(),
|
||||
send: sinon.stub()
|
||||
isDead: jest.fn().mockReturnValue(false),
|
||||
removeListener: jest.fn(),
|
||||
addListener: jest.fn(),
|
||||
send: jest.fn()
|
||||
};
|
||||
});
|
||||
});
|
||||
|
||||
afterEach(function () {
|
||||
sandbox.restore();
|
||||
afterEach(() => {
|
||||
cluster.fork.mockReset();
|
||||
});
|
||||
|
||||
it('has two workers', async function () {
|
||||
const manager = await ClusterManager.create({});
|
||||
test('has two workers', () => {
|
||||
const manager = ClusterManager.create({});
|
||||
|
||||
expect(manager.workers).toHaveLength(2);
|
||||
for (const worker of manager.workers) expect(worker).toBeInstanceOf(Worker);
|
||||
|
@ -55,8 +62,8 @@ describe('CLI cluster manager', function () {
|
|||
expect(manager.server).toBeInstanceOf(Worker);
|
||||
});
|
||||
|
||||
it('delivers broadcast messages to other workers', async function () {
|
||||
const manager = await ClusterManager.create({});
|
||||
test('delivers broadcast messages to other workers', () => {
|
||||
const manager = ClusterManager.create({});
|
||||
|
||||
for (const worker of manager.workers) {
|
||||
Worker.prototype.start.call(worker);// bypass the debounced start method
|
||||
|
@ -69,10 +76,111 @@ describe('CLI cluster manager', function () {
|
|||
messenger.emit('broadcast', football);
|
||||
for (const worker of manager.workers) {
|
||||
if (worker === messenger) {
|
||||
expect(worker.fork.send.callCount).toBe(0);
|
||||
expect(worker.fork.send).not.toHaveBeenCalled();
|
||||
} else {
|
||||
expect(worker.fork.send.firstCall.args[0]).toBe(football);
|
||||
expect(worker.fork.send).toHaveBeenCalledTimes(1);
|
||||
expect(worker.fork.send).toHaveBeenCalledWith(football);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
describe('interaction with BasePathProxy', () => {
|
||||
test('correctly configures `BasePathProxy`.', async () => {
|
||||
const basePathProxyMock = { start: jest.fn() };
|
||||
|
||||
ClusterManager.create({}, {}, basePathProxyMock);
|
||||
|
||||
expect(basePathProxyMock.start).toHaveBeenCalledWith({
|
||||
shouldRedirectFromOldBasePath: expect.any(Function),
|
||||
blockUntil: expect.any(Function),
|
||||
});
|
||||
});
|
||||
|
||||
describe('proxy is configured with the correct `shouldRedirectFromOldBasePath` and `blockUntil` functions.', () => {
|
||||
let clusterManager;
|
||||
let shouldRedirectFromOldBasePath;
|
||||
let blockUntil;
|
||||
beforeEach(async () => {
|
||||
const basePathProxyMock = { start: jest.fn() };
|
||||
|
||||
clusterManager = ClusterManager.create({}, {}, basePathProxyMock);
|
||||
|
||||
jest.spyOn(clusterManager.server, 'addListener');
|
||||
jest.spyOn(clusterManager.server, 'removeListener');
|
||||
|
||||
[[{ blockUntil, shouldRedirectFromOldBasePath }]] = basePathProxyMock.start.mock.calls;
|
||||
});
|
||||
|
||||
test('`shouldRedirectFromOldBasePath()` returns `false` for unknown paths.', () => {
|
||||
expect(shouldRedirectFromOldBasePath('')).toBe(false);
|
||||
expect(shouldRedirectFromOldBasePath('some-path/')).toBe(false);
|
||||
expect(shouldRedirectFromOldBasePath('some-other-path')).toBe(false);
|
||||
});
|
||||
|
||||
test('`shouldRedirectFromOldBasePath()` returns `true` for `app` and other known paths.', () => {
|
||||
expect(shouldRedirectFromOldBasePath('app/')).toBe(true);
|
||||
expect(shouldRedirectFromOldBasePath('login')).toBe(true);
|
||||
expect(shouldRedirectFromOldBasePath('logout')).toBe(true);
|
||||
expect(shouldRedirectFromOldBasePath('status')).toBe(true);
|
||||
});
|
||||
|
||||
test('`blockUntil()` resolves immediately if worker has already crashed.', async () => {
|
||||
clusterManager.server.crashed = true;
|
||||
|
||||
await expect(blockUntil()).resolves.not.toBeDefined();
|
||||
expect(clusterManager.server.addListener).not.toHaveBeenCalled();
|
||||
expect(clusterManager.server.removeListener).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('`blockUntil()` resolves immediately if worker is already listening.', async () => {
|
||||
clusterManager.server.listening = true;
|
||||
|
||||
await expect(blockUntil()).resolves.not.toBeDefined();
|
||||
expect(clusterManager.server.addListener).not.toHaveBeenCalled();
|
||||
expect(clusterManager.server.removeListener).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('`blockUntil()` resolves when worker crashes.', async () => {
|
||||
const blockUntilPromise = blockUntil();
|
||||
|
||||
expect(clusterManager.server.addListener).toHaveBeenCalledTimes(2);
|
||||
expect(clusterManager.server.addListener).toHaveBeenCalledWith(
|
||||
'crashed',
|
||||
expect.any(Function)
|
||||
);
|
||||
|
||||
const [, [eventName, onCrashed]] = clusterManager.server.addListener.mock.calls;
|
||||
// Check event name to make sure we call the right callback,
|
||||
// in Jest 23 we could use `toHaveBeenNthCalledWith` instead.
|
||||
expect(eventName).toBe('crashed');
|
||||
expect(clusterManager.server.removeListener).not.toHaveBeenCalled();
|
||||
|
||||
onCrashed();
|
||||
await expect(blockUntilPromise).resolves.not.toBeDefined();
|
||||
|
||||
expect(clusterManager.server.removeListener).toHaveBeenCalledTimes(2);
|
||||
});
|
||||
|
||||
test('`blockUntil()` resolves when worker starts listening.', async () => {
|
||||
const blockUntilPromise = blockUntil();
|
||||
|
||||
expect(clusterManager.server.addListener).toHaveBeenCalledTimes(2);
|
||||
expect(clusterManager.server.addListener).toHaveBeenCalledWith(
|
||||
'listening',
|
||||
expect.any(Function)
|
||||
);
|
||||
|
||||
const [[eventName, onListening]] = clusterManager.server.addListener.mock.calls;
|
||||
// Check event name to make sure we call the right callback,
|
||||
// in Jest 23 we could use `toHaveBeenNthCalledWith` instead.
|
||||
expect(eventName).toBe('listening');
|
||||
expect(clusterManager.server.removeListener).not.toHaveBeenCalled();
|
||||
|
||||
onListening();
|
||||
await expect(blockUntilPromise).resolves.not.toBeDefined();
|
||||
|
||||
expect(clusterManager.server.removeListener).toHaveBeenCalledTimes(2);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
@ -1,64 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch B.V. under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch B.V. licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
import { Server } from 'hapi';
|
||||
import { createBasePathProxy } from '../../core';
|
||||
import { setupLogging } from '../../server/logging';
|
||||
|
||||
export async function configureBasePathProxy(config) {
|
||||
// New platform forwards all logs to the legacy platform so we need HapiJS server
|
||||
// here just for logging purposes and nothing else.
|
||||
const server = new Server();
|
||||
setupLogging(server, config);
|
||||
|
||||
const basePathProxy = createBasePathProxy({ server, config });
|
||||
|
||||
await basePathProxy.configure({
|
||||
shouldRedirectFromOldBasePath: path => {
|
||||
const isApp = path.startsWith('app/');
|
||||
const isKnownShortPath = ['login', 'logout', 'status'].includes(path);
|
||||
|
||||
return isApp || isKnownShortPath;
|
||||
},
|
||||
|
||||
blockUntil: () => {
|
||||
// Wait until `serverWorker either crashes or starts to listen.
|
||||
// The `serverWorker` property should be set by the ClusterManager
|
||||
// once it creates the worker.
|
||||
const serverWorker = basePathProxy.serverWorker;
|
||||
if (serverWorker.listening || serverWorker.crashed) {
|
||||
return Promise.resolve();
|
||||
}
|
||||
|
||||
return new Promise(resolve => {
|
||||
const done = () => {
|
||||
serverWorker.removeListener('listening', done);
|
||||
serverWorker.removeListener('crashed', done);
|
||||
|
||||
resolve();
|
||||
};
|
||||
|
||||
serverWorker.on('listening', done);
|
||||
serverWorker.on('crashed', done);
|
||||
});
|
||||
},
|
||||
});
|
||||
|
||||
return basePathProxy;
|
||||
}
|
|
@ -1,163 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch B.V. under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch B.V. licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
jest.mock('../../core', () => ({
|
||||
createBasePathProxy: jest.fn(),
|
||||
}));
|
||||
|
||||
jest.mock('../../server/logging', () => ({
|
||||
setupLogging: jest.fn(),
|
||||
}));
|
||||
|
||||
import { Server } from 'hapi';
|
||||
import { createBasePathProxy as createBasePathProxyMock } from '../../core';
|
||||
import { setupLogging as setupLoggingMock } from '../../server/logging';
|
||||
import { configureBasePathProxy } from './configure_base_path_proxy';
|
||||
|
||||
describe('configureBasePathProxy()', () => {
|
||||
it('returns `BasePathProxy` instance.', async () => {
|
||||
const basePathProxyMock = { configure: jest.fn() };
|
||||
createBasePathProxyMock.mockReturnValue(basePathProxyMock);
|
||||
|
||||
const basePathProxy = await configureBasePathProxy({});
|
||||
|
||||
expect(basePathProxy).toBe(basePathProxyMock);
|
||||
});
|
||||
|
||||
it('correctly configures `BasePathProxy`.', async () => {
|
||||
const configMock = {};
|
||||
const basePathProxyMock = { configure: jest.fn() };
|
||||
createBasePathProxyMock.mockReturnValue(basePathProxyMock);
|
||||
|
||||
await configureBasePathProxy(configMock);
|
||||
|
||||
// Check that logging is configured with the right parameters.
|
||||
expect(setupLoggingMock).toHaveBeenCalledWith(
|
||||
expect.any(Server),
|
||||
configMock
|
||||
);
|
||||
|
||||
const [[server]] = setupLoggingMock.mock.calls;
|
||||
expect(createBasePathProxyMock).toHaveBeenCalledWith({
|
||||
config: configMock,
|
||||
server,
|
||||
});
|
||||
|
||||
expect(basePathProxyMock.configure).toHaveBeenCalledWith({
|
||||
shouldRedirectFromOldBasePath: expect.any(Function),
|
||||
blockUntil: expect.any(Function),
|
||||
});
|
||||
});
|
||||
|
||||
describe('configured with the correct `shouldRedirectFromOldBasePath` and `blockUntil` functions.', async () => {
|
||||
let serverWorkerMock;
|
||||
let shouldRedirectFromOldBasePath;
|
||||
let blockUntil;
|
||||
beforeEach(async () => {
|
||||
serverWorkerMock = {
|
||||
listening: false,
|
||||
crashed: false,
|
||||
on: jest.fn(),
|
||||
removeListener: jest.fn(),
|
||||
};
|
||||
|
||||
const basePathProxyMock = {
|
||||
configure: jest.fn(),
|
||||
serverWorker: serverWorkerMock,
|
||||
};
|
||||
|
||||
createBasePathProxyMock.mockReturnValue(basePathProxyMock);
|
||||
|
||||
await configureBasePathProxy({});
|
||||
|
||||
[[{ blockUntil, shouldRedirectFromOldBasePath }]] = basePathProxyMock.configure.mock.calls;
|
||||
});
|
||||
|
||||
it('`shouldRedirectFromOldBasePath()` returns `false` for unknown paths.', async () => {
|
||||
expect(shouldRedirectFromOldBasePath('')).toBe(false);
|
||||
expect(shouldRedirectFromOldBasePath('some-path/')).toBe(false);
|
||||
expect(shouldRedirectFromOldBasePath('some-other-path')).toBe(false);
|
||||
});
|
||||
|
||||
it('`shouldRedirectFromOldBasePath()` returns `true` for `app` and other known paths.', async () => {
|
||||
expect(shouldRedirectFromOldBasePath('app/')).toBe(true);
|
||||
expect(shouldRedirectFromOldBasePath('login')).toBe(true);
|
||||
expect(shouldRedirectFromOldBasePath('logout')).toBe(true);
|
||||
expect(shouldRedirectFromOldBasePath('status')).toBe(true);
|
||||
});
|
||||
|
||||
it('`blockUntil()` resolves immediately if worker has already crashed.', async () => {
|
||||
serverWorkerMock.crashed = true;
|
||||
|
||||
await expect(blockUntil()).resolves.not.toBeDefined();
|
||||
expect(serverWorkerMock.on).not.toHaveBeenCalled();
|
||||
expect(serverWorkerMock.removeListener).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('`blockUntil()` resolves immediately if worker is already listening.', async () => {
|
||||
serverWorkerMock.listening = true;
|
||||
|
||||
await expect(blockUntil()).resolves.not.toBeDefined();
|
||||
expect(serverWorkerMock.on).not.toHaveBeenCalled();
|
||||
expect(serverWorkerMock.removeListener).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('`blockUntil()` resolves when worker crashes.', async () => {
|
||||
const blockUntilPromise = blockUntil();
|
||||
|
||||
expect(serverWorkerMock.on).toHaveBeenCalledTimes(2);
|
||||
expect(serverWorkerMock.on).toHaveBeenCalledWith(
|
||||
'crashed',
|
||||
expect.any(Function)
|
||||
);
|
||||
|
||||
const [, [eventName, onCrashed]] = serverWorkerMock.on.mock.calls;
|
||||
// Check event name to make sure we call the right callback,
|
||||
// in Jest 23 we could use `toHaveBeenNthCalledWith` instead.
|
||||
expect(eventName).toBe('crashed');
|
||||
expect(serverWorkerMock.removeListener).not.toHaveBeenCalled();
|
||||
|
||||
onCrashed();
|
||||
await expect(blockUntilPromise).resolves.not.toBeDefined();
|
||||
|
||||
expect(serverWorkerMock.removeListener).toHaveBeenCalledTimes(2);
|
||||
});
|
||||
|
||||
it('`blockUntil()` resolves when worker starts listening.', async () => {
|
||||
const blockUntilPromise = blockUntil();
|
||||
|
||||
expect(serverWorkerMock.on).toHaveBeenCalledTimes(2);
|
||||
expect(serverWorkerMock.on).toHaveBeenCalledWith(
|
||||
'listening',
|
||||
expect.any(Function)
|
||||
);
|
||||
|
||||
const [[eventName, onListening]] = serverWorkerMock.on.mock.calls;
|
||||
// Check event name to make sure we call the right callback,
|
||||
// in Jest 23 we could use `toHaveBeenNthCalledWith` instead.
|
||||
expect(eventName).toBe('listening');
|
||||
expect(serverWorkerMock.removeListener).not.toHaveBeenCalled();
|
||||
|
||||
onListening();
|
||||
await expect(blockUntilPromise).resolves.not.toBeDefined();
|
||||
|
||||
expect(serverWorkerMock.removeListener).toHaveBeenCalledTimes(2);
|
||||
});
|
||||
});
|
||||
});
|
|
@ -17,26 +17,25 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
import sinon from 'sinon';
|
||||
import cluster from 'cluster';
|
||||
import { findIndex } from 'lodash';
|
||||
import { mockCluster } from './__mocks__/cluster';
|
||||
jest.mock('cluster', () => mockCluster());
|
||||
|
||||
import cluster from 'cluster';
|
||||
|
||||
import MockClusterFork from './_mock_cluster_fork';
|
||||
import Worker from './worker';
|
||||
import Log from '../log';
|
||||
|
||||
const workersToShutdown = [];
|
||||
|
||||
function assertListenerAdded(emitter, event) {
|
||||
sinon.assert.calledWith(emitter.on, event);
|
||||
expect(emitter.on).toHaveBeenCalledWith(event, expect.any(Function));
|
||||
}
|
||||
|
||||
function assertListenerRemoved(emitter, event) {
|
||||
sinon.assert.calledWith(
|
||||
emitter.removeListener,
|
||||
event,
|
||||
emitter.on.args[findIndex(emitter.on.args, { 0: event })][1]
|
||||
);
|
||||
const [, onEventListener] = emitter.on.mock.calls.find(([eventName]) => {
|
||||
return eventName === event;
|
||||
});
|
||||
expect(emitter.removeListener).toHaveBeenCalledWith(event, onEventListener);
|
||||
}
|
||||
|
||||
function setup(opts = {}) {
|
||||
|
@ -50,81 +49,82 @@ function setup(opts = {}) {
|
|||
return worker;
|
||||
}
|
||||
|
||||
describe('CLI cluster manager', function () {
|
||||
const sandbox = sinon.createSandbox();
|
||||
describe('CLI cluster manager', () => {
|
||||
afterEach(async () => {
|
||||
while(workersToShutdown.length > 0) {
|
||||
const worker = workersToShutdown.pop();
|
||||
// If `fork` exists we should set `exitCode` to the non-zero value to
|
||||
// prevent worker from auto restart.
|
||||
if (worker.fork) {
|
||||
worker.fork.exitCode = 1;
|
||||
}
|
||||
|
||||
beforeEach(function () {
|
||||
sandbox.stub(cluster, 'fork').callsFake(() => new MockClusterFork());
|
||||
});
|
||||
|
||||
afterEach(async function () {
|
||||
sandbox.restore();
|
||||
|
||||
for (const worker of workersToShutdown) {
|
||||
await worker.shutdown();
|
||||
}
|
||||
|
||||
cluster.fork.mockClear();
|
||||
});
|
||||
|
||||
describe('#onChange', function () {
|
||||
describe('opts.watch = true', function () {
|
||||
it('restarts the fork', function () {
|
||||
describe('#onChange', () => {
|
||||
describe('opts.watch = true', () => {
|
||||
test('restarts the fork', () => {
|
||||
const worker = setup({ watch: true });
|
||||
sinon.stub(worker, 'start');
|
||||
jest.spyOn(worker, 'start').mockImplementation(() => {});
|
||||
worker.onChange('/some/path');
|
||||
expect(worker.changes).toEqual(['/some/path']);
|
||||
sinon.assert.calledOnce(worker.start);
|
||||
expect(worker.start).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe('opts.watch = false', function () {
|
||||
it('does not restart the fork', function () {
|
||||
describe('opts.watch = false', () => {
|
||||
test('does not restart the fork', () => {
|
||||
const worker = setup({ watch: false });
|
||||
sinon.stub(worker, 'start');
|
||||
jest.spyOn(worker, 'start').mockImplementation(() => {});
|
||||
worker.onChange('/some/path');
|
||||
expect(worker.changes).toEqual([]);
|
||||
sinon.assert.notCalled(worker.start);
|
||||
expect(worker.start).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('#shutdown', function () {
|
||||
describe('after starting()', function () {
|
||||
it('kills the worker and unbinds from message, online, and disconnect events', async function () {
|
||||
describe('#shutdown', () => {
|
||||
describe('after starting()', () => {
|
||||
test('kills the worker and unbinds from message, online, and disconnect events', async () => {
|
||||
const worker = setup();
|
||||
await worker.start();
|
||||
expect(worker).toHaveProperty('online', true);
|
||||
const fork = worker.fork;
|
||||
sinon.assert.notCalled(fork.process.kill);
|
||||
expect(fork.process.kill).not.toHaveBeenCalled();
|
||||
assertListenerAdded(fork, 'message');
|
||||
assertListenerAdded(fork, 'online');
|
||||
assertListenerAdded(fork, 'disconnect');
|
||||
worker.shutdown();
|
||||
sinon.assert.calledOnce(fork.process.kill);
|
||||
expect(fork.process.kill).toHaveBeenCalledTimes(1);
|
||||
assertListenerRemoved(fork, 'message');
|
||||
assertListenerRemoved(fork, 'online');
|
||||
assertListenerRemoved(fork, 'disconnect');
|
||||
});
|
||||
});
|
||||
|
||||
describe('before being started', function () {
|
||||
it('does nothing', function () {
|
||||
describe('before being started', () => {
|
||||
test('does nothing', () => {
|
||||
const worker = setup();
|
||||
worker.shutdown();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('#parseIncomingMessage()', function () {
|
||||
describe('on a started worker', function () {
|
||||
it(`is bound to fork's message event`, async function () {
|
||||
describe('#parseIncomingMessage()', () => {
|
||||
describe('on a started worker', () => {
|
||||
test(`is bound to fork's message event`, async () => {
|
||||
const worker = setup();
|
||||
await worker.start();
|
||||
sinon.assert.calledWith(worker.fork.on, 'message');
|
||||
expect(worker.fork.on).toHaveBeenCalledWith('message', expect.any(Function));
|
||||
});
|
||||
});
|
||||
|
||||
describe('do after', function () {
|
||||
it('ignores non-array messages', function () {
|
||||
describe('do after', () => {
|
||||
test('ignores non-array messages', () => {
|
||||
const worker = setup();
|
||||
worker.parseIncomingMessage('some string thing');
|
||||
worker.parseIncomingMessage(0);
|
||||
|
@ -134,39 +134,39 @@ describe('CLI cluster manager', function () {
|
|||
worker.parseIncomingMessage(/weird/);
|
||||
});
|
||||
|
||||
it('calls #onMessage with message parts', function () {
|
||||
test('calls #onMessage with message parts', () => {
|
||||
const worker = setup();
|
||||
const stub = sinon.stub(worker, 'onMessage');
|
||||
jest.spyOn(worker, 'onMessage').mockImplementation(() => {});
|
||||
worker.parseIncomingMessage([10, 100, 1000, 10000]);
|
||||
sinon.assert.calledWith(stub, 10, 100, 1000, 10000);
|
||||
expect(worker.onMessage).toHaveBeenCalledWith(10, 100, 1000, 10000);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('#onMessage', function () {
|
||||
describe('when sent WORKER_BROADCAST message', function () {
|
||||
it('emits the data to be broadcasted', function () {
|
||||
describe('#onMessage', () => {
|
||||
describe('when sent WORKER_BROADCAST message', () => {
|
||||
test('emits the data to be broadcasted', () => {
|
||||
const worker = setup();
|
||||
const data = {};
|
||||
const stub = sinon.stub(worker, 'emit');
|
||||
jest.spyOn(worker, 'emit').mockImplementation(() => {});
|
||||
worker.onMessage('WORKER_BROADCAST', data);
|
||||
sinon.assert.calledWithExactly(stub, 'broadcast', data);
|
||||
expect(worker.emit).toHaveBeenCalledWith('broadcast', data);
|
||||
});
|
||||
});
|
||||
|
||||
describe('when sent WORKER_LISTENING message', function () {
|
||||
it('sets the listening flag and emits the listening event', function () {
|
||||
describe('when sent WORKER_LISTENING message', () => {
|
||||
test('sets the listening flag and emits the listening event', () => {
|
||||
const worker = setup();
|
||||
const stub = sinon.stub(worker, 'emit');
|
||||
jest.spyOn(worker, 'emit').mockImplementation(() => {});
|
||||
expect(worker).toHaveProperty('listening', false);
|
||||
worker.onMessage('WORKER_LISTENING');
|
||||
expect(worker).toHaveProperty('listening', true);
|
||||
sinon.assert.calledWithExactly(stub, 'listening');
|
||||
expect(worker.emit).toHaveBeenCalledWith('listening');
|
||||
});
|
||||
});
|
||||
|
||||
describe('when passed an unknown message', function () {
|
||||
it('does nothing', function () {
|
||||
describe('when passed an unknown message', () => {
|
||||
test('does nothing', () => {
|
||||
const worker = setup();
|
||||
worker.onMessage('asdlfkajsdfahsdfiohuasdofihsdoif');
|
||||
worker.onMessage({});
|
||||
|
@ -175,46 +175,46 @@ describe('CLI cluster manager', function () {
|
|||
});
|
||||
});
|
||||
|
||||
describe('#start', function () {
|
||||
describe('when not started', function () {
|
||||
// TODO This test is flaky, see https://github.com/elastic/kibana/issues/15888
|
||||
it.skip('creates a fork and waits for it to come online', async function () {
|
||||
describe('#start', () => {
|
||||
describe('when not started', () => {
|
||||
test('creates a fork and waits for it to come online', async () => {
|
||||
const worker = setup();
|
||||
|
||||
sinon.spy(worker, 'on');
|
||||
jest.spyOn(worker, 'on');
|
||||
|
||||
await worker.start();
|
||||
|
||||
sinon.assert.calledOnce(cluster.fork);
|
||||
sinon.assert.calledWith(worker.on, 'fork:online');
|
||||
expect(cluster.fork).toHaveBeenCalledTimes(1);
|
||||
expect(worker.on).toHaveBeenCalledWith('fork:online', expect.any(Function));
|
||||
});
|
||||
|
||||
// TODO This test is flaky, see https://github.com/elastic/kibana/issues/15888
|
||||
it.skip('listens for cluster and process "exit" events', async function () {
|
||||
test('listens for cluster and process "exit" events', async () => {
|
||||
const worker = setup();
|
||||
|
||||
sinon.spy(process, 'on');
|
||||
sinon.spy(cluster, 'on');
|
||||
jest.spyOn(process, 'on');
|
||||
jest.spyOn(cluster, 'on');
|
||||
|
||||
await worker.start();
|
||||
|
||||
sinon.assert.calledOnce(cluster.on);
|
||||
sinon.assert.calledWith(cluster.on, 'exit');
|
||||
sinon.assert.calledOnce(process.on);
|
||||
sinon.assert.calledWith(process.on, 'exit');
|
||||
expect(cluster.on).toHaveBeenCalledTimes(1);
|
||||
expect(cluster.on).toHaveBeenCalledWith('exit', expect.any(Function));
|
||||
expect(process.on).toHaveBeenCalledTimes(1);
|
||||
expect(process.on).toHaveBeenCalledWith('exit', expect.any(Function));
|
||||
});
|
||||
});
|
||||
|
||||
describe('when already started', function () {
|
||||
it('calls shutdown and waits for the graceful shutdown to cause a restart', async function () {
|
||||
describe('when already started', () => {
|
||||
test('calls shutdown and waits for the graceful shutdown to cause a restart', async () => {
|
||||
const worker = setup();
|
||||
await worker.start();
|
||||
sinon.spy(worker, 'shutdown');
|
||||
sinon.spy(worker, 'on');
|
||||
|
||||
jest.spyOn(worker, 'shutdown');
|
||||
jest.spyOn(worker, 'on');
|
||||
|
||||
worker.start();
|
||||
sinon.assert.calledOnce(worker.shutdown);
|
||||
sinon.assert.calledWith(worker.on, 'online');
|
||||
|
||||
expect(worker.shutdown).toHaveBeenCalledTimes(1);
|
||||
expect(worker.on).toHaveBeenCalledWith('online', expect.any(Function));
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
@@ -17,9 +17,8 @@
 * under the License.
 */

import _ from 'lodash';
import chalk from 'chalk';

export const green = _.flow(chalk.black, chalk.bgGreen);
export const red = _.flow(chalk.white, chalk.bgRed);
export const yellow = _.flow(chalk.black, chalk.bgYellow);
export const green = chalk.black.bgGreen;
export const red = chalk.white.bgRed;
export const yellow = chalk.black.bgYellow;
@@ -4,12 +4,15 @@ exports[`cli invalid config support exits with statusCode 64 and logs a single l
Array [
  Object {
    "@timestamp": "## @timestamp ##",
    "error": "## Error with stack trace ##",
    "level": "fatal",
    "message": "\\"unknown.key\\", \\"other.unknown.key\\", \\"other.third\\", \\"some.flat.key\\", and \\"some.array\\" settings were not applied. Check for spelling errors and ensure that expected plugins are installed.",
    "pid": "## PID ##",
    "tags": Array [
      "fatal",
      "root",
    ],
    "type": "log",
    "type": "error",
  },
]
`;
@@ -39,7 +39,8 @@ describe('cli invalid config support', function () {
      .map(obj => ({
        ...obj,
        pid: '## PID ##',
        '@timestamp': '## @timestamp ##'
        '@timestamp': '## @timestamp ##',
        error: '## Error with stack trace ##',
      }));

    expect(error).toBe(undefined);
@@ -19,15 +19,12 @@

import _ from 'lodash';
import { statSync, lstatSync, realpathSync } from 'fs';
import { isWorker } from 'cluster';
import { resolve } from 'path';

import { fromRoot } from '../../utils';
import { getConfig } from '../../server/path';
import { Config } from '../../server/config/config';
import { getConfigFromFiles } from '../../core/server/config';
import { bootstrap } from '../../core/server';
import { readKeystore } from './read_keystore';
import { transformDeprecations } from '../../server/config/transform_deprecations';

import { DEV_SSL_CERT_PATH, DEV_SSL_KEY_PATH } from '../dev_ssl';

@@ -77,12 +74,11 @@ const configPathCollector = pathCollector();
const pluginDirCollector = pathCollector();
const pluginPathCollector = pathCollector();

function readServerSettings(opts, extraCliOptions) {
  const settings = getConfigFromFiles([].concat(opts.config || []));
  const set = _.partial(_.set, settings);
  const get = _.partial(_.get, settings);
  const has = _.partial(_.has, settings);
  const merge = _.partial(_.merge, settings);
function applyConfigOverrides(rawConfig, opts, extraCliOptions) {
  const set = _.partial(_.set, rawConfig);
  const get = _.partial(_.get, rawConfig);
  const has = _.partial(_.has, rawConfig);
  const merge = _.partial(_.merge, rawConfig);

  if (opts.dev) {
    set('env', 'development');

@@ -131,7 +127,7 @@ function readServerSettings(opts, extraCliOptions) {
  merge(extraCliOptions);
  merge(readKeystore(get('path.data')));

  return settings;
  return rawConfig;
}

export default function (program) {

@@ -199,67 +195,23 @@ export default function (program) {
      }
    }

    const getCurrentSettings = () => readServerSettings(opts, this.getUnknownOptions());
    const settings = getCurrentSettings();

    if (CAN_CLUSTER && opts.dev && !isWorker) {
      // stop processing the action and handoff to cluster manager
      const ClusterManager = require(CLUSTER_MANAGER_PATH);
      await ClusterManager.create(opts, settings);
      return;
    }

    let kbnServer = {};
    const KbnServer = require('../../server/kbn_server');
    try {
      kbnServer = new KbnServer(settings);
      await kbnServer.ready();
    } catch (error) {
      const { server } = kbnServer;

      switch (error.code) {
        case 'EADDRINUSE':
          logFatal(`Port ${error.port} is already in use. Another instance of Kibana may be running!`, server);
          break;

        case 'InvalidConfig':
          logFatal(error.message, server);
          break;

        default:
          logFatal(error, server);
          break;
      }

      kbnServer.close();
      const exitCode = error.processExitCode == null ? 1 : error.processExitCode;
      // eslint-disable-next-line no-process-exit
      process.exit(exitCode);
    }

    process.on('SIGHUP', async function reloadConfig() {
      const settings = transformDeprecations(getCurrentSettings());
      const config = new Config(kbnServer.config.getSchema(), settings);

      kbnServer.server.log(['info', 'config'], 'Reloading logging configuration due to SIGHUP.');
      await kbnServer.applyLoggingConfiguration(config);
      kbnServer.server.log(['info', 'config'], 'Reloaded logging configuration due to SIGHUP.');

      // If new platform config subscription is active, let's notify it with the updated config.
      if (kbnServer.newPlatform) {
        kbnServer.newPlatform.updateConfig(config.get());
      }
    const unknownOptions = this.getUnknownOptions();
    await bootstrap({
      configs: [].concat(opts.config || []),
      cliArgs: {
        dev: !!opts.dev,
        envName: unknownOptions.env ? unknownOptions.env.name : undefined,
        quiet: !!opts.quiet,
        silent: !!opts.silent,
        watch: !!opts.watch,
        basePath: !!opts.basePath,
      },
      features: {
        isClusterModeSupported: CAN_CLUSTER,
        isOssModeSupported: XPACK_OPTIONAL,
        isXPackInstalled: XPACK_INSTALLED,
      },
      applyConfigOverrides: rawConfig => applyConfigOverrides(rawConfig, opts, unknownOptions),
    });

    return kbnServer;
  });
}

function logFatal(message, server) {
  if (server) {
    server.log(['fatal'], message);
  }

  // It's possible for the Hapi logger to not be setup
  console.error('FATAL', message);
}
@@ -5,26 +5,17 @@ Core is a set of systems (frontend, backend etc.) that Kibana and its plugins ar
## Integration with the "legacy" Kibana

Most of the existing core functionality is still spread over "legacy" Kibana and it will take some time to upgrade it.
Kibana is still started using existing "legacy" CLI and bootstraps `core` only when needed. At the moment `core` manages
HTTP connections, handles TLS configuration and base path proxy. All requests to Kibana server will hit HTTP server
exposed by the `core` first and it will decide whether request can be solely handled by the new platform or request should
be proxied to the "legacy" Kibana. This setup allows `core` to gradually introduce any "pre-route" processing
logic, expose new routes or replace old ones handled by the "legacy" Kibana currently.
Kibana is started using existing "legacy" CLI that bootstraps `core` which in turn creates the "legacy" Kibana server.
At the moment `core` manages HTTP connections, handles TLS configuration and base path proxy. All requests to Kibana server
will hit HTTP server exposed by the `core` first and it will decide whether request can be solely handled by the new
platform or request should be proxied to the "legacy" Kibana. This setup allows `core` to gradually introduce any "pre-route"
processing logic, expose new routes or replace old ones handled by the "legacy" Kibana currently.

Once config has been loaded and validated by the "legacy" Kibana it's passed to the `core` where some of its parts will
be additionally validated so that we can make config validation stricter with the new config validation system. Even though
the new validation system provided by the `core` is also based on Joi internally it is complemented with custom rules
tailored to our needs (e.g. `byteSize`, `duration` etc.). That means that config values that are accepted by the "legacy"
Kibana may be rejected by the `core`.

One can also define new configuration keys under `__newPlatform` if these keys are supposed to be used by the `core` only
and should not be validated by the "legacy" Kibana, e.g.

```yaml
__newPlatform:
  plugins:
    scanDirs: ['./example_plugins']
```
Once config has been loaded and some of its parts were validated by the `core` it's passed to the "legacy" Kibana where
it will be additionally validated so that we can make config validation stricter with the new config validation system.
Even though the new validation system provided by the `core` is also based on Joi internally it is complemented with custom
rules tailored to our needs (e.g. `byteSize`, `duration` etc.). That means that config values that were previously accepted
by the "legacy" Kibana may be rejected by the `core` now.

Even though `core` has its own logging system it doesn't output log records directly (e.g. to file or terminal), but instead
forward them to the "legacy" Kibana so that they look the same as the rest of the log records throughout Kibana.
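To make the request-routing behaviour described in the README above concrete, here is a minimal sketch of the idea; the names and shapes are illustrative only and are not the actual `core` implementation touched by this commit. Every incoming request hits one root handler owned by `core`, which either serves it from the new platform or hands it off to the legacy Kibana server.

```ts
// Hypothetical sketch: core receives every request first and decides whether
// the new platform owns the route or the request must go to legacy Kibana.
import { IncomingMessage, ServerResponse } from 'http';

type Handler = (req: IncomingMessage, res: ServerResponse) => void;

function createRootHandler(
  newPlatformRoutes: Map<string, Handler>, // routes already migrated to core
  proxyToLegacy: Handler                   // forwards everything else to the legacy server
): Handler {
  return (req, res) => {
    const handler = newPlatformRoutes.get(req.url || '');
    if (handler !== undefined) {
      // The new platform owns this route, so handle it directly.
      handler(req, res);
    } else {
      // Anything not yet migrated is proxied to the "legacy" Kibana.
      proxyToLegacy(req, res);
    }
  };
}
```

Because the decision is made per request, `core` can add "pre-route" processing or take over individual routes without the legacy server noticing, which is the gradual migration path the README describes.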
@@ -1,20 +0,0 @@
/*
 * Licensed to Elasticsearch B.V. under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch B.V. licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

export { injectIntoKbnServer, createBasePathProxy } from './server/legacy_compat';
src/core/server/__snapshots__/index.test.ts.snap (new file, 21 lines)

@@ -0,0 +1,21 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP

exports[`does not fail on "start" if there are unused paths detected: unused paths logs 1`] = `
Object {
  "debug": Array [
    Array [
      "starting server",
    ],
  ],
  "error": Array [],
  "fatal": Array [],
  "info": Array [],
  "log": Array [],
  "trace": Array [
    Array [
      "some config paths are not handled by the core: [\\"some.path\\",\\"another.path\\"]",
    ],
  ],
  "warn": Array [],
}
`;
src/core/server/bootstrap.ts (new file, 106 lines)

@@ -0,0 +1,106 @@
/*
 * Licensed to Elasticsearch B.V. under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch B.V. licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

import chalk from 'chalk';
import { isMaster } from 'cluster';
import { CliArgs, Env, RawConfigService } from './config';
import { LegacyObjectToConfigAdapter } from './legacy_compat';
import { Root } from './root';

interface KibanaFeatures {
  // Indicates whether we can run Kibana in a so called cluster mode in which
  // Kibana is run as a "worker" process together with optimizer "worker" process
  // that are orchestrated by the "master" process (dev mode only feature).
  isClusterModeSupported: boolean;

  // Indicates whether we can run Kibana without X-Pack plugin pack even if it's
  // installed (dev mode only feature).
  isOssModeSupported: boolean;

  // Indicates whether X-Pack plugin pack is installed and available.
  isXPackInstalled: boolean;
}

interface BootstrapArgs {
  configs: string[];
  cliArgs: CliArgs;
  applyConfigOverrides: (config: Record<string, any>) => Record<string, any>;
  features: KibanaFeatures;
}

export async function bootstrap({
  configs,
  cliArgs,
  applyConfigOverrides,
  features,
}: BootstrapArgs) {
  const env = Env.createDefault({
    configs,
    cliArgs,
    isDevClusterMaster: isMaster && cliArgs.dev && features.isClusterModeSupported,
  });

  const rawConfigService = new RawConfigService(
    env.configs,
    rawConfig => new LegacyObjectToConfigAdapter(applyConfigOverrides(rawConfig))
  );

  rawConfigService.loadConfig();

  const root = new Root(rawConfigService.getConfig$(), env, onRootShutdown);

  function shutdown(reason?: Error) {
    rawConfigService.stop();
    return root.shutdown(reason);
  }

  try {
    await root.start();
  } catch (err) {
    await shutdown(err);
  }

  process.on('SIGHUP', () => {
    const cliLogger = root.logger.get('cli');
    cliLogger.info('Reloading logging configuration due to SIGHUP.', { tags: ['config'] });

    try {
      rawConfigService.reloadConfig();
    } catch (err) {
      return shutdown(err);
    }

    cliLogger.info('Reloaded logging configuration due to SIGHUP.', { tags: ['config'] });
  });

  process.on('SIGINT', () => shutdown());
  process.on('SIGTERM', () => shutdown());
}

function onRootShutdown(reason?: any) {
  if (reason !== undefined) {
    // There is a chance that logger wasn't configured properly and error that
    // forced root to shut down could go unnoticed. To prevent this we always
    // mirror such fatal errors in standard output with `console.error`.
    // tslint:disable no-console
    console.error(`\n${chalk.white.bgRed(' FATAL ')} ${reason}\n`);
  }

  process.exit(reason === undefined ? 0 : (reason as any).processExitCode || 1);
}
@@ -21,11 +21,19 @@

import { EnvOptions } from '../../env';

export function getEnvOptions(options: Partial<EnvOptions> = {}): EnvOptions {
type DeepPartial<T> = {
  [P in keyof T]?: T[P] extends Array<infer R> ? Array<DeepPartial<R>> : DeepPartial<T[P]>
};

export function getEnvOptions(options: DeepPartial<EnvOptions> = {}): EnvOptions {
  return {
    configs: options.configs || [],
    cliArgs: {
      dev: true,
      quiet: false,
      silent: false,
      watch: false,
      basePath: false,
      ...(options.cliArgs || {}),
    },
    isDevClusterMaster:
@ -1,12 +1,82 @@
|
|||
// Jest Snapshot v1, https://goo.gl/fbAQLP
|
||||
|
||||
exports[`correctly creates default environment if \`--env.name\` is supplied.: dev env properties 1`] = `
|
||||
Env {
|
||||
"binDir": "/test/cwd/bin",
|
||||
"cliArgs": Object {
|
||||
"basePath": false,
|
||||
"dev": true,
|
||||
"envName": "development",
|
||||
"quiet": false,
|
||||
"silent": false,
|
||||
"watch": false,
|
||||
},
|
||||
"configDir": "/test/cwd/config",
|
||||
"configs": Array [
|
||||
"/some/other/path/some-kibana.yml",
|
||||
],
|
||||
"corePluginsDir": "/test/cwd/core_plugins",
|
||||
"homeDir": "/test/cwd",
|
||||
"isDevClusterMaster": false,
|
||||
"logDir": "/test/cwd/log",
|
||||
"mode": Object {
|
||||
"dev": true,
|
||||
"name": "development",
|
||||
"prod": false,
|
||||
},
|
||||
"packageInfo": Object {
|
||||
"branch": "feature-v1",
|
||||
"buildNum": 9007199254740991,
|
||||
"buildSha": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
|
||||
"version": "v1",
|
||||
},
|
||||
"staticFilesDir": "/test/cwd/ui",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`correctly creates default environment if \`--env.name\` is supplied.: prod env properties 1`] = `
|
||||
Env {
|
||||
"binDir": "/test/cwd/bin",
|
||||
"cliArgs": Object {
|
||||
"basePath": false,
|
||||
"dev": false,
|
||||
"envName": "production",
|
||||
"quiet": false,
|
||||
"silent": false,
|
||||
"watch": false,
|
||||
},
|
||||
"configDir": "/test/cwd/config",
|
||||
"configs": Array [
|
||||
"/some/other/path/some-kibana.yml",
|
||||
],
|
||||
"corePluginsDir": "/test/cwd/core_plugins",
|
||||
"homeDir": "/test/cwd",
|
||||
"isDevClusterMaster": false,
|
||||
"logDir": "/test/cwd/log",
|
||||
"mode": Object {
|
||||
"dev": false,
|
||||
"name": "production",
|
||||
"prod": true,
|
||||
},
|
||||
"packageInfo": Object {
|
||||
"branch": "feature-v1",
|
||||
"buildNum": 9007199254740991,
|
||||
"buildSha": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
|
||||
"version": "v1",
|
||||
},
|
||||
"staticFilesDir": "/test/cwd/ui",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`correctly creates default environment in dev mode.: env properties 1`] = `
|
||||
Env {
|
||||
"binDir": "/test/cwd/bin",
|
||||
"cliArgs": Object {
|
||||
"basePath": false,
|
||||
"dev": true,
|
||||
"someArg": 1,
|
||||
"someOtherArg": "2",
|
||||
"quiet": false,
|
||||
"silent": false,
|
||||
"watch": false,
|
||||
},
|
||||
"configDir": "/test/cwd/config",
|
||||
"configs": Array [
|
||||
|
@ -15,12 +85,6 @@ Env {
|
|||
"corePluginsDir": "/test/cwd/core_plugins",
|
||||
"homeDir": "/test/cwd",
|
||||
"isDevClusterMaster": true,
|
||||
"legacy": EventEmitter {
|
||||
"_events": Object {},
|
||||
"_eventsCount": 0,
|
||||
"_maxListeners": undefined,
|
||||
"domain": null,
|
||||
},
|
||||
"logDir": "/test/cwd/log",
|
||||
"mode": Object {
|
||||
"dev": true,
|
||||
|
@ -41,9 +105,11 @@ exports[`correctly creates default environment in prod distributable mode.: env
|
|||
Env {
|
||||
"binDir": "/test/cwd/bin",
|
||||
"cliArgs": Object {
|
||||
"basePath": false,
|
||||
"dev": false,
|
||||
"someArg": 1,
|
||||
"someOtherArg": "2",
|
||||
"quiet": false,
|
||||
"silent": false,
|
||||
"watch": false,
|
||||
},
|
||||
"configDir": "/test/cwd/config",
|
||||
"configs": Array [
|
||||
|
@ -52,12 +118,6 @@ Env {
|
|||
"corePluginsDir": "/test/cwd/core_plugins",
|
||||
"homeDir": "/test/cwd",
|
||||
"isDevClusterMaster": false,
|
||||
"legacy": EventEmitter {
|
||||
"_events": Object {},
|
||||
"_eventsCount": 0,
|
||||
"_maxListeners": undefined,
|
||||
"domain": null,
|
||||
},
|
||||
"logDir": "/test/cwd/log",
|
||||
"mode": Object {
|
||||
"dev": false,
|
||||
|
@ -78,9 +138,11 @@ exports[`correctly creates default environment in prod non-distributable mode.:
|
|||
Env {
|
||||
"binDir": "/test/cwd/bin",
|
||||
"cliArgs": Object {
|
||||
"basePath": false,
|
||||
"dev": false,
|
||||
"someArg": 1,
|
||||
"someOtherArg": "2",
|
||||
"quiet": false,
|
||||
"silent": false,
|
||||
"watch": false,
|
||||
},
|
||||
"configDir": "/test/cwd/config",
|
||||
"configs": Array [
|
||||
|
@ -89,12 +151,6 @@ Env {
|
|||
"corePluginsDir": "/test/cwd/core_plugins",
|
||||
"homeDir": "/test/cwd",
|
||||
"isDevClusterMaster": false,
|
||||
"legacy": EventEmitter {
|
||||
"_events": Object {},
|
||||
"_eventsCount": 0,
|
||||
"_maxListeners": undefined,
|
||||
"domain": null,
|
||||
},
|
||||
"logDir": "/test/cwd/log",
|
||||
"mode": Object {
|
||||
"dev": false,
|
||||
|
@ -115,9 +171,11 @@ exports[`correctly creates environment with constructor.: env properties 1`] = `
|
|||
Env {
|
||||
"binDir": "/some/home/dir/bin",
|
||||
"cliArgs": Object {
|
||||
"basePath": false,
|
||||
"dev": false,
|
||||
"someArg": 1,
|
||||
"someOtherArg": "2",
|
||||
"quiet": false,
|
||||
"silent": false,
|
||||
"watch": false,
|
||||
},
|
||||
"configDir": "/some/home/dir/config",
|
||||
"configs": Array [
|
||||
|
@ -126,12 +184,6 @@ Env {
|
|||
"corePluginsDir": "/some/home/dir/core_plugins",
|
||||
"homeDir": "/some/home/dir",
|
||||
"isDevClusterMaster": false,
|
||||
"legacy": EventEmitter {
|
||||
"_events": Object {},
|
||||
"_eventsCount": 0,
|
||||
"_maxListeners": undefined,
|
||||
"domain": null,
|
||||
},
|
||||
"logDir": "/some/home/dir/log",
|
||||
"mode": Object {
|
||||
"dev": false,
|
||||
|
|
|
@@ -22,7 +22,7 @@ import { Config, ObjectToConfigAdapter } from '..';
/**
 * Overrides some config values with ones from argv.
 *
 * @param config `RawConfig` instance to update config values for.
 * @param config `Config` instance to update config values for.
 * @param argv Argv object with key/value pairs.
 */
export function overrideConfigWithArgv(config: Config, argv: { [key: string]: any }) {
|
@ -33,6 +33,7 @@ const mockPackage = new Proxy({ raw: {} as any }, { get: (obj, prop) => obj.raw[
|
|||
jest.mock('../../../../utils/package_json', () => ({ pkg: mockPackage }));
|
||||
|
||||
import { Env } from '../env';
|
||||
import { getEnvOptions } from './__mocks__/env';
|
||||
|
||||
test('correctly creates default environment in dev mode.', () => {
|
||||
mockPackage.raw = {
|
||||
|
@ -40,11 +41,12 @@ test('correctly creates default environment in dev mode.', () => {
|
|||
version: 'some-version',
|
||||
};
|
||||
|
||||
const defaultEnv = Env.createDefault({
|
||||
cliArgs: { dev: true, someArg: 1, someOtherArg: '2' },
|
||||
configs: ['/test/cwd/config/kibana.yml'],
|
||||
isDevClusterMaster: true,
|
||||
});
|
||||
const defaultEnv = Env.createDefault(
|
||||
getEnvOptions({
|
||||
configs: ['/test/cwd/config/kibana.yml'],
|
||||
isDevClusterMaster: true,
|
||||
})
|
||||
);
|
||||
|
||||
expect(defaultEnv).toMatchSnapshot('env properties');
|
||||
});
|
||||
|
@ -60,11 +62,12 @@ test('correctly creates default environment in prod distributable mode.', () =>
|
|||
},
|
||||
};
|
||||
|
||||
const defaultEnv = Env.createDefault({
|
||||
cliArgs: { dev: false, someArg: 1, someOtherArg: '2' },
|
||||
configs: ['/some/other/path/some-kibana.yml'],
|
||||
isDevClusterMaster: false,
|
||||
});
|
||||
const defaultEnv = Env.createDefault(
|
||||
getEnvOptions({
|
||||
cliArgs: { dev: false },
|
||||
configs: ['/some/other/path/some-kibana.yml'],
|
||||
})
|
||||
);
|
||||
|
||||
expect(defaultEnv).toMatchSnapshot('env properties');
|
||||
});
|
||||
|
@ -80,15 +83,45 @@ test('correctly creates default environment in prod non-distributable mode.', ()
|
|||
},
|
||||
};
|
||||
|
||||
const defaultEnv = Env.createDefault({
|
||||
cliArgs: { dev: false, someArg: 1, someOtherArg: '2' },
|
||||
configs: ['/some/other/path/some-kibana.yml'],
|
||||
isDevClusterMaster: false,
|
||||
});
|
||||
const defaultEnv = Env.createDefault(
|
||||
getEnvOptions({
|
||||
cliArgs: { dev: false },
|
||||
configs: ['/some/other/path/some-kibana.yml'],
|
||||
})
|
||||
);
|
||||
|
||||
expect(defaultEnv).toMatchSnapshot('env properties');
|
||||
});
|
||||
|
||||
test('correctly creates default environment if `--env.name` is supplied.', () => {
|
||||
mockPackage.raw = {
|
||||
branch: 'feature-v1',
|
||||
version: 'v1',
|
||||
build: {
|
||||
distributable: false,
|
||||
number: 100,
|
||||
sha: 'feature-v1-build-sha',
|
||||
},
|
||||
};
|
||||
|
||||
const defaultDevEnv = Env.createDefault(
|
||||
getEnvOptions({
|
||||
cliArgs: { envName: 'development' },
|
||||
configs: ['/some/other/path/some-kibana.yml'],
|
||||
})
|
||||
);
|
||||
|
||||
const defaultProdEnv = Env.createDefault(
|
||||
getEnvOptions({
|
||||
cliArgs: { dev: false, envName: 'production' },
|
||||
configs: ['/some/other/path/some-kibana.yml'],
|
||||
})
|
||||
);
|
||||
|
||||
expect(defaultDevEnv).toMatchSnapshot('dev env properties');
|
||||
expect(defaultProdEnv).toMatchSnapshot('prod env properties');
|
||||
});
|
||||
|
||||
test('correctly creates environment with constructor.', () => {
|
||||
mockPackage.raw = {
|
||||
branch: 'feature-v1',
|
||||
|
@ -100,11 +133,13 @@ test('correctly creates environment with constructor.', () => {
|
|||
},
|
||||
};
|
||||
|
||||
const env = new Env('/some/home/dir', {
|
||||
cliArgs: { dev: false, someArg: 1, someOtherArg: '2' },
|
||||
configs: ['/some/other/path/some-kibana.yml'],
|
||||
isDevClusterMaster: false,
|
||||
});
|
||||
const env = new Env(
|
||||
'/some/home/dir',
|
||||
getEnvOptions({
|
||||
cliArgs: { dev: false },
|
||||
configs: ['/some/other/path/some-kibana.yml'],
|
||||
})
|
||||
);
|
||||
|
||||
expect(env).toMatchSnapshot('env properties');
|
||||
});
|
||||
|
|
|
@@ -17,7 +17,6 @@
 * under the License.
 */

import { EventEmitter } from 'events';
import { resolve } from 'path';
import process from 'process';

@@ -38,10 +37,19 @@ interface EnvironmentMode {

export interface EnvOptions {
  configs: string[];
  cliArgs: Record<string, any>;
  cliArgs: CliArgs;
  isDevClusterMaster: boolean;
}

export interface CliArgs {
  dev: boolean;
  envName?: string;
  quiet: boolean;
  silent: boolean;
  watch: boolean;
  basePath: boolean;
}

export class Env {
  /**
   * @internal
@@ -66,15 +74,10 @@ export class Env {
   */
  public readonly mode: Readonly<EnvironmentMode>;

  /**
   * @internal
   */
  public readonly legacy: EventEmitter;

  /**
   * Arguments provided through command line.
   */
  public readonly cliArgs: Readonly<Record<string, any>>;
  public readonly cliArgs: Readonly<CliArgs>;

  /**
   * Paths to the configuration files.
@@ -100,10 +103,11 @@ export class Env {
    this.configs = Object.freeze(options.configs);
    this.isDevClusterMaster = options.isDevClusterMaster;

    const isDevMode = this.cliArgs.dev || this.cliArgs.envName === 'development';
    this.mode = Object.freeze<EnvironmentMode>({
      dev: this.cliArgs.dev,
      name: this.cliArgs.dev ? 'development' : 'production',
      prod: !this.cliArgs.dev,
      dev: isDevMode,
      name: isDevMode ? 'development' : 'production',
      prod: !isDevMode,
    });

    const isKibanaDistributable = pkg.build && pkg.build.distributable === true;
@@ -113,7 +117,5 @@ export class Env {
      buildSha: isKibanaDistributable ? pkg.build.sha : 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
      version: pkg.version,
    });

    this.legacy = new EventEmitter();
  }
}
@@ -22,6 +22,6 @@ export { RawConfigService } from './raw_config_service';
export { Config, ConfigPath } from './config';
/** @internal */
export { ObjectToConfigAdapter } from './object_to_config_adapter';
export { Env } from './env';
export { Env, CliArgs } from './env';
export { ConfigWithSchema } from './config_with_schema';
export { getConfigFromFiles } from './read_config';
@@ -36,8 +36,7 @@ export class ByteSizeValue {
    const match = /([1-9][0-9]*)(b|kb|mb|gb)/.exec(text);
    if (!match) {
      throw new Error(
        `could not parse byte size value [${text}]. value must start with a ` +
          `number and end with bytes size unit, e.g. 10kb, 23mb, 3gb, 239493b`
        `could not parse byte size value [${text}]. Value must be a safe positive integer.`
      );
    }
@ -11,6 +11,7 @@ Object {
|
|||
|
||||
exports[`has defaults for config 1`] = `
|
||||
Object {
|
||||
"autoListen": true,
|
||||
"cors": false,
|
||||
"host": "localhost",
|
||||
"maxPayload": ByteSizeValue {
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// Jest Snapshot v1, https://goo.gl/fbAQLP
|
||||
|
||||
exports[`broadcasts server and connection options to the legacy "channel" 1`] = `
|
||||
exports[`returns server and connection options on start 1`] = `
|
||||
Object {
|
||||
"host": "127.0.0.1",
|
||||
"port": 12345,
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
import { getEnvOptions } from '../../config/__tests__/__mocks__/env';
|
||||
import { Server } from 'http';
|
||||
|
||||
jest.mock('fs', () => ({
|
||||
readFileSync: jest.fn(),
|
||||
|
@ -26,7 +26,6 @@ jest.mock('fs', () => ({
|
|||
import Chance from 'chance';
|
||||
import supertest from 'supertest';
|
||||
|
||||
import { Env } from '../../config';
|
||||
import { ByteSizeValue } from '../../config/schema';
|
||||
import { logger } from '../../logging/__mocks__';
|
||||
import { HttpConfig } from '../http_config';
|
||||
|
@ -35,14 +34,9 @@ import { Router } from '../router';
|
|||
|
||||
const chance = new Chance();
|
||||
|
||||
let env: Env;
|
||||
let server: HttpServer;
|
||||
let config: HttpConfig;
|
||||
|
||||
function getServerListener(httpServer: HttpServer) {
|
||||
return (httpServer as any).server.listener;
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
config = {
|
||||
host: '127.0.0.1',
|
||||
|
@ -51,8 +45,7 @@ beforeEach(() => {
|
|||
ssl: {},
|
||||
} as HttpConfig;
|
||||
|
||||
env = new Env('/kibana', getEnvOptions());
|
||||
server = new HttpServer(logger.get(), env);
|
||||
server = new HttpServer(logger.get());
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
|
@ -77,9 +70,9 @@ test('200 OK with body', async () => {
|
|||
|
||||
server.registerRouter(router);
|
||||
|
||||
await server.start(config);
|
||||
const { server: innerServer } = await server.start(config);
|
||||
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServer.listener)
|
||||
.get('/foo/')
|
||||
.expect(200)
|
||||
.then(res => {
|
||||
|
@ -96,9 +89,9 @@ test('202 Accepted with body', async () => {
|
|||
|
||||
server.registerRouter(router);
|
||||
|
||||
await server.start(config);
|
||||
const { server: innerServer } = await server.start(config);
|
||||
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServer.listener)
|
||||
.get('/foo/')
|
||||
.expect(202)
|
||||
.then(res => {
|
||||
|
@ -115,9 +108,9 @@ test('204 No content', async () => {
|
|||
|
||||
server.registerRouter(router);
|
||||
|
||||
await server.start(config);
|
||||
const { server: innerServer } = await server.start(config);
|
||||
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServer.listener)
|
||||
.get('/foo/')
|
||||
.expect(204)
|
||||
.then(res => {
|
||||
|
@ -136,9 +129,9 @@ test('400 Bad request with error', async () => {
|
|||
|
||||
server.registerRouter(router);
|
||||
|
||||
await server.start(config);
|
||||
const { server: innerServer } = await server.start(config);
|
||||
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServer.listener)
|
||||
.get('/foo/')
|
||||
.expect(400)
|
||||
.then(res => {
|
||||
|
@ -165,9 +158,9 @@ test('valid params', async () => {
|
|||
|
||||
server.registerRouter(router);
|
||||
|
||||
await server.start(config);
|
||||
const { server: innerServer } = await server.start(config);
|
||||
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServer.listener)
|
||||
.get('/foo/some-string')
|
||||
.expect(200)
|
||||
.then(res => {
|
||||
|
@ -194,9 +187,9 @@ test('invalid params', async () => {
|
|||
|
||||
server.registerRouter(router);
|
||||
|
||||
await server.start(config);
|
||||
const { server: innerServer } = await server.start(config);
|
||||
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServer.listener)
|
||||
.get('/foo/some-string')
|
||||
.expect(400)
|
||||
.then(res => {
|
||||
|
@ -226,9 +219,9 @@ test('valid query', async () => {
|
|||
|
||||
server.registerRouter(router);
|
||||
|
||||
await server.start(config);
|
||||
const { server: innerServer } = await server.start(config);
|
||||
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServer.listener)
|
||||
.get('/foo/?bar=test&quux=123')
|
||||
.expect(200)
|
||||
.then(res => {
|
||||
|
@ -255,9 +248,9 @@ test('invalid query', async () => {
|
|||
|
||||
server.registerRouter(router);
|
||||
|
||||
await server.start(config);
|
||||
const { server: innerServer } = await server.start(config);
|
||||
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServer.listener)
|
||||
.get('/foo/?bar=test')
|
||||
.expect(400)
|
||||
.then(res => {
|
||||
|
@ -287,9 +280,9 @@ test('valid body', async () => {
|
|||
|
||||
server.registerRouter(router);
|
||||
|
||||
await server.start(config);
|
||||
const { server: innerServer } = await server.start(config);
|
||||
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServer.listener)
|
||||
.post('/foo/')
|
||||
.send({
|
||||
bar: 'test',
|
||||
|
@ -320,9 +313,9 @@ test('invalid body', async () => {
|
|||
|
||||
server.registerRouter(router);
|
||||
|
||||
await server.start(config);
|
||||
const { server: innerServer } = await server.start(config);
|
||||
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServer.listener)
|
||||
.post('/foo/')
|
||||
.send({ bar: 'test' })
|
||||
.expect(400)
|
||||
|
@ -352,9 +345,9 @@ test('handles putting', async () => {
|
|||
|
||||
server.registerRouter(router);
|
||||
|
||||
await server.start(config);
|
||||
const { server: innerServer } = await server.start(config);
|
||||
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServer.listener)
|
||||
.put('/foo/')
|
||||
.send({ key: 'new value' })
|
||||
.expect(200)
|
||||
|
@ -382,9 +375,9 @@ test('handles deleting', async () => {
|
|||
|
||||
server.registerRouter(router);
|
||||
|
||||
await server.start(config);
|
||||
const { server: innerServer } = await server.start(config);
|
||||
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServer.listener)
|
||||
.delete('/foo/3')
|
||||
.expect(200)
|
||||
.then(res => {
|
||||
|
@ -407,9 +400,9 @@ test('filtered headers', async () => {
|
|||
|
||||
server.registerRouter(router);
|
||||
|
||||
await server.start(config);
|
||||
const { server: innerServer } = await server.start(config);
|
||||
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServer.listener)
|
||||
.get('/foo/?bar=quux')
|
||||
.set('x-kibana-foo', 'bar')
|
||||
.set('x-kibana-bar', 'quux');
|
||||
|
@ -422,6 +415,7 @@ test('filtered headers', async () => {
|
|||
|
||||
describe('with `basepath: /bar` and `rewriteBasePath: false`', () => {
|
||||
let configWithBasePath: HttpConfig;
|
||||
let innerServerListener: Server;
|
||||
|
||||
beforeEach(async () => {
|
||||
configWithBasePath = {
|
||||
|
@ -438,29 +432,30 @@ describe('with `basepath: /bar` and `rewriteBasePath: false`', () => {
|
|||
|
||||
server.registerRouter(router);
|
||||
|
||||
await server.start(configWithBasePath);
|
||||
const { server: innerServer } = await server.start(configWithBasePath);
|
||||
innerServerListener = innerServer.listener;
|
||||
});
|
||||
|
||||
test('/bar => 404', async () => {
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServerListener)
|
||||
.get('/bar')
|
||||
.expect(404);
|
||||
});
|
||||
|
||||
test('/bar/ => 404', async () => {
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServerListener)
|
||||
.get('/bar/')
|
||||
.expect(404);
|
||||
});
|
||||
|
||||
test('/bar/foo => 404', async () => {
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServerListener)
|
||||
.get('/bar/foo')
|
||||
.expect(404);
|
||||
});
|
||||
|
||||
test('/ => /', async () => {
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServerListener)
|
||||
.get('/')
|
||||
.expect(200)
|
||||
.then(res => {
|
||||
|
@ -469,7 +464,7 @@ describe('with `basepath: /bar` and `rewriteBasePath: false`', () => {
|
|||
});
|
||||
|
||||
test('/foo => /foo', async () => {
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServerListener)
|
||||
.get('/foo')
|
||||
.expect(200)
|
||||
.then(res => {
|
||||
|
@ -480,6 +475,7 @@ describe('with `basepath: /bar` and `rewriteBasePath: false`', () => {
|
|||
|
||||
describe('with `basepath: /bar` and `rewriteBasePath: true`', () => {
|
||||
let configWithBasePath: HttpConfig;
|
||||
let innerServerListener: Server;
|
||||
|
||||
beforeEach(async () => {
|
||||
configWithBasePath = {
|
||||
|
@ -496,11 +492,12 @@ describe('with `basepath: /bar` and `rewriteBasePath: true`', () => {
|
|||
|
||||
server.registerRouter(router);
|
||||
|
||||
await server.start(configWithBasePath);
|
||||
const { server: innerServer } = await server.start(configWithBasePath);
|
||||
innerServerListener = innerServer.listener;
|
||||
});
|
||||
|
||||
test('/bar => /', async () => {
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServerListener)
|
||||
.get('/bar')
|
||||
.expect(200)
|
||||
.then(res => {
|
||||
|
@ -509,7 +506,7 @@ describe('with `basepath: /bar` and `rewriteBasePath: true`', () => {
|
|||
});
|
||||
|
||||
test('/bar/ => /', async () => {
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServerListener)
|
||||
.get('/bar/')
|
||||
.expect(200)
|
||||
.then(res => {
|
||||
|
@ -518,7 +515,7 @@ describe('with `basepath: /bar` and `rewriteBasePath: true`', () => {
|
|||
});
|
||||
|
||||
test('/bar/foo => /foo', async () => {
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServerListener)
|
||||
.get('/bar/foo')
|
||||
.expect(200)
|
||||
.then(res => {
|
||||
|
@ -527,13 +524,13 @@ describe('with `basepath: /bar` and `rewriteBasePath: true`', () => {
|
|||
});
|
||||
|
||||
test('/ => 404', async () => {
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServerListener)
|
||||
.get('/')
|
||||
.expect(404);
|
||||
});
|
||||
|
||||
test('/foo => 404', async () => {
|
||||
await supertest(getServerListener(server))
|
||||
await supertest(innerServerListener)
|
||||
.get('/foo')
|
||||
.expect(404);
|
||||
});
|
||||
|
@ -564,21 +561,13 @@ describe('with defined `redirectHttpFromPort`', () => {
|
|||
});
|
||||
});
|
||||
|
||||
test('broadcasts server and connection options to the legacy "channel"', async () => {
|
||||
const onConnectionListener = jest.fn();
|
||||
env.legacy.on('connection', onConnectionListener);
|
||||
|
||||
expect(onConnectionListener).not.toHaveBeenCalled();
|
||||
|
||||
await server.start({
|
||||
test('returns server and connection options on start', async () => {
|
||||
const { server: innerServer, options } = await server.start({
|
||||
...config,
|
||||
port: 12345,
|
||||
});
|
||||
|
||||
expect(onConnectionListener).toHaveBeenCalledTimes(1);
|
||||
|
||||
const [[{ options, server: rawServer }]] = onConnectionListener.mock.calls;
|
||||
expect(rawServer).toBeDefined();
|
||||
expect(rawServer).toBe((server as any).server);
|
||||
expect(innerServer).toBeDefined();
|
||||
expect(innerServer).toBe((server as any).server);
|
||||
expect(options).toMatchSnapshot();
|
||||
});
|
||||
|
|
|
@ -17,8 +17,6 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
import { getEnvOptions } from '../../config/__tests__/__mocks__/env';
|
||||
|
||||
const mockHttpServer = jest.fn();
|
||||
|
||||
jest.mock('../http_server', () => ({
|
||||
|
@ -27,8 +25,6 @@ jest.mock('../http_server', () => ({
|
|||
|
||||
import { noop } from 'lodash';
|
||||
import { BehaviorSubject } from 'rxjs';
|
||||
|
||||
import { Env } from '../../config';
|
||||
import { logger } from '../../logging/__mocks__';
|
||||
import { HttpConfig } from '../http_config';
|
||||
import { HttpService } from '../http_service';
|
||||
|
@ -55,11 +51,7 @@ test('creates and starts http server', async () => {
|
|||
};
|
||||
mockHttpServer.mockImplementation(() => httpServer);
|
||||
|
||||
const service = new HttpService(
|
||||
config$.asObservable(),
|
||||
logger,
|
||||
new Env('/kibana', getEnvOptions())
|
||||
);
|
||||
const service = new HttpService(config$.asObservable(), logger);
|
||||
|
||||
expect(mockHttpServer.mock.instances.length).toBe(1);
|
||||
expect(httpServer.start).not.toHaveBeenCalled();
|
||||
|
@ -81,11 +73,7 @@ test('logs error if already started', async () => {
|
|||
};
|
||||
mockHttpServer.mockImplementation(() => httpServer);
|
||||
|
||||
const service = new HttpService(
|
||||
config$.asObservable(),
|
||||
logger,
|
||||
new Env('/kibana', getEnvOptions())
|
||||
);
|
||||
const service = new HttpService(config$.asObservable(), logger);
|
||||
|
||||
await service.start();
|
||||
|
||||
|
@ -104,11 +92,7 @@ test('stops http server', async () => {
|
|||
};
|
||||
mockHttpServer.mockImplementation(() => httpServer);
|
||||
|
||||
const service = new HttpService(
|
||||
config$.asObservable(),
|
||||
logger,
|
||||
new Env('/kibana', getEnvOptions())
|
||||
);
|
||||
const service = new HttpService(config$.asObservable(), logger);
|
||||
|
||||
await service.start();
|
||||
|
||||
|
@ -132,11 +116,7 @@ test('register route handler', () => {
|
|||
};
|
||||
mockHttpServer.mockImplementation(() => httpServer);
|
||||
|
||||
const service = new HttpService(
|
||||
config$.asObservable(),
|
||||
logger,
|
||||
new Env('/kibana', getEnvOptions())
|
||||
);
|
||||
const service = new HttpService(config$.asObservable(), logger);
|
||||
|
||||
const router = new Router('/foo');
|
||||
service.registerRouter(router);
|
||||
|
@ -159,11 +139,7 @@ test('throws if registering route handler after http server is started', () => {
|
|||
};
|
||||
mockHttpServer.mockImplementation(() => httpServer);
|
||||
|
||||
const service = new HttpService(
|
||||
config$.asObservable(),
|
||||
logger,
|
||||
new Env('/kibana', getEnvOptions())
|
||||
);
|
||||
const service = new HttpService(config$.asObservable(), logger);
|
||||
|
||||
const router = new Router('/foo');
|
||||
service.registerRouter(router);
|
||||
|
@ -171,3 +147,20 @@ test('throws if registering route handler after http server is started', () => {
|
|||
expect(httpServer.registerRouter).toHaveBeenCalledTimes(0);
|
||||
expect(logger.mockCollect()).toMatchSnapshot();
|
||||
});
|
||||
|
||||
test('returns http server contract on start', async () => {
|
||||
const httpServerContract = {
|
||||
server: {},
|
||||
options: { someOption: true },
|
||||
};
|
||||
|
||||
mockHttpServer.mockImplementation(() => ({
|
||||
isListening: () => false,
|
||||
start: jest.fn().mockReturnValue(httpServerContract),
|
||||
stop: noop,
|
||||
}));
|
||||
|
||||
const service = new HttpService(new BehaviorSubject({ ssl: {} } as HttpConfig), logger);
|
||||
|
||||
expect(await service.start()).toBe(httpServerContract);
|
||||
});
|
||||
|
|
|
@ -29,8 +29,6 @@ import { createServer, getServerOptions } from './http_tools';
|
|||
const alphabet = 'abcdefghijklmnopqrztuvwxyz'.split('');
|
||||
|
||||
export interface BasePathProxyServerOptions {
|
||||
httpConfig: HttpConfig;
|
||||
devConfig: DevConfig;
|
||||
shouldRedirectFromOldBasePath: (path: string) => boolean;
|
||||
blockUntil: () => Promise<void>;
|
||||
}
|
||||
|
@ -40,34 +38,38 @@ export class BasePathProxyServer {
|
|||
private httpsAgent?: HttpsAgent;
|
||||
|
||||
get basePath() {
|
||||
return this.options.httpConfig.basePath;
|
||||
return this.httpConfig.basePath;
|
||||
}
|
||||
|
||||
get targetPort() {
|
||||
return this.options.devConfig.basePathProxyTargetPort;
|
||||
return this.devConfig.basePathProxyTargetPort;
|
||||
}
|
||||
|
||||
constructor(private readonly log: Logger, private readonly options: BasePathProxyServerOptions) {
|
||||
constructor(
|
||||
private readonly log: Logger,
|
||||
private readonly httpConfig: HttpConfig,
|
||||
private readonly devConfig: DevConfig
|
||||
) {
|
||||
const ONE_GIGABYTE = 1024 * 1024 * 1024;
|
||||
options.httpConfig.maxPayload = new ByteSizeValue(ONE_GIGABYTE);
|
||||
httpConfig.maxPayload = new ByteSizeValue(ONE_GIGABYTE);
|
||||
|
||||
if (!options.httpConfig.basePath) {
|
||||
options.httpConfig.basePath = `/${sample(alphabet, 3).join('')}`;
|
||||
if (!httpConfig.basePath) {
|
||||
httpConfig.basePath = `/${sample(alphabet, 3).join('')}`;
|
||||
}
|
||||
}
|
||||
|
||||
public async start() {
|
||||
const { httpConfig } = this.options;
|
||||
public async start(options: Readonly<BasePathProxyServerOptions>) {
|
||||
this.log.debug('starting basepath proxy server');
|
||||
|
||||
const options = getServerOptions(httpConfig);
|
||||
this.server = createServer(options);
|
||||
const serverOptions = getServerOptions(this.httpConfig);
|
||||
this.server = createServer(serverOptions);
|
||||
|
||||
// Register hapi plugin that adds proxying functionality. It can be configured
|
||||
// through the route configuration object (see { handler: { proxy: ... } }).
|
||||
await this.server.register({ plugin: require('h2o2-latest') });
|
||||
|
||||
if (httpConfig.ssl.enabled) {
|
||||
const tlsOptions = options.tls as TlsOptions;
|
||||
if (this.httpConfig.ssl.enabled) {
|
||||
const tlsOptions = serverOptions.tls as TlsOptions;
|
||||
this.httpsAgent = new HttpsAgent({
|
||||
ca: tlsOptions.ca,
|
||||
cert: tlsOptions.cert,
|
||||
|
@ -77,40 +79,42 @@ export class BasePathProxyServer {
|
|||
});
|
||||
}
|
||||
|
||||
this.setupRoutes();
|
||||
|
||||
this.log.info(
|
||||
`starting basepath proxy server at ${this.server.info.uri}${httpConfig.basePath}`
|
||||
);
|
||||
this.setupRoutes(options);
|
||||
|
||||
await this.server.start();
|
||||
|
||||
this.log.info(
|
||||
`basepath proxy server running at ${this.server.info.uri}${this.httpConfig.basePath}`
|
||||
);
|
||||
}
|
||||
|
||||
public async stop() {
|
||||
this.log.info('stopping basepath proxy server');
|
||||
|
||||
if (this.server !== undefined) {
|
||||
await this.server.stop();
|
||||
this.server = undefined;
|
||||
if (this.server === undefined) {
|
||||
return;
|
||||
}
|
||||
|
||||
this.log.debug('stopping basepath proxy server');
|
||||
await this.server.stop();
|
||||
this.server = undefined;
|
||||
|
||||
if (this.httpsAgent !== undefined) {
|
||||
this.httpsAgent.destroy();
|
||||
this.httpsAgent = undefined;
|
||||
}
|
||||
}
|
||||
|
||||
private setupRoutes() {
|
||||
private setupRoutes({
|
||||
blockUntil,
|
||||
shouldRedirectFromOldBasePath,
|
||||
}: Readonly<BasePathProxyServerOptions>) {
|
||||
if (this.server === undefined) {
|
||||
throw new Error(`Routes cannot be set up since server is not initialized.`);
|
||||
}
|
||||
|
||||
const { httpConfig, devConfig, blockUntil, shouldRedirectFromOldBasePath } = this.options;
|
||||
|
||||
// Always redirect from root URL to the URL with basepath.
|
||||
this.server.route({
|
||||
handler: (request, responseToolkit) => {
|
||||
return responseToolkit.redirect(httpConfig.basePath);
|
||||
return responseToolkit.redirect(this.httpConfig.basePath);
|
||||
},
|
||||
method: 'GET',
|
||||
path: '/',
|
||||
|
@ -122,7 +126,7 @@ export class BasePathProxyServer {
|
|||
agent: this.httpsAgent,
|
||||
host: this.server.info.host,
|
||||
passThrough: true,
|
||||
port: devConfig.basePathProxyTargetPort,
|
||||
port: this.devConfig.basePathProxyTargetPort,
|
||||
protocol: this.server.info.protocol,
|
||||
xforward: true,
|
||||
},
|
||||
|
@ -138,7 +142,7 @@ export class BasePathProxyServer {
|
|||
},
|
||||
],
|
||||
},
|
||||
path: `${httpConfig.basePath}/{kbnPath*}`,
|
||||
path: `${this.httpConfig.basePath}/{kbnPath*}`,
|
||||
});
|
||||
|
||||
// It may happen that basepath has changed, but user still uses the old one,
|
||||
|
@ -152,7 +156,7 @@ export class BasePathProxyServer {
|
|||
const isBasepathLike = oldBasePath.length === 3;
|
||||
|
||||
return isGet && isBasepathLike && shouldRedirectFromOldBasePath(kbnPath)
|
||||
? responseToolkit.redirect(`${httpConfig.basePath}/${kbnPath}`)
|
||||
? responseToolkit.redirect(`${this.httpConfig.basePath}/${kbnPath}`)
|
||||
: responseToolkit.response('Not Found').code(404);
|
||||
},
|
||||
method: '*',
|
||||
|
|
|
@@ -28,6 +28,7 @@ const match = (regex: RegExp, errorMsg: string) => (str: string) =>

const createHttpSchema = schema.object(
  {
    autoListen: schema.boolean({ defaultValue: true }),
    basePath: schema.maybe(
      schema.string({
        validate: match(validBasePathRegex, "must start with a slash, don't end with one"),
@@ -90,6 +91,7 @@ export class HttpConfig {
   */
  public static schema = createHttpSchema;

  public autoListen: boolean;
  public host: string;
  public port: number;
  public cors: boolean | { origin: string[] };
@@ -103,6 +105,7 @@ export class HttpConfig {
   * @internal
   */
  constructor(config: HttpConfigType, env: Env) {
    this.autoListen = config.autoListen;
    this.host = config.host;
    this.port = config.port;
    this.cors = config.cors;
@@ -17,20 +17,24 @@
 * under the License.
 */

import { Server } from 'hapi-latest';
import { Server, ServerOptions } from 'hapi-latest';

import { modifyUrl } from '../../utils';
import { Env } from '../config';
import { Logger } from '../logging';
import { HttpConfig } from './http_config';
import { createServer, getServerOptions } from './http_tools';
import { Router } from './router';

export interface HttpServerInfo {
  server: Server;
  options: ServerOptions;
}

export class HttpServer {
  private server?: Server;
  private registeredRouters: Set<Router> = new Set();

  constructor(private readonly log: Logger, private readonly env: Env) {}
  constructor(private readonly log: Logger) {}

  public isListening() {
    return this.server !== undefined && this.server.listener.listening;
@@ -62,21 +66,18 @@ export class HttpServer {
      }
    }

    // Notify legacy compatibility layer about HTTP(S) connection providing server
    // instance with connection options so that we can properly bridge core and
    // the "legacy" Kibana internally.
    this.env.legacy.emit('connection', {
      options: serverOptions,
      server: this.server,
    });

    await this.server.start();

    this.log.info(
      `Server running at ${this.server.info.uri}${config.rewriteBasePath ? config.basePath : ''}`,
      // The "legacy" Kibana will output log records with `listening` tag even if `quiet` logging mode is enabled.
      { tags: ['listening'] }
    this.log.debug(
      `http server running at ${this.server.info.uri}${
        config.rewriteBasePath ? config.basePath : ''
      }`
    );

    // Return server instance with the connection options so that we can properly
    // bridge core and the "legacy" Kibana internally. Once this bridge isn't
    // needed anymore we shouldn't return anything from this method.
    return { server: this.server, options: serverOptions };
  }

  public async stop() {
@@ -21,24 +21,23 @@ import { Observable, Subscription } from 'rxjs';
import { first } from 'rxjs/operators';

import { CoreService } from '../../types/core_service';
import { Env } from '../config';
import { Logger, LoggerFactory } from '../logging';
import { HttpConfig } from './http_config';
import { HttpServer } from './http_server';
import { HttpServer, HttpServerInfo } from './http_server';
import { HttpsRedirectServer } from './https_redirect_server';
import { Router } from './router';

export class HttpService implements CoreService {
export class HttpService implements CoreService<HttpServerInfo> {
  private readonly httpServer: HttpServer;
  private readonly httpsRedirectServer: HttpsRedirectServer;
  private configSubscription?: Subscription;

  private readonly log: Logger;

  constructor(private readonly config$: Observable<HttpConfig>, logger: LoggerFactory, env: Env) {
  constructor(private readonly config$: Observable<HttpConfig>, logger: LoggerFactory) {
    this.log = logger.get('http');

    this.httpServer = new HttpServer(logger.get('http', 'server'), env);
    this.httpServer = new HttpServer(logger.get('http', 'server'));
    this.httpsRedirectServer = new HttpsRedirectServer(logger.get('http', 'redirect', 'server'));
  }

@@ -61,7 +60,7 @@ export class HttpService {
      await this.httpsRedirectServer.start(config);
    }

    await this.httpServer.start(config);
    return await this.httpServer.start(config);
  }

  public async stop() {
@@ -19,20 +19,26 @@

import { Observable } from 'rxjs';

import { Env } from '../config';
import { LoggerFactory } from '../logging';
import { HttpConfig } from './http_config';
import { HttpService } from './http_service';
import { Router } from './router';

export { Router, KibanaRequest } from './router';
export { HttpService };
export { HttpServerInfo } from './http_server';
export { BasePathProxyServer } from './base_path_proxy_server';

export { HttpConfig };

export class HttpModule {
  public readonly service: HttpService;

  constructor(readonly config$: Observable<HttpConfig>, logger: LoggerFactory, env: Env) {
    this.service = new HttpService(this.config$, logger, env);
  constructor(readonly config$: Observable<HttpConfig>, logger: LoggerFactory) {
    this.service = new HttpService(this.config$, logger);

    const router = new Router('/core');
    router.get({ path: '/', validate: false }, async (req, res) => res.ok({ version: '0.0.1' }));
    this.service.registerRouter(router);
  }
}
121
src/core/server/index.test.ts
Normal file
|
@ -0,0 +1,121 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch B.V. under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch B.V. licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
const mockHttpService = { start: jest.fn(), stop: jest.fn(), registerRouter: jest.fn() };
|
||||
jest.mock('./http/http_service', () => ({
|
||||
HttpService: jest.fn(() => mockHttpService),
|
||||
}));
|
||||
|
||||
const mockLegacyService = { start: jest.fn(), stop: jest.fn() };
|
||||
jest.mock('./legacy_compat/legacy_service', () => ({
|
||||
LegacyService: jest.fn(() => mockLegacyService),
|
||||
}));
|
||||
|
||||
import { BehaviorSubject } from 'rxjs';
|
||||
import { Server } from '.';
|
||||
import { Env } from './config';
|
||||
import { getEnvOptions } from './config/__tests__/__mocks__/env';
|
||||
import { logger } from './logging/__mocks__';
|
||||
|
||||
const mockConfigService = { atPath: jest.fn(), getUnusedPaths: jest.fn().mockReturnValue([]) };
|
||||
const env = new Env('.', getEnvOptions());
|
||||
|
||||
beforeEach(() => {
|
||||
mockConfigService.atPath.mockReturnValue(new BehaviorSubject({ autoListen: true }));
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
logger.mockClear();
|
||||
mockConfigService.atPath.mockReset();
|
||||
mockHttpService.start.mockReset();
|
||||
mockHttpService.stop.mockReset();
|
||||
mockLegacyService.start.mockReset();
|
||||
mockLegacyService.stop.mockReset();
|
||||
});
|
||||
|
||||
test('starts services on "start"', async () => {
|
||||
const mockHttpServiceStartContract = { something: true };
|
||||
mockHttpService.start.mockReturnValue(Promise.resolve(mockHttpServiceStartContract));
|
||||
|
||||
const server = new Server(mockConfigService as any, logger, env);
|
||||
|
||||
expect(mockHttpService.start).not.toHaveBeenCalled();
|
||||
expect(mockLegacyService.start).not.toHaveBeenCalled();
|
||||
|
||||
await server.start();
|
||||
|
||||
expect(mockHttpService.start).toHaveBeenCalledTimes(1);
|
||||
expect(mockLegacyService.start).toHaveBeenCalledTimes(1);
|
||||
expect(mockLegacyService.start).toHaveBeenCalledWith(mockHttpServiceStartContract);
|
||||
});
|
||||
|
||||
test('does not fail on "start" if there are unused paths detected', async () => {
|
||||
mockConfigService.getUnusedPaths.mockReturnValue(['some.path', 'another.path']);
|
||||
|
||||
const server = new Server(mockConfigService as any, logger, env);
|
||||
await expect(server.start()).resolves.toBeUndefined();
|
||||
expect(logger.mockCollect()).toMatchSnapshot('unused paths logs');
|
||||
});
|
||||
|
||||
test('does not start http service is `autoListen:false`', async () => {
|
||||
mockConfigService.atPath.mockReturnValue(new BehaviorSubject({ autoListen: false }));
|
||||
|
||||
const server = new Server(mockConfigService as any, logger, env);
|
||||
|
||||
expect(mockLegacyService.start).not.toHaveBeenCalled();
|
||||
|
||||
await server.start();
|
||||
|
||||
expect(mockHttpService.start).not.toHaveBeenCalled();
|
||||
expect(mockLegacyService.start).toHaveBeenCalledTimes(1);
|
||||
expect(mockLegacyService.start).toHaveBeenCalledWith(undefined);
|
||||
});
|
||||
|
||||
test('does not start http service if process is dev cluster master', async () => {
|
||||
const server = new Server(
|
||||
mockConfigService as any,
|
||||
logger,
|
||||
new Env('.', getEnvOptions({ isDevClusterMaster: true }))
|
||||
);
|
||||
|
||||
expect(mockLegacyService.start).not.toHaveBeenCalled();
|
||||
|
||||
await server.start();
|
||||
|
||||
expect(mockHttpService.start).not.toHaveBeenCalled();
|
||||
expect(mockLegacyService.start).toHaveBeenCalledTimes(1);
|
||||
expect(mockLegacyService.start).toHaveBeenCalledWith(undefined);
|
||||
});
|
||||
|
||||
test('stops services on "stop"', async () => {
|
||||
const mockHttpServiceStartContract = { something: true };
|
||||
mockHttpService.start.mockReturnValue(Promise.resolve(mockHttpServiceStartContract));
|
||||
|
||||
const server = new Server(mockConfigService as any, logger, env);
|
||||
|
||||
await server.start();
|
||||
|
||||
expect(mockHttpService.stop).not.toHaveBeenCalled();
|
||||
expect(mockLegacyService.stop).not.toHaveBeenCalled();
|
||||
|
||||
await server.stop();
|
||||
|
||||
expect(mockHttpService.stop).toHaveBeenCalledTimes(1);
|
||||
expect(mockLegacyService.stop).toHaveBeenCalledTimes(1);
|
||||
});
|
|
@@ -17,29 +17,44 @@
 * under the License.
 */

export { bootstrap } from './bootstrap';

import { first } from 'rxjs/operators';
import { ConfigService, Env } from './config';
import { HttpConfig, HttpModule, Router } from './http';
import { HttpConfig, HttpModule, HttpServerInfo } from './http';
import { LegacyCompatModule } from './legacy_compat';
import { Logger, LoggerFactory } from './logging';

export class Server {
  private readonly http: HttpModule;
  private readonly legacy: LegacyCompatModule;
  private readonly log: Logger;

  constructor(private readonly configService: ConfigService, logger: LoggerFactory, env: Env) {
  constructor(
    private readonly configService: ConfigService,
    logger: LoggerFactory,
    private readonly env: Env
  ) {
    this.log = logger.get('server');

    const httpConfig$ = configService.atPath('server', HttpConfig);
    this.http = new HttpModule(httpConfig$, logger, env);
    this.http = new HttpModule(configService.atPath('server', HttpConfig), logger);
    this.legacy = new LegacyCompatModule(configService, logger, env);
  }

  public async start() {
    this.log.debug('starting server :tada:');
    this.log.debug('starting server');

    const router = new Router('/core');
    router.get({ path: '/', validate: false }, async (req, res) => res.ok({ version: '0.0.1' }));
    this.http.service.registerRouter(router);
    // We shouldn't start http service in two cases:
    // 1. If `server.autoListen` is explicitly set to `false`.
    // 2. When the process is run as dev cluster master in which case cluster manager
    // will fork a dedicated process where http service will be started instead.
    let httpServerInfo: HttpServerInfo | undefined;
    const httpConfig = await this.http.config$.pipe(first()).toPromise();
    if (!this.env.isDevClusterMaster && httpConfig.autoListen) {
      httpServerInfo = await this.http.service.start();
    }

    await this.http.service.start();
    await this.legacy.service.start(httpServerInfo);

    const unhandledConfigPaths = await this.configService.getUnusedPaths();
    if (unhandledConfigPaths.length > 0) {
@@ -54,6 +69,7 @@ export class Server {
  public async stop() {
    this.log.debug('stopping server');

    await this.legacy.service.stop();
    await this.http.service.stop();
  }
}
@ -1,21 +0,0 @@
|
|||
// Jest Snapshot v1, https://goo.gl/fbAQLP
|
||||
|
||||
exports[`correctly binds to the server.: proxy route options 1`] = `
|
||||
Array [
|
||||
Array [
|
||||
Object {
|
||||
"handler": [Function],
|
||||
"method": "*",
|
||||
"options": Object {
|
||||
"payload": Object {
|
||||
"maxBytes": 9007199254740991,
|
||||
"output": "stream",
|
||||
"parse": false,
|
||||
"timeout": false,
|
||||
},
|
||||
},
|
||||
"path": "/{p*}",
|
||||
},
|
||||
],
|
||||
]
|
||||
`;
|
|
@ -0,0 +1,135 @@
|
|||
// Jest Snapshot v1, https://goo.gl/fbAQLP
|
||||
|
||||
exports[`once LegacyService is started in \`devClusterMaster\` mode creates ClusterManager with base path proxy.: cluster manager with base path proxy 1`] = `
|
||||
Array [
|
||||
Array [
|
||||
Object {
|
||||
"basePath": true,
|
||||
"dev": true,
|
||||
"quiet": true,
|
||||
"silent": false,
|
||||
"watch": false,
|
||||
},
|
||||
Object {
|
||||
"server": Object {
|
||||
"autoListen": true,
|
||||
},
|
||||
},
|
||||
BasePathProxyServer {
|
||||
"devConfig": Object {
|
||||
"basePathProxyTargetPort": 100500,
|
||||
},
|
||||
"httpConfig": Object {
|
||||
"basePath": "/abc",
|
||||
"maxPayload": ByteSizeValue {
|
||||
"valueInBytes": 1073741824,
|
||||
},
|
||||
},
|
||||
"log": Object {
|
||||
"debug": [MockFunction] {
|
||||
"calls": Array [
|
||||
Array [
|
||||
"starting legacy service",
|
||||
],
|
||||
],
|
||||
},
|
||||
"error": [MockFunction],
|
||||
"fatal": [MockFunction],
|
||||
"info": [MockFunction],
|
||||
"log": [MockFunction],
|
||||
"trace": [MockFunction],
|
||||
"warn": [MockFunction],
|
||||
},
|
||||
},
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`once LegacyService is started in \`devClusterMaster\` mode creates ClusterManager without base path proxy.: cluster manager without base path proxy 1`] = `
|
||||
Array [
|
||||
Array [
|
||||
Object {
|
||||
"basePath": false,
|
||||
"dev": true,
|
||||
"quiet": false,
|
||||
"silent": true,
|
||||
"watch": false,
|
||||
},
|
||||
Object {
|
||||
"server": Object {
|
||||
"autoListen": true,
|
||||
},
|
||||
},
|
||||
undefined,
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`once LegacyService is started with connection info creates legacy kbnServer and closes it if \`listen\` fails. 1`] = `"something failed"`;
|
||||
|
||||
exports[`once LegacyService is started with connection info proxy route responds with \`503\` if \`kbnServer\` is not ready yet.: 503 response 1`] = `
|
||||
Object {
|
||||
"body": Array [
|
||||
Array [
|
||||
"Kibana server is not ready yet",
|
||||
],
|
||||
],
|
||||
"code": Array [
|
||||
Array [
|
||||
503,
|
||||
],
|
||||
],
|
||||
"header": Array [
|
||||
Array [
|
||||
"Retry-After",
|
||||
"30",
|
||||
],
|
||||
],
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`once LegacyService is started with connection info reconfigures logging configuration if new config is received.: applyLoggingConfiguration params 1`] = `
|
||||
Array [
|
||||
Array [
|
||||
Object {
|
||||
"logging": Object {
|
||||
"verbose": true,
|
||||
},
|
||||
},
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`once LegacyService is started with connection info register proxy route.: proxy route options 1`] = `
|
||||
Array [
|
||||
Array [
|
||||
Object {
|
||||
"handler": [Function],
|
||||
"method": "*",
|
||||
"options": Object {
|
||||
"payload": Object {
|
||||
"maxBytes": 9007199254740991,
|
||||
"output": "stream",
|
||||
"parse": false,
|
||||
"timeout": false,
|
||||
},
|
||||
},
|
||||
"path": "/{p*}",
|
||||
},
|
||||
],
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`once LegacyService is started with connection info throws if fails to retrieve initial config. 1`] = `"something failed"`;
|
||||
|
||||
exports[`once LegacyService is started without connection info reconfigures logging configuration if new config is received.: applyLoggingConfiguration params 1`] = `
|
||||
Array [
|
||||
Array [
|
||||
Object {
|
||||
"logging": Object {
|
||||
"verbose": true,
|
||||
},
|
||||
},
|
||||
],
|
||||
]
|
||||
`;
|
|
@ -17,17 +17,12 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
import { Server as HapiServer } from 'hapi-latest';
|
||||
import { Server } from 'net';
|
||||
import { LegacyPlatformProxifier } from '..';
|
||||
import { Env } from '../../config';
|
||||
import { getEnvOptions } from '../../config/__tests__/__mocks__/env';
|
||||
import { logger } from '../../logging/__mocks__';
|
||||
|
||||
import { LegacyPlatformProxy } from '../legacy_platform_proxy';
|
||||
|
||||
let server: jest.Mocked<Server>;
|
||||
let mockHapiServer: jest.Mocked<HapiServer>;
|
||||
let root: any;
|
||||
let proxifier: LegacyPlatformProxifier;
|
||||
let proxy: LegacyPlatformProxy;
|
||||
beforeEach(() => {
|
||||
server = {
|
||||
addListener: jest.fn(),
|
||||
|
@ -36,29 +31,7 @@ beforeEach(() => {
|
|||
.mockReturnValue({ port: 1234, family: 'test-family', address: 'test-address' }),
|
||||
getConnections: jest.fn(),
|
||||
} as any;
|
||||
|
||||
mockHapiServer = { listener: server, route: jest.fn() } as any;
|
||||
|
||||
root = {
|
||||
logger,
|
||||
shutdown: jest.fn(),
|
||||
start: jest.fn(),
|
||||
} as any;
|
||||
|
||||
const env = new Env('/kibana', getEnvOptions());
|
||||
proxifier = new LegacyPlatformProxifier(root, env);
|
||||
env.legacy.emit('connection', {
|
||||
server: mockHapiServer,
|
||||
options: { someOption: 'foo', someAnotherOption: 'bar' },
|
||||
});
|
||||
});
|
||||
|
||||
test('correctly binds to the server.', () => {
|
||||
expect(mockHapiServer.route.mock.calls).toMatchSnapshot('proxy route options');
|
||||
expect(server.addListener).toHaveBeenCalledTimes(6);
|
||||
for (const eventName of ['clientError', 'close', 'connection', 'error', 'listening', 'upgrade']) {
|
||||
expect(server.addListener).toHaveBeenCalledWith(eventName, expect.any(Function));
|
||||
}
|
||||
proxy = new LegacyPlatformProxy({ debug: jest.fn() } as any, server);
|
||||
});
|
||||
|
||||
test('correctly redirects server events.', () => {
|
||||
|
@ -66,7 +39,7 @@ test('correctly redirects server events.', () => {
|
|||
expect(server.addListener).toHaveBeenCalledWith(eventName, expect.any(Function));
|
||||
|
||||
const listener = jest.fn();
|
||||
proxifier.addListener(eventName, listener);
|
||||
proxy.addListener(eventName, listener);
|
||||
|
||||
// Emit several events, to make sure that server is not being listened with `once`.
|
||||
const [, serverListener] = server.addListener.mock.calls.find(
|
||||
|
@ -78,68 +51,47 @@ test('correctly redirects server events.', () => {
|
|||
|
||||
expect(listener).toHaveBeenCalledTimes(2);
|
||||
expect(listener).toHaveBeenCalledWith(1, 2, 3, 4);
|
||||
expect(listener).toHaveBeenCalledWith(5, 6, 7, 8);
|
||||
|
||||
proxifier.removeListener(eventName, listener);
|
||||
proxy.removeListener(eventName, listener);
|
||||
}
|
||||
});
|
||||
|
||||
test('returns `address` from the underlying server.', () => {
|
||||
expect(proxifier.address()).toEqual({
|
||||
expect(proxy.address()).toEqual({
|
||||
address: 'test-address',
|
||||
family: 'test-family',
|
||||
port: 1234,
|
||||
});
|
||||
});
|
||||
|
||||
test('`listen` starts the `root`.', async () => {
|
||||
test('`listen` calls callback immediately.', async () => {
|
||||
const onListenComplete = jest.fn();
|
||||
|
||||
await proxifier.listen(1234, 'host-1', onListenComplete);
|
||||
await proxy.listen(1234, 'host-1', onListenComplete);
|
||||
|
||||
expect(root.start).toHaveBeenCalledTimes(1);
|
||||
expect(onListenComplete).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
test('`close` shuts down the `root`.', async () => {
|
||||
test('`close` calls callback immediately.', async () => {
|
||||
const onCloseComplete = jest.fn();
|
||||
|
||||
await proxifier.close(onCloseComplete);
|
||||
await proxy.close(onCloseComplete);
|
||||
|
||||
expect(root.shutdown).toHaveBeenCalledTimes(1);
|
||||
expect(onCloseComplete).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
test('returns connection count from the underlying server.', () => {
|
||||
server.getConnections.mockImplementation(callback => callback(null, 0));
|
||||
const onGetConnectionsComplete = jest.fn();
|
||||
proxifier.getConnections(onGetConnectionsComplete);
|
||||
proxy.getConnections(onGetConnectionsComplete);
|
||||
|
||||
expect(onGetConnectionsComplete).toHaveBeenCalledTimes(1);
|
||||
expect(onGetConnectionsComplete).toHaveBeenCalledWith(null, 0);
|
||||
onGetConnectionsComplete.mockReset();
|
||||
|
||||
server.getConnections.mockImplementation(callback => callback(null, 100500));
|
||||
proxifier.getConnections(onGetConnectionsComplete);
|
||||
proxy.getConnections(onGetConnectionsComplete);
|
||||
|
||||
expect(onGetConnectionsComplete).toHaveBeenCalledTimes(1);
|
||||
expect(onGetConnectionsComplete).toHaveBeenCalledWith(null, 100500);
|
||||
});
|
||||
|
||||
test('proxy route abandons request processing and forwards it to the legacy Kibana', async () => {
|
||||
const mockResponseToolkit = { response: jest.fn(), abandon: Symbol('abandon') };
|
||||
const mockRequest = { raw: { req: { a: 1 }, res: { b: 2 } } };
|
||||
|
||||
const onRequest = jest.fn();
|
||||
proxifier.addListener('request', onRequest);
|
||||
|
||||
const [[{ handler }]] = mockHapiServer.route.mock.calls;
|
||||
const response = await handler(mockRequest, mockResponseToolkit);
|
||||
|
||||
expect(response).toBe(mockResponseToolkit.abandon);
|
||||
expect(mockResponseToolkit.response).not.toHaveBeenCalled();
|
||||
|
||||
// Make sure request hasn't been passed to the legacy platform.
|
||||
expect(onRequest).toHaveBeenCalledTimes(1);
|
||||
expect(onRequest).toHaveBeenCalledWith(mockRequest.raw.req, mockRequest.raw.res);
|
||||
});
|
339
src/core/server/legacy_compat/__tests__/legacy_service.test.ts
Normal file
|
@ -0,0 +1,339 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch B.V. under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch B.V. licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
import { BehaviorSubject, Subject, throwError } from 'rxjs';
|
||||
|
||||
jest.mock('../legacy_platform_proxy');
|
||||
jest.mock('../../../../server/kbn_server');
|
||||
jest.mock('../../../../cli/cluster/cluster_manager');
|
||||
|
||||
import { first } from 'rxjs/operators';
|
||||
// @ts-ignore: implicit any for JS file
|
||||
import MockClusterManager from '../../../../cli/cluster/cluster_manager';
|
||||
// @ts-ignore: implicit any for JS file
|
||||
import MockKbnServer from '../../../../server/kbn_server';
|
||||
import { Config, ConfigService, Env, ObjectToConfigAdapter } from '../../config';
|
||||
import { getEnvOptions } from '../../config/__tests__/__mocks__/env';
|
||||
import { logger } from '../../logging/__mocks__';
|
||||
import { LegacyPlatformProxy } from '../legacy_platform_proxy';
|
||||
import { LegacyService } from '../legacy_service';
|
||||
|
||||
const MockLegacyPlatformProxy: jest.Mock<LegacyPlatformProxy> = LegacyPlatformProxy as any;
|
||||
|
||||
let legacyService: LegacyService;
|
||||
let configService: jest.Mocked<ConfigService>;
|
||||
let env: Env;
|
||||
let mockHttpServerInfo: any;
|
||||
let config$: BehaviorSubject<Config>;
|
||||
beforeEach(() => {
|
||||
env = Env.createDefault(getEnvOptions());
|
||||
|
||||
MockKbnServer.prototype.ready = jest.fn().mockReturnValue(Promise.resolve());
|
||||
|
||||
mockHttpServerInfo = {
|
||||
server: { listener: { addListener: jest.fn() }, route: jest.fn() },
|
||||
options: { someOption: 'foo', someAnotherOption: 'bar' },
|
||||
};
|
||||
|
||||
config$ = new BehaviorSubject<Config>(
|
||||
new ObjectToConfigAdapter({
|
||||
server: { autoListen: true },
|
||||
})
|
||||
);
|
||||
|
||||
configService = {
|
||||
getConfig$: jest.fn().mockReturnValue(config$),
|
||||
atPath: jest.fn().mockReturnValue(new BehaviorSubject({})),
|
||||
} as any;
|
||||
legacyService = new LegacyService(env, logger, configService);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
MockLegacyPlatformProxy.mockClear();
|
||||
MockKbnServer.mockClear();
|
||||
MockClusterManager.create.mockClear();
|
||||
logger.mockClear();
|
||||
});
|
||||
|
||||
describe('once LegacyService is started with connection info', () => {
|
||||
test('register proxy route.', async () => {
|
||||
await legacyService.start(mockHttpServerInfo);
|
||||
|
||||
expect(mockHttpServerInfo.server.route.mock.calls).toMatchSnapshot('proxy route options');
|
||||
});
|
||||
|
||||
test('proxy route responds with `503` if `kbnServer` is not ready yet.', async () => {
|
||||
configService.atPath.mockReturnValue(new BehaviorSubject({ autoListen: true }));
|
||||
|
||||
const kbnServerListen$ = new Subject();
|
||||
MockKbnServer.prototype.listen = jest.fn(() => {
|
||||
kbnServerListen$.next();
|
||||
return kbnServerListen$.toPromise();
|
||||
});
|
||||
|
||||
// Wait until listen is called and proxy route is registered, but don't allow
|
||||
// listen to complete and make kbnServer available.
|
||||
const legacyStartPromise = legacyService.start(mockHttpServerInfo);
|
||||
await kbnServerListen$.pipe(first()).toPromise();
|
||||
|
||||
const mockResponse: any = {
|
||||
code: jest.fn().mockImplementation(() => mockResponse),
|
||||
header: jest.fn().mockImplementation(() => mockResponse),
|
||||
};
|
||||
const mockResponseToolkit = {
|
||||
response: jest.fn().mockReturnValue(mockResponse),
|
||||
abandon: Symbol('abandon'),
|
||||
};
|
||||
const mockRequest = { raw: { req: { a: 1 }, res: { b: 2 } } };
|
||||
|
||||
const [[{ handler }]] = mockHttpServerInfo.server.route.mock.calls;
|
||||
const response503 = await handler(mockRequest, mockResponseToolkit);
|
||||
|
||||
expect(response503).toBe(mockResponse);
|
||||
expect({
|
||||
body: mockResponseToolkit.response.mock.calls,
|
||||
code: mockResponse.code.mock.calls,
|
||||
header: mockResponse.header.mock.calls,
|
||||
}).toMatchSnapshot('503 response');
|
||||
|
||||
// Make sure request hasn't been passed to the legacy platform.
|
||||
const [mockedLegacyPlatformProxy] = MockLegacyPlatformProxy.mock.instances;
|
||||
expect(mockedLegacyPlatformProxy.emit).not.toHaveBeenCalled();
|
||||
|
||||
// Now wait until kibana is ready and try to request once again.
|
||||
kbnServerListen$.complete();
|
||||
await legacyStartPromise;
|
||||
mockResponseToolkit.response.mockClear();
|
||||
|
||||
const responseProxy = await handler(mockRequest, mockResponseToolkit);
|
||||
expect(responseProxy).toBe(mockResponseToolkit.abandon);
|
||||
expect(mockResponseToolkit.response).not.toHaveBeenCalled();
|
||||
|
||||
// Make sure request has been passed to the legacy platform.
|
||||
expect(mockedLegacyPlatformProxy.emit).toHaveBeenCalledTimes(1);
|
||||
expect(mockedLegacyPlatformProxy.emit).toHaveBeenCalledWith(
|
||||
'request',
|
||||
mockRequest.raw.req,
|
||||
mockRequest.raw.res
|
||||
);
|
||||
});
|
||||
|
||||
test('creates legacy kbnServer and calls `listen`.', async () => {
|
||||
configService.atPath.mockReturnValue(new BehaviorSubject({ autoListen: true }));
|
||||
|
||||
await legacyService.start(mockHttpServerInfo);
|
||||
|
||||
expect(MockKbnServer).toHaveBeenCalledTimes(1);
|
||||
expect(MockKbnServer).toHaveBeenCalledWith(
|
||||
{ server: { autoListen: true } },
|
||||
{
|
||||
serverOptions: {
|
||||
listener: expect.any(LegacyPlatformProxy),
|
||||
someAnotherOption: 'bar',
|
||||
someOption: 'foo',
|
||||
},
|
||||
}
|
||||
);
|
||||
|
||||
const [mockKbnServer] = MockKbnServer.mock.instances;
|
||||
expect(mockKbnServer.listen).toHaveBeenCalledTimes(1);
|
||||
expect(mockKbnServer.close).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('creates legacy kbnServer but does not call `listen` if `autoListen: false`.', async () => {
|
||||
configService.atPath.mockReturnValue(new BehaviorSubject({ autoListen: false }));
|
||||
|
||||
await legacyService.start(mockHttpServerInfo);
|
||||
|
||||
expect(MockKbnServer).toHaveBeenCalledTimes(1);
|
||||
expect(MockKbnServer).toHaveBeenCalledWith(
|
||||
{ server: { autoListen: true } },
|
||||
{
|
||||
serverOptions: {
|
||||
listener: expect.any(LegacyPlatformProxy),
|
||||
someAnotherOption: 'bar',
|
||||
someOption: 'foo',
|
||||
},
|
||||
}
|
||||
);
|
||||
|
||||
const [mockKbnServer] = MockKbnServer.mock.instances;
|
||||
expect(mockKbnServer.ready).toHaveBeenCalledTimes(1);
|
||||
expect(mockKbnServer.listen).not.toHaveBeenCalled();
|
||||
expect(mockKbnServer.close).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('creates legacy kbnServer and closes it if `listen` fails.', async () => {
|
||||
configService.atPath.mockReturnValue(new BehaviorSubject({ autoListen: true }));
|
||||
MockKbnServer.prototype.listen.mockRejectedValue(new Error('something failed'));
|
||||
|
||||
await expect(legacyService.start(mockHttpServerInfo)).rejects.toThrowErrorMatchingSnapshot();
|
||||
|
||||
const [mockKbnServer] = MockKbnServer.mock.instances;
|
||||
expect(mockKbnServer.listen).toHaveBeenCalled();
|
||||
expect(mockKbnServer.close).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('throws if fails to retrieve initial config.', async () => {
|
||||
configService.getConfig$.mockReturnValue(throwError(new Error('something failed')));
|
||||
|
||||
await expect(legacyService.start(mockHttpServerInfo)).rejects.toThrowErrorMatchingSnapshot();
|
||||
|
||||
expect(MockKbnServer).not.toHaveBeenCalled();
|
||||
expect(MockClusterManager).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('reconfigures logging configuration if new config is received.', async () => {
|
||||
await legacyService.start(mockHttpServerInfo);
|
||||
|
||||
const [mockKbnServer] = MockKbnServer.mock.instances;
|
||||
expect(mockKbnServer.applyLoggingConfiguration).not.toHaveBeenCalled();
|
||||
|
||||
config$.next(new ObjectToConfigAdapter({ logging: { verbose: true } }));
|
||||
|
||||
expect(mockKbnServer.applyLoggingConfiguration.mock.calls).toMatchSnapshot(
|
||||
`applyLoggingConfiguration params`
|
||||
);
|
||||
});
|
||||
|
||||
test('logs error if re-configuring fails.', async () => {
|
||||
await legacyService.start(mockHttpServerInfo);
|
||||
|
||||
const [mockKbnServer] = MockKbnServer.mock.instances;
|
||||
expect(mockKbnServer.applyLoggingConfiguration).not.toHaveBeenCalled();
|
||||
expect(logger.mockCollect().error).toEqual([]);
|
||||
|
||||
const configError = new Error('something went wrong');
|
||||
mockKbnServer.applyLoggingConfiguration.mockImplementation(() => {
|
||||
throw configError;
|
||||
});
|
||||
|
||||
config$.next(new ObjectToConfigAdapter({ logging: { verbose: true } }));
|
||||
|
||||
expect(logger.mockCollect().error).toEqual([[configError]]);
|
||||
});
|
||||
|
||||
test('logs error if config service fails.', async () => {
|
||||
await legacyService.start(mockHttpServerInfo);
|
||||
|
||||
const [mockKbnServer] = MockKbnServer.mock.instances;
|
||||
expect(mockKbnServer.applyLoggingConfiguration).not.toHaveBeenCalled();
|
||||
expect(logger.mockCollect().error).toEqual([]);
|
||||
|
||||
const configError = new Error('something went wrong');
|
||||
config$.error(configError);
|
||||
|
||||
expect(mockKbnServer.applyLoggingConfiguration).not.toHaveBeenCalled();
|
||||
expect(logger.mockCollect().error).toEqual([[configError]]);
|
||||
});
|
||||
|
||||
test('proxy route abandons request processing and forwards it to the legacy Kibana', async () => {
|
||||
const mockResponseToolkit = { response: jest.fn(), abandon: Symbol('abandon') };
|
||||
const mockRequest = { raw: { req: { a: 1 }, res: { b: 2 } } };
|
||||
|
||||
await legacyService.start(mockHttpServerInfo);
|
||||
|
||||
const [[{ handler }]] = mockHttpServerInfo.server.route.mock.calls;
|
||||
const response = await handler(mockRequest, mockResponseToolkit);
|
||||
|
||||
expect(response).toBe(mockResponseToolkit.abandon);
|
||||
expect(mockResponseToolkit.response).not.toHaveBeenCalled();
|
||||
|
||||
// Make sure request has been passed to the legacy platform.
|
||||
const [mockedLegacyPlatformProxy] = MockLegacyPlatformProxy.mock.instances;
|
||||
expect(mockedLegacyPlatformProxy.emit).toHaveBeenCalledTimes(1);
|
||||
expect(mockedLegacyPlatformProxy.emit).toHaveBeenCalledWith(
|
||||
'request',
|
||||
mockRequest.raw.req,
|
||||
mockRequest.raw.res
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('once LegacyService is started without connection info', () => {
|
||||
beforeEach(async () => await legacyService.start());
|
||||
|
||||
test('creates legacy kbnServer with `autoListen: false`.', () => {
|
||||
expect(mockHttpServerInfo.server.route).not.toHaveBeenCalled();
|
||||
expect(MockKbnServer).toHaveBeenCalledTimes(1);
|
||||
expect(MockKbnServer).toHaveBeenCalledWith(
|
||||
{ server: { autoListen: true } },
|
||||
{ serverOptions: { autoListen: false } }
|
||||
);
|
||||
});
|
||||
|
||||
test('reconfigures logging configuration if new config is received.', async () => {
|
||||
const [mockKbnServer] = MockKbnServer.mock.instances;
|
||||
expect(mockKbnServer.applyLoggingConfiguration).not.toHaveBeenCalled();
|
||||
|
||||
config$.next(new ObjectToConfigAdapter({ logging: { verbose: true } }));
|
||||
|
||||
expect(mockKbnServer.applyLoggingConfiguration.mock.calls).toMatchSnapshot(
|
||||
`applyLoggingConfiguration params`
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('once LegacyService is started in `devClusterMaster` mode', () => {
|
||||
beforeEach(() => {
|
||||
configService.atPath.mockImplementation(path => {
|
||||
return new BehaviorSubject(
|
||||
path === 'dev' ? { basePathProxyTargetPort: 100500 } : { basePath: '/abc' }
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
test('creates ClusterManager without base path proxy.', async () => {
|
||||
const devClusterLegacyService = new LegacyService(
|
||||
Env.createDefault(
|
||||
getEnvOptions({
|
||||
cliArgs: { silent: true, basePath: false },
|
||||
isDevClusterMaster: true,
|
||||
})
|
||||
),
|
||||
logger,
|
||||
configService
|
||||
);
|
||||
|
||||
await devClusterLegacyService.start();
|
||||
|
||||
expect(MockClusterManager.create.mock.calls).toMatchSnapshot(
|
||||
'cluster manager without base path proxy'
|
||||
);
|
||||
});
|
||||
|
||||
test('creates ClusterManager with base path proxy.', async () => {
|
||||
const devClusterLegacyService = new LegacyService(
|
||||
Env.createDefault(
|
||||
getEnvOptions({
|
||||
cliArgs: { quiet: true, basePath: true },
|
||||
isDevClusterMaster: true,
|
||||
})
|
||||
),
|
||||
logger,
|
||||
configService
|
||||
);
|
||||
|
||||
await devClusterLegacyService.start();
|
||||
|
||||
expect(MockClusterManager.create.mock.calls).toMatchSnapshot(
|
||||
'cluster manager with base path proxy'
|
||||
);
|
||||
});
|
||||
});
|
|
@ -2,6 +2,7 @@
|
|||
|
||||
exports[`#get correctly handles server config. 1`] = `
|
||||
Object {
|
||||
"autoListen": true,
|
||||
"basePath": "/abc",
|
||||
"cors": false,
|
||||
"host": "host",
|
||||
|
|
|
@ -32,7 +32,7 @@ interface LegacyLoggingConfig {
|
|||
}
|
||||
|
||||
/**
|
||||
* Represents adapter between config provided by legacy platform and `RawConfig`
|
||||
* Represents adapter between config provided by legacy platform and `Config`
|
||||
* supported by the current platform.
|
||||
*/
|
||||
export class LegacyObjectToConfigAdapter extends ObjectToConfigAdapter {
|
||||
|
@ -59,6 +59,7 @@ export class LegacyObjectToConfigAdapter extends ObjectToConfigAdapter {
|
|||
// TODO: New platform uses just a subset of `server` config from the legacy platform,
|
||||
// new values will be exposed once we need them (eg. customResponseHeaders or xsrf).
|
||||
return {
|
||||
autoListen: configValue.autoListen,
|
||||
basePath: configValue.basePath,
|
||||
cors: configValue.cors,
|
||||
host: configValue.host,
|
||||
|
|
|
@ -17,54 +17,17 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
import { BehaviorSubject } from 'rxjs';
|
||||
import { map } from 'rxjs/operators';
|
||||
import { ConfigService, Env } from '../config';
|
||||
import { LoggerFactory } from '../logging';
|
||||
import { LegacyService } from './legacy_service';
|
||||
|
||||
/** @internal */
|
||||
export { LegacyPlatformProxifier } from './legacy_platform_proxifier';
|
||||
/** @internal */
|
||||
export { LegacyObjectToConfigAdapter } from './config/legacy_object_to_config_adapter';
|
||||
export { LegacyService } from './legacy_service';
|
||||
|
||||
import { LegacyObjectToConfigAdapter, LegacyPlatformProxifier } from '.';
|
||||
import { Env } from '../config';
|
||||
import { Root } from '../root';
|
||||
import { BasePathProxyRoot } from '../root/base_path_proxy_root';
|
||||
export class LegacyCompatModule {
|
||||
public readonly service: LegacyService;
|
||||
|
||||
function initEnvironment(rawKbnServer: any, isDevClusterMaster = false) {
|
||||
const env = Env.createDefault({
|
||||
// The core doesn't work with configs yet, everything is provided by the
|
||||
// "legacy" Kibana, so we can have empty array here.
|
||||
configs: [],
|
||||
// `dev` is the only CLI argument we currently use.
|
||||
cliArgs: { dev: rawKbnServer.config.get('env.dev') },
|
||||
isDevClusterMaster,
|
||||
});
|
||||
|
||||
const legacyConfig$ = new BehaviorSubject<Record<string, any>>(rawKbnServer.config.get());
|
||||
return {
|
||||
config$: legacyConfig$.pipe(map(legacyConfig => new LegacyObjectToConfigAdapter(legacyConfig))),
|
||||
env,
|
||||
// Propagates legacy config updates to the new platform.
|
||||
updateConfig(legacyConfig: Record<string, any>) {
|
||||
legacyConfig$.next(legacyConfig);
|
||||
},
|
||||
};
|
||||
constructor(private readonly configService: ConfigService, logger: LoggerFactory, env: Env) {
|
||||
this.service = new LegacyService(env, logger, this.configService);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @internal
|
||||
*/
|
||||
export const injectIntoKbnServer = (rawKbnServer: any) => {
|
||||
const { env, config$, updateConfig } = initEnvironment(rawKbnServer);
|
||||
|
||||
rawKbnServer.newPlatform = {
|
||||
// Custom HTTP Listener that will be used within legacy platform by HapiJS server.
|
||||
proxyListener: new LegacyPlatformProxifier(new Root(config$, env), env),
|
||||
updateConfig,
|
||||
};
|
||||
};
|
||||
|
||||
export const createBasePathProxy = (rawKbnServer: any) => {
|
||||
const { env, config$ } = initEnvironment(rawKbnServer, true /*isDevClusterMaster*/);
|
||||
return new BasePathProxyRoot(config$, env);
|
||||
};
|
||||
|
|
|
@ -1,172 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch B.V. under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch B.V. licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
import { EventEmitter } from 'events';
|
||||
import { Server } from 'net';
|
||||
|
||||
import { Server as HapiServer, ServerOptions as HapiServerOptions } from 'hapi-latest';
|
||||
import { Env } from '../config';
|
||||
import { Logger } from '../logging';
|
||||
import { Root } from '../root';
|
||||
|
||||
interface ConnectionInfo {
|
||||
server: HapiServer;
|
||||
options: HapiServerOptions;
|
||||
}
|
||||
|
||||
/**
|
||||
* List of the server events to be forwarded to the legacy platform.
|
||||
*/
|
||||
const ServerEventsToForward = [
|
||||
'clientError',
|
||||
'close',
|
||||
'connection',
|
||||
'error',
|
||||
'listening',
|
||||
'upgrade',
|
||||
];
|
||||
|
||||
/**
|
||||
* Represents "proxy" between legacy and current platform.
|
||||
* @internal
|
||||
*/
|
||||
export class LegacyPlatformProxifier extends EventEmitter {
|
||||
private readonly eventHandlers: Map<string, (...args: any[]) => void>;
|
||||
private readonly log: Logger;
|
||||
private server?: Server;
|
||||
|
||||
constructor(private readonly root: Root, private readonly env: Env) {
|
||||
super();
|
||||
|
||||
this.log = root.logger.get('legacy-platform-proxifier');
|
||||
|
||||
// HapiJS expects that the following events will be generated by `listener`, see:
|
||||
// https://github.com/hapijs/hapi/blob/v14.2.0/lib/connection.js.
|
||||
this.eventHandlers = new Map(
|
||||
ServerEventsToForward.map(eventName => {
|
||||
return [
|
||||
eventName,
|
||||
(...args: any[]) => {
|
||||
this.log.debug(`Event is being forwarded: ${eventName}`);
|
||||
this.emit(eventName, ...args);
|
||||
},
|
||||
] as [string, (...args: any[]) => void];
|
||||
})
|
||||
);
|
||||
|
||||
// Once core HTTP service is ready it broadcasts the internal server it relies on
|
||||
// and server options that were used to create that server so that we can properly
|
||||
// bridge with the "legacy" Kibana. If server isn't run (e.g. if process is managed
|
||||
// by ClusterManager or optimizer) then this event will never fire.
|
||||
this.env.legacy.once('connection', (connectionInfo: ConnectionInfo) =>
|
||||
this.onConnection(connectionInfo)
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Neither new nor legacy platform should use this method directly.
|
||||
*/
|
||||
public address() {
|
||||
return this.server && this.server.address();
|
||||
}
|
||||
|
||||
/**
|
||||
* Neither new nor legacy platform should use this method directly.
|
||||
*/
|
||||
public async listen(port: number, host: string, callback?: (error?: Error) => void) {
|
||||
this.log.debug(`"listen" has been called (${host}:${port}).`);
|
||||
|
||||
let error: Error | undefined;
|
||||
try {
|
||||
await this.root.start();
|
||||
} catch (err) {
|
||||
error = err;
|
||||
this.emit('error', err);
|
||||
}
|
||||
|
||||
if (callback !== undefined) {
|
||||
callback(error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Neither new nor legacy platform should use this method directly.
|
||||
*/
|
||||
public async close(callback?: (error?: Error) => void) {
|
||||
this.log.debug('"close" has been called.');
|
||||
|
||||
let error: Error | undefined;
|
||||
try {
|
||||
await this.root.shutdown();
|
||||
} catch (err) {
|
||||
error = err;
|
||||
this.emit('error', err);
|
||||
}
|
||||
|
||||
if (callback !== undefined) {
|
||||
callback(error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Neither new nor legacy platform should use this method directly.
|
||||
*/
|
||||
public getConnections(callback: (error: Error | null, count?: number) => void) {
|
||||
// This method is used by `even-better` (before we start platform).
|
||||
// It seems that the latest version of parent `good` doesn't use this anymore.
|
||||
if (this.server) {
|
||||
this.server.getConnections(callback);
|
||||
} else {
|
||||
callback(null, 0);
|
||||
}
|
||||
}
|
||||
|
||||
private onConnection({ server }: ConnectionInfo) {
|
||||
this.server = server.listener;
|
||||
|
||||
for (const [eventName, eventHandler] of this.eventHandlers) {
|
||||
this.server.addListener(eventName, eventHandler);
|
||||
}
|
||||
|
||||
// We register Kibana proxy middleware right before we start server to allow
|
||||
// all new platform plugins register their routes, so that `legacyProxy`
|
||||
// handles only requests that aren't handled by the new platform.
|
||||
server.route({
|
||||
path: '/{p*}',
|
||||
method: '*',
|
||||
options: {
|
||||
payload: {
|
||||
output: 'stream',
|
||||
parse: false,
|
||||
timeout: false,
|
||||
// Having such a large value here will allow legacy routes to override
|
||||
// maximum allowed payload size set in the core http server if needed.
|
||||
maxBytes: Number.MAX_SAFE_INTEGER,
|
||||
},
|
||||
},
|
||||
handler: async ({ raw: { req, res } }, responseToolkit) => {
|
||||
this.log.trace(`Request will be handled by proxy ${req.method}:${req.url}.`);
|
||||
// Forward request and response objects to the legacy platform. This method
|
||||
// is used whenever new platform doesn't know how to handle the request.
|
||||
this.emit('request', req, res);
|
||||
return responseToolkit.abandon;
|
||||
},
|
||||
});
|
||||
}
|
||||
}
|
107 src/core/server/legacy_compat/legacy_platform_proxy.ts Normal file
|
@ -0,0 +1,107 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch B.V. under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch B.V. licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
import { EventEmitter } from 'events';
|
||||
import { Server } from 'net';
|
||||
|
||||
import { Logger } from '../logging';
|
||||
|
||||
/**
|
||||
* List of the server events to be forwarded to the legacy platform.
|
||||
*/
|
||||
const ServerEventsToForward = [
|
||||
'clientError',
|
||||
'close',
|
||||
'connection',
|
||||
'error',
|
||||
'listening',
|
||||
'upgrade',
|
||||
];
|
||||
|
||||
/**
|
||||
* Represents "proxy" between legacy and current platform.
|
||||
* @internal
|
||||
*/
|
||||
export class LegacyPlatformProxy extends EventEmitter {
|
||||
private readonly eventHandlers: Map<string, (...args: any[]) => void>;
|
||||
|
||||
constructor(private readonly log: Logger, private readonly server: Server) {
|
||||
super();
|
||||
|
||||
// HapiJS expects that the following events will be generated by `listener`, see:
|
||||
// https://github.com/hapijs/hapi/blob/v14.2.0/lib/connection.js.
|
||||
this.eventHandlers = new Map(
|
||||
ServerEventsToForward.map(eventName => {
|
||||
return [
|
||||
eventName,
|
||||
(...args: any[]) => {
|
||||
this.log.debug(`Event is being forwarded: ${eventName}`);
|
||||
this.emit(eventName, ...args);
|
||||
},
|
||||
] as [string, (...args: any[]) => void];
|
||||
})
|
||||
);
|
||||
|
||||
for (const [eventName, eventHandler] of this.eventHandlers) {
|
||||
this.server.addListener(eventName, eventHandler);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Neither new nor legacy platform should use this method directly.
|
||||
*/
|
||||
public address() {
|
||||
this.log.debug('"address" has been called.');
|
||||
|
||||
return this.server.address();
|
||||
}
|
||||
|
||||
/**
|
||||
* Neither new nor legacy platform should use this method directly.
|
||||
*/
|
||||
public listen(port: number, host: string, callback?: (error?: Error) => void) {
|
||||
this.log.debug(`"listen" has been called (${host}:${port}).`);
|
||||
|
||||
if (callback !== undefined) {
|
||||
callback();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Neither new nor legacy platform should use this method directly.
|
||||
*/
|
||||
public close(callback?: (error?: Error) => void) {
|
||||
this.log.debug('"close" has been called.');
|
||||
|
||||
if (callback !== undefined) {
|
||||
callback();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Neither new nor legacy platform should use this method directly.
|
||||
*/
|
||||
public getConnections(callback: (error: Error | null, count?: number) => void) {
|
||||
this.log.debug('"getConnections" has been called.');
|
||||
|
||||
// This method is used by `even-better` (before we start the platform).
|
||||
// It seems that the latest version of parent `good` doesn't use this anymore.
|
||||
this.server.getConnections(callback);
|
||||
}
|
||||
}
|
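
For context on the class above: `LegacyPlatformProxy` is essentially a listener facade. It wraps the `net.Server` owned by the core HTTP service and re-emits the handful of events Hapi expects a listener to produce. Below is a minimal, freestanding sketch of that event-forwarding pattern; the names (`ListenerFacade`, `FORWARDED_EVENTS`) are illustrative only and this is not the Kibana class itself.

import { EventEmitter } from 'events';
import { createServer, Server } from 'net';

// Events the facade re-emits from the wrapped server (same idea as
// `ServerEventsToForward` in the diff above).
const FORWARDED_EVENTS = ['close', 'connection', 'error', 'listening'];

class ListenerFacade extends EventEmitter {
  constructor(private readonly source: Server) {
    super();

    for (const eventName of FORWARDED_EVENTS) {
      // Re-emit every forwarded event so consumers can treat the facade
      // as if it were the underlying server.
      this.source.addListener(eventName, (...args: any[]) => this.emit(eventName, ...args));
    }
  }

  // Delegate address() to the real server, as the proxy above does.
  public address() {
    return this.source.address();
  }
}

// Usage: the facade reports 'listening' once the wrapped server binds.
const realServer = createServer();
const facade = new ListenerFacade(realServer);
facade.once('listening', () => console.log('bound at', facade.address()));
realServer.listen(0);
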
197 src/core/server/legacy_compat/legacy_service.ts Normal file
|
@ -0,0 +1,197 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch B.V. under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch B.V. licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
import { Server as HapiServer } from 'hapi-latest';
|
||||
import { combineLatest, ConnectableObservable, EMPTY, Subscription } from 'rxjs';
|
||||
import { first, map, mergeMap, publishReplay, tap } from 'rxjs/operators';
|
||||
import { CoreService } from '../../types/core_service';
|
||||
import { Config, ConfigService, Env } from '../config';
|
||||
import { DevConfig } from '../dev';
|
||||
import { BasePathProxyServer, HttpConfig, HttpServerInfo } from '../http';
|
||||
import { Logger, LoggerFactory } from '../logging';
|
||||
import { LegacyPlatformProxy } from './legacy_platform_proxy';
|
||||
|
||||
interface LegacyKbnServer {
|
||||
applyLoggingConfiguration: (settings: Readonly<Record<string, any>>) => void;
|
||||
listen: () => Promise<void>;
|
||||
ready: () => Promise<void>;
|
||||
close: () => Promise<void>;
|
||||
}
|
||||
|
||||
export class LegacyService implements CoreService {
|
||||
private readonly log: Logger;
|
||||
private kbnServer?: LegacyKbnServer;
|
||||
private configSubscription?: Subscription;
|
||||
|
||||
constructor(
|
||||
private readonly env: Env,
|
||||
private readonly logger: LoggerFactory,
|
||||
private readonly configService: ConfigService
|
||||
) {
|
||||
this.log = logger.get('legacy', 'service');
|
||||
}
|
||||
|
||||
public async start(httpServerInfo?: HttpServerInfo) {
|
||||
this.log.debug('starting legacy service');
|
||||
|
||||
const update$ = this.configService.getConfig$().pipe(
|
||||
tap(config => {
|
||||
if (this.kbnServer !== undefined) {
|
||||
this.kbnServer.applyLoggingConfiguration(config.toRaw());
|
||||
}
|
||||
}),
|
||||
tap({ error: err => this.log.error(err) }),
|
||||
publishReplay(1)
|
||||
) as ConnectableObservable<Config>;
|
||||
|
||||
this.configSubscription = update$.connect();
|
||||
|
||||
// Receive initial config and create kbnServer/ClusterManager.
|
||||
this.kbnServer = await update$
|
||||
.pipe(
|
||||
first(),
|
||||
mergeMap(async config => {
|
||||
if (this.env.isDevClusterMaster) {
|
||||
await this.createClusterManager(config);
|
||||
return;
|
||||
}
|
||||
|
||||
return await this.createKbnServer(config, httpServerInfo);
|
||||
})
|
||||
)
|
||||
.toPromise();
|
||||
}
|
||||
|
||||
public async stop() {
|
||||
this.log.debug('stopping legacy service');
|
||||
|
||||
if (this.configSubscription !== undefined) {
|
||||
this.configSubscription.unsubscribe();
|
||||
this.configSubscription = undefined;
|
||||
}
|
||||
|
||||
if (this.kbnServer !== undefined) {
|
||||
await this.kbnServer.close();
|
||||
this.kbnServer = undefined;
|
||||
}
|
||||
}
|
||||
|
||||
private async createClusterManager(config: Config) {
|
||||
const basePathProxy$ = this.env.cliArgs.basePath
|
||||
? combineLatest(
|
||||
this.configService.atPath('dev', DevConfig),
|
||||
this.configService.atPath('server', HttpConfig)
|
||||
).pipe(
|
||||
first(),
|
||||
map(([devConfig, httpConfig]) => {
|
||||
return new BasePathProxyServer(this.logger.get('server'), httpConfig, devConfig);
|
||||
})
|
||||
)
|
||||
: EMPTY;
|
||||
|
||||
require('../../../cli/cluster/cluster_manager').create(
|
||||
this.env.cliArgs,
|
||||
config.toRaw(),
|
||||
await basePathProxy$.toPromise()
|
||||
);
|
||||
}
|
||||
|
||||
private async createKbnServer(config: Config, httpServerInfo?: HttpServerInfo) {
|
||||
const KbnServer = require('../../../server/kbn_server');
|
||||
const kbnServer: LegacyKbnServer = new KbnServer(config.toRaw(), {
|
||||
// If core HTTP service is run we'll receive internal server reference and
|
||||
// options that were used to create that server so that we can properly
|
||||
// bridge with the "legacy" Kibana. If server isn't run (e.g. if process is
|
||||
// managed by ClusterManager or optimizer) then we won't have that info,
|
||||
// so we can't start "legacy" server either.
|
||||
serverOptions:
|
||||
httpServerInfo !== undefined
|
||||
? {
|
||||
...httpServerInfo.options,
|
||||
listener: this.setupProxyListener(httpServerInfo.server),
|
||||
}
|
||||
: { autoListen: false },
|
||||
});
|
||||
|
||||
const httpConfig = await this.configService
|
||||
.atPath('server', HttpConfig)
|
||||
.pipe(first())
|
||||
.toPromise();
|
||||
|
||||
if (httpConfig.autoListen) {
|
||||
try {
|
||||
await kbnServer.listen();
|
||||
} catch (err) {
|
||||
await kbnServer.close();
|
||||
throw err;
|
||||
}
|
||||
} else {
|
||||
await kbnServer.ready();
|
||||
}
|
||||
|
||||
return kbnServer;
|
||||
}
|
||||
|
||||
private setupProxyListener(server: HapiServer) {
|
||||
const legacyProxy = new LegacyPlatformProxy(
|
||||
this.logger.get('legacy', 'proxy'),
|
||||
server.listener
|
||||
);
|
||||
|
||||
// We register the Kibana proxy middleware right before we start the server to allow
|
||||
// all new platform plugins to register their routes, so that `legacyProxy`
|
||||
// handles only requests that aren't handled by the new platform.
|
||||
server.route({
|
||||
path: '/{p*}',
|
||||
method: '*',
|
||||
options: {
|
||||
payload: {
|
||||
output: 'stream',
|
||||
parse: false,
|
||||
timeout: false,
|
||||
// Having such a large value here will allow legacy routes to override
|
||||
// the maximum allowed payload size set in the core HTTP server if needed.
|
||||
maxBytes: Number.MAX_SAFE_INTEGER,
|
||||
},
|
||||
},
|
||||
handler: async ({ raw: { req, res } }, responseToolkit) => {
|
||||
if (this.kbnServer === undefined) {
|
||||
this.log.debug(`Kibana server is not ready yet ${req.method}:${req.url}.`);
|
||||
|
||||
// If the legacy server is not ready yet (e.g. it's still in the optimization phase),
|
||||
// we should let the client know and ask it to retry after 30 seconds.
|
||||
return responseToolkit
|
||||
.response('Kibana server is not ready yet')
|
||||
.code(503)
|
||||
.header('Retry-After', '30');
|
||||
}
|
||||
|
||||
this.log.trace(`Request will be handled by proxy ${req.method}:${req.url}.`);
|
||||
|
||||
// Forward request and response objects to the legacy platform. This method
|
||||
// is used whenever the new platform doesn't know how to handle the request.
|
||||
legacyProxy.emit('request', req, res);
|
||||
|
||||
return responseToolkit.abandon;
|
||||
},
|
||||
});
|
||||
|
||||
return legacyProxy;
|
||||
}
|
||||
}
|
|
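
One behaviour worth noting in the service above: the catch-all route keeps answering 503 with a `Retry-After` header until the legacy `kbnServer` exists, and only then forwards requests to the proxy and abandons Hapi's own response. A simplified sketch of just that decision, using assumed interface names rather than the real Hapi types:

// Assumed shape for the sketch; the real code uses Hapi's ResponseToolkit.
interface ResponseToolkitLike {
  response(body: string): {
    code(status: number): { header(name: string, value: string): unknown };
  };
  abandon: symbol;
}

function handleProxiedRequest(
  legacyServerReady: boolean,
  forwardToLegacy: () => void,
  responseToolkit: ResponseToolkitLike
) {
  if (!legacyServerReady) {
    // Legacy Kibana may still be optimizing; ask the client to retry later.
    return responseToolkit
      .response('Kibana server is not ready yet')
      .code(503)
      .header('Retry-After', '30');
  }

  // Hand the raw request over and tell Hapi not to produce a response itself.
  forwardToLegacy();
  return responseToolkit.abandon;
}
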
@ -71,7 +71,7 @@ export class LoggingService implements LoggerFactory {
|
|||
this.appenders.set(appenderKey, Appenders.create(appenderConfig));
|
||||
}
|
||||
|
||||
for (const [loggerKey, loggerAdapter] of this.loggers.entries()) {
|
||||
for (const [loggerKey, loggerAdapter] of this.loggers) {
|
||||
loggerAdapter.updateLogger(this.createLogger(loggerKey, config));
|
||||
}
|
||||
|
||||
|
|
|
@ -1,80 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch B.V. under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch B.V. licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
import { first } from 'rxjs/operators';
|
||||
|
||||
import { Root } from '.';
|
||||
import { DevConfig } from '../dev';
|
||||
import { HttpConfig } from '../http';
|
||||
import { BasePathProxyServer, BasePathProxyServerOptions } from '../http/base_path_proxy_server';
|
||||
|
||||
/**
|
||||
* Top-level entry point to start BasePathProxy server.
|
||||
*/
|
||||
export class BasePathProxyRoot extends Root {
|
||||
private basePathProxy?: BasePathProxyServer;
|
||||
|
||||
public async configure({
|
||||
blockUntil,
|
||||
shouldRedirectFromOldBasePath,
|
||||
}: Pick<BasePathProxyServerOptions, 'blockUntil' | 'shouldRedirectFromOldBasePath'>) {
|
||||
const [devConfig, httpConfig] = await Promise.all([
|
||||
this.configService
|
||||
.atPath('dev', DevConfig)
|
||||
.pipe(first())
|
||||
.toPromise(),
|
||||
this.configService
|
||||
.atPath('server', HttpConfig)
|
||||
.pipe(first())
|
||||
.toPromise(),
|
||||
]);
|
||||
|
||||
this.basePathProxy = new BasePathProxyServer(this.logger.get('server'), {
|
||||
blockUntil,
|
||||
devConfig,
|
||||
httpConfig,
|
||||
shouldRedirectFromOldBasePath,
|
||||
});
|
||||
}
|
||||
|
||||
public getBasePath() {
|
||||
return this.getBasePathProxy().basePath;
|
||||
}
|
||||
|
||||
public getTargetPort() {
|
||||
return this.getBasePathProxy().targetPort;
|
||||
}
|
||||
|
||||
protected async startServer() {
|
||||
return this.getBasePathProxy().start();
|
||||
}
|
||||
|
||||
protected async stopServer() {
|
||||
await this.getBasePathProxy().stop();
|
||||
this.basePathProxy = undefined;
|
||||
}
|
||||
|
||||
private getBasePathProxy() {
|
||||
if (this.basePathProxy === undefined) {
|
||||
throw new Error('BasePathProxyRoot is not configured!');
|
||||
}
|
||||
|
||||
return this.basePathProxy;
|
||||
}
|
||||
}
|
|
@ -18,38 +18,34 @@
|
|||
*/
|
||||
|
||||
import { ConnectableObservable, Observable, Subscription } from 'rxjs';
|
||||
import { catchError, first, map, publishReplay } from 'rxjs/operators';
|
||||
import { first, map, publishReplay, tap } from 'rxjs/operators';
|
||||
|
||||
import { Server } from '..';
|
||||
import { Config, ConfigService, Env } from '../config';
|
||||
|
||||
import { Logger, LoggerFactory, LoggingConfig, LoggingService } from '../logging';
|
||||
|
||||
export type OnShutdown = (reason?: Error) => void;
|
||||
|
||||
/**
|
||||
* Top-level entry point to kick off the app and start the Kibana server.
|
||||
*/
|
||||
export class Root {
|
||||
public readonly logger: LoggerFactory;
|
||||
protected readonly configService: ConfigService;
|
||||
private readonly configService: ConfigService;
|
||||
private readonly log: Logger;
|
||||
private server?: Server;
|
||||
private readonly server: Server;
|
||||
private readonly loggingService: LoggingService;
|
||||
private loggingConfigSubscription?: Subscription;
|
||||
|
||||
constructor(
|
||||
config$: Observable<Config>,
|
||||
private readonly env: Env,
|
||||
private readonly onShutdown: OnShutdown = () => {
|
||||
// noop
|
||||
}
|
||||
private readonly onShutdown?: (reason?: Error | string) => void
|
||||
) {
|
||||
this.loggingService = new LoggingService();
|
||||
this.logger = this.loggingService.asLoggerFactory();
|
||||
|
||||
this.log = this.logger.get('root');
|
||||
|
||||
this.configService = new ConfigService(config$, env, this.logger);
|
||||
this.server = new Server(this.configService, this.logger, this.env);
|
||||
}
|
||||
|
||||
public async start() {
|
||||
|
@ -57,53 +53,46 @@ export class Root {
|
|||
|
||||
try {
|
||||
await this.setupLogging();
|
||||
await this.startServer();
|
||||
await this.server.start();
|
||||
} catch (e) {
|
||||
await this.shutdown(e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
public async shutdown(reason?: Error) {
|
||||
public async shutdown(reason?: any) {
|
||||
this.log.debug('shutting root down');
|
||||
|
||||
await this.stopServer();
|
||||
if (reason) {
|
||||
if (reason.code === 'EADDRINUSE' && Number.isInteger(reason.port)) {
|
||||
reason = new Error(
|
||||
`Port ${reason.port} is already in use. Another instance of Kibana may be running!`
|
||||
);
|
||||
}
|
||||
|
||||
this.log.fatal(reason);
|
||||
}
|
||||
|
||||
await this.server.stop();
|
||||
|
||||
if (this.loggingConfigSubscription !== undefined) {
|
||||
this.loggingConfigSubscription.unsubscribe();
|
||||
this.loggingConfigSubscription = undefined;
|
||||
}
|
||||
|
||||
await this.loggingService.stop();
|
||||
|
||||
this.onShutdown(reason);
|
||||
}
|
||||
|
||||
protected async startServer() {
|
||||
this.server = new Server(this.configService, this.logger, this.env);
|
||||
return this.server.start();
|
||||
}
|
||||
|
||||
protected async stopServer() {
|
||||
if (this.server === undefined) {
|
||||
return;
|
||||
if (this.onShutdown !== undefined) {
|
||||
this.onShutdown(reason);
|
||||
}
|
||||
|
||||
await this.server.stop();
|
||||
this.server = undefined;
|
||||
}
|
||||
|
||||
private async setupLogging() {
|
||||
// Stream that maps config updates to logger updates, including update failures.
|
||||
const update$ = this.configService.atPath('logging', LoggingConfig).pipe(
|
||||
map(config => this.loggingService.upgrade(config)),
|
||||
catchError(err => {
|
||||
// This specifically console.logs because we were not able to configure the logger.
|
||||
// tslint:disable-next-line no-console
|
||||
console.error('Configuring logger failed:', err);
|
||||
|
||||
throw err;
|
||||
}),
|
||||
// This specifically console.logs because we were not able to configure the logger.
|
||||
// tslint:disable-next-line no-console
|
||||
tap({ error: err => console.error('Configuring logger failed:', err) }),
|
||||
publishReplay(1)
|
||||
) as ConnectableObservable<void>;
|
||||
|
||||
|
|
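
A small detail of the reworked `Root.shutdown(reason)` above: a raw Node listen failure is translated into a friendlier message before being logged as fatal. A sketch of that translation step alone, assuming the reason may carry `code` and `port` the way Node's `EADDRINUSE` errors do:

function describeShutdownReason(reason: any): Error | undefined {
  if (!reason) {
    return undefined;
  }

  if (reason.code === 'EADDRINUSE' && Number.isInteger(reason.port)) {
    // Another process already owns the port; report that instead of the raw error.
    return new Error(
      `Port ${reason.port} is already in use. Another instance of Kibana may be running!`
    );
  }

  return reason;
}
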
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
export interface CoreService {
|
||||
start(): Promise<void>;
|
||||
export interface CoreService<TStartContract = void> {
|
||||
start(): Promise<TStartContract>;
|
||||
stop(): Promise<void>;
|
||||
}
|
||||
|
|
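
The `CoreService` interface now carries a start-contract type parameter, which is what lets `LegacyService.start()` resolve to the created `kbnServer`. An illustrative example of a service using the same shape; `PingService` and its contract are made up for this sketch:

interface CoreService<TStartContract = void> {
  start(): Promise<TStartContract>;
  stop(): Promise<void>;
}

interface PingServiceStartContract {
  ping(): string;
}

class PingService implements CoreService<PingServiceStartContract> {
  public async start() {
    // Whatever resolves here is what callers of `start()` receive.
    return { ping: () => 'pong' };
  }

  public async stop() {
    // Nothing to clean up in this sketch.
  }
}
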
|
@ -19,7 +19,7 @@
|
|||
|
||||
import expect from 'expect.js';
|
||||
import sinon from 'sinon';
|
||||
import { startTestServers } from '../../../../../test_utils/kbn_server.js';
|
||||
import { startTestServers } from '../../../../../test_utils/kbn_server';
|
||||
import manageUuid from '../manage_uuid';
|
||||
|
||||
describe('core_plugins/kibana/server/lib', function () {
|
||||
|
|
|
@ -1,20 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch B.V. under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch B.V. licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
export { startupKibana } from './kibana';
|
|
@ -1,36 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch B.V. under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch B.V. licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
import { createServerWithCorePlugins } from '../../../test_utils/kbn_server';
|
||||
|
||||
export async function startupKibana({ port, esUrl }) {
|
||||
const server = createServerWithCorePlugins({
|
||||
server: {
|
||||
port,
|
||||
autoListen: true,
|
||||
},
|
||||
|
||||
elasticsearch: {
|
||||
url: esUrl
|
||||
}
|
||||
});
|
||||
|
||||
await server.ready();
|
||||
return server;
|
||||
}
|
|
@ -40,7 +40,8 @@ describe('config/deprecation warnings mixin', function () {
|
|||
env: {
|
||||
CREATE_SERVER_OPTS: JSON.stringify({
|
||||
logging: {
|
||||
quiet: false
|
||||
quiet: false,
|
||||
silent: false
|
||||
},
|
||||
uiSettings: {
|
||||
enabled: true
|
||||
|
|
|
@ -17,18 +17,18 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
import { createServer } from '../../../../test_utils/kbn_server';
|
||||
import { createRoot } from '../../../../test_utils/kbn_server';
|
||||
|
||||
(async function run() {
|
||||
const server = createServer(JSON.parse(process.env.CREATE_SERVER_OPTS));
|
||||
const root = createRoot(JSON.parse(process.env.CREATE_SERVER_OPTS));
|
||||
|
||||
// We just need the server to run through startup so that it will
|
||||
// log the deprecation messages. Once it has started up we close it
|
||||
// to allow the process to exit naturally
|
||||
try {
|
||||
await server.ready();
|
||||
await root.start();
|
||||
} finally {
|
||||
await server.close();
|
||||
await root.shutdown();
|
||||
}
|
||||
|
||||
}());
|
||||
|
|
|
@ -1,3 +0,0 @@
|
|||
// Jest Snapshot v1, https://goo.gl/fbAQLP
|
||||
|
||||
exports[`fails with 400 if payload size is larger than default and route config allows 1`] = `"{\\"statusCode\\":400,\\"error\\":\\"Bad Request\\",\\"message\\":\\"Payload content length greater than maximum allowed: 200\\"}"`;
|
|
@ -1,7 +0,0 @@
|
|||
// Jest Snapshot v1, https://goo.gl/fbAQLP
|
||||
|
||||
exports[`xsrf request filter destructiveMethod: DELETE rejects requests without either an xsrf or version header: DELETE reject response 1`] = `"{\\"statusCode\\":400,\\"error\\":\\"Bad Request\\",\\"message\\":\\"Request must contain a kbn-xsrf header.\\"}"`;
|
||||
|
||||
exports[`xsrf request filter destructiveMethod: POST rejects requests without either an xsrf or version header: POST reject response 1`] = `"{\\"statusCode\\":400,\\"error\\":\\"Bad Request\\",\\"message\\":\\"Request must contain a kbn-xsrf header.\\"}"`;
|
||||
|
||||
exports[`xsrf request filter destructiveMethod: PUT rejects requests without either an xsrf or version header: PUT reject response 1`] = `"{\\"statusCode\\":400,\\"error\\":\\"Bad Request\\",\\"message\\":\\"Request must contain a kbn-xsrf header.\\"}"`;
|
|
@ -31,35 +31,7 @@ export default async function (kbnServer, server, config) {
|
|||
kbnServer.server = new Hapi.Server();
|
||||
server = kbnServer.server;
|
||||
|
||||
// Note that all connection options configured here should be exactly the same
|
||||
// as in `getServerOptions()` in the new platform (see `src/core/server/http/http_tools`).
|
||||
//
|
||||
// The only exception is `tls` property: TLS is entirely handled by the new
|
||||
// platform and we don't have to duplicate all TLS related settings here, we just need
|
||||
// to indicate to Hapi connection that TLS is used so that it can use correct protocol
|
||||
// name in `server.info` and `request.connection.info` that are used throughout Kibana.
|
||||
//
|
||||
// Any change SHOULD BE applied in both places.
|
||||
server.connection({
|
||||
host: config.get('server.host'),
|
||||
port: config.get('server.port'),
|
||||
tls: config.get('server.ssl.enabled'),
|
||||
listener: kbnServer.newPlatform.proxyListener,
|
||||
state: {
|
||||
strictHeader: false,
|
||||
},
|
||||
routes: {
|
||||
cors: config.get('server.cors'),
|
||||
payload: {
|
||||
maxBytes: config.get('server.maxPayloadBytes'),
|
||||
},
|
||||
validate: {
|
||||
options: {
|
||||
abortEarly: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
server.connection(kbnServer.core.serverOptions);
|
||||
|
||||
registerHapiPlugins(server);
|
||||
|
||||
|
|
52 src/server/http/integration_tests/max_payload_size.test.js Normal file
|
@ -0,0 +1,52 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch B.V. under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch B.V. licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
import * as kbnTestServer from '../../../test_utils/kbn_server';
|
||||
|
||||
let root;
|
||||
beforeAll(async () => {
|
||||
root = kbnTestServer.createRoot({ server: { maxPayloadBytes: 100 } });
|
||||
|
||||
await root.start();
|
||||
|
||||
kbnTestServer.getKbnServer(root).server.route({
|
||||
path: '/payload_size_check/test/route',
|
||||
method: 'POST',
|
||||
config: { payload: { maxBytes: 200 } },
|
||||
handler: (req, reply) => reply(null, req.payload.data.slice(0, 5)),
|
||||
});
|
||||
}, 30000);
|
||||
|
||||
afterAll(async () => await root.shutdown());
|
||||
|
||||
test('accepts payload with a size larger than default but smaller than route config allows', async () => {
|
||||
await kbnTestServer.request.post(root, '/payload_size_check/test/route')
|
||||
.send({ data: Array(150).fill('+').join('') })
|
||||
.expect(200, '+++++');
|
||||
});
|
||||
|
||||
test('fails with 400 if payload size is larger than default and route config allows', async () => {
|
||||
await kbnTestServer.request.post(root, '/payload_size_check/test/route')
|
||||
.send({ data: Array(250).fill('+').join('') })
|
||||
.expect(400, {
|
||||
statusCode: 400,
|
||||
error: 'Bad Request',
|
||||
message: 'Payload content length greater than maximum allowed: 200'
|
||||
});
|
||||
});
|
|
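
The new integration test drives the started root through the `kbnTestServer.request` helper, which is supertest underneath. For comparison, the same request-and-assert pattern written directly against supertest and a bare Node HTTP server (a freestanding sketch, not the Kibana helper):

import { createServer } from 'http';
import supertest from 'supertest';

// Trivial server that always answers with the truncated payload the test expects.
const server = createServer((_req, res) => {
  res.statusCode = 200;
  res.end('+++++');
});

// supertest accepts a bare http.Server and handles listen/close itself.
supertest(server)
  .post('/payload_size_check/test/route')
  .send({ data: '+'.repeat(150) })
  .expect(200, '+++++')
  .then(() => console.log('request asserted'));
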
@ -18,71 +18,48 @@
|
|||
*/
|
||||
|
||||
import { resolve } from 'path';
|
||||
import * as kbnTestServer from '../../test_utils/kbn_server';
|
||||
import * as kbnTestServer from '../../../test_utils/kbn_server';
|
||||
|
||||
const src = resolve.bind(null, __dirname, '../../../src');
|
||||
const src = resolve.bind(null, __dirname, '../../../../src');
|
||||
|
||||
const versionHeader = 'kbn-version';
|
||||
const version = require(src('../package.json')).version;
|
||||
|
||||
describe('version_check request filter', function () {
|
||||
async function makeRequest(kbnServer, opts) {
|
||||
return await kbnTestServer.makeRequest(kbnServer, opts);
|
||||
}
|
||||
let root;
|
||||
beforeAll(async () => {
|
||||
root = kbnTestServer.createRoot();
|
||||
|
||||
async function makeServer() {
|
||||
const kbnServer = kbnTestServer.createServer();
|
||||
await root.start();
|
||||
|
||||
await kbnServer.ready();
|
||||
|
||||
kbnServer.server.route({
|
||||
kbnTestServer.getKbnServer(root).server.route({
|
||||
path: '/version_check/test/route',
|
||||
method: 'GET',
|
||||
handler: function (req, reply) {
|
||||
reply(null, 'ok');
|
||||
}
|
||||
});
|
||||
}, 30000);
|
||||
|
||||
return kbnServer;
|
||||
}
|
||||
|
||||
let kbnServer;
|
||||
beforeEach(async () => kbnServer = await makeServer());
|
||||
afterEach(async () => await kbnServer.close());
|
||||
afterAll(async () => await root.shutdown());
|
||||
|
||||
it('accepts requests with the correct version passed in the version header', async function () {
|
||||
const resp = await makeRequest(kbnServer, {
|
||||
url: '/version_check/test/route',
|
||||
method: 'GET',
|
||||
headers: {
|
||||
[versionHeader]: version,
|
||||
},
|
||||
});
|
||||
|
||||
expect(resp.statusCode).toBe(200);
|
||||
expect(resp.payload).toBe('ok');
|
||||
await kbnTestServer.request
|
||||
.get(root, '/version_check/test/route')
|
||||
.set(versionHeader, version)
|
||||
.expect(200, 'ok');
|
||||
});
|
||||
|
||||
it('rejects requests with an incorrect version passed in the version header', async function () {
|
||||
const resp = await makeRequest(kbnServer, {
|
||||
url: '/version_check/test/route',
|
||||
method: 'GET',
|
||||
headers: {
|
||||
[versionHeader]: `invalid:${version}`,
|
||||
},
|
||||
});
|
||||
|
||||
expect(resp.statusCode).toBe(400);
|
||||
expect(resp.payload).toMatch(/"Browser client is out of date/);
|
||||
await kbnTestServer.request
|
||||
.get(root, '/version_check/test/route')
|
||||
.set(versionHeader, `invalid:${version}`)
|
||||
.expect(400, /"Browser client is out of date/);
|
||||
});
|
||||
|
||||
it('accepts requests that do not include a version header', async function () {
|
||||
const resp = await makeRequest(kbnServer, {
|
||||
url: '/version_check/test/route',
|
||||
method: 'GET'
|
||||
});
|
||||
|
||||
expect(resp.statusCode).toBe(200);
|
||||
expect(resp.payload).toBe('ok');
|
||||
await kbnTestServer.request
|
||||
.get(root, '/version_check/test/route')
|
||||
.expect(200, 'ok');
|
||||
});
|
||||
});
|
|
@ -18,10 +18,10 @@
|
|||
*/
|
||||
|
||||
import { resolve } from 'path';
|
||||
import * as kbnTestServer from '../../test_utils/kbn_server';
|
||||
import * as kbnTestServer from '../../../test_utils/kbn_server';
|
||||
|
||||
const destructiveMethods = ['POST', 'PUT', 'DELETE'];
|
||||
const src = resolve.bind(null, __dirname, '../../../src');
|
||||
const src = resolve.bind(null, __dirname, '../../../../src');
|
||||
|
||||
const xsrfHeader = 'kbn-xsrf';
|
||||
const versionHeader = 'kbn-version';
|
||||
|
@ -29,23 +29,18 @@ const testPath = '/xsrf/test/route';
|
|||
const whitelistedTestPath = '/xsrf/test/route/whitelisted';
|
||||
const actualVersion = require(src('../package.json')).version;
|
||||
|
||||
describe('xsrf request filter', function () {
|
||||
async function inject(kbnServer, opts) {
|
||||
return await kbnTestServer.makeRequest(kbnServer, opts);
|
||||
}
|
||||
|
||||
const makeServer = async function () {
|
||||
const kbnServer = kbnTestServer.createServer({
|
||||
describe('xsrf request filter', () => {
|
||||
let root;
|
||||
beforeAll(async () => {
|
||||
root = kbnTestServer.createRoot({
|
||||
server: {
|
||||
xsrf: {
|
||||
disableProtection: false,
|
||||
whitelist: [whitelistedTestPath]
|
||||
}
|
||||
xsrf: { disableProtection: false, whitelist: [whitelistedTestPath] }
|
||||
}
|
||||
});
|
||||
|
||||
await kbnServer.ready();
|
||||
await root.start();
|
||||
|
||||
const kbnServer = kbnTestServer.getKbnServer(root);
|
||||
kbnServer.server.route({
|
||||
path: testPath,
|
||||
method: 'GET',
|
||||
|
@ -81,117 +76,68 @@ describe('xsrf request filter', function () {
|
|||
reply(null, 'ok');
|
||||
}
|
||||
});
|
||||
}, 30000);
|
||||
|
||||
return kbnServer;
|
||||
};
|
||||
|
||||
let kbnServer;
|
||||
beforeEach(async () => {
|
||||
kbnServer = await makeServer();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await kbnServer.close();
|
||||
});
|
||||
afterAll(async () => await root.shutdown());
|
||||
|
||||
describe(`nonDestructiveMethod: GET`, function () {
|
||||
it('accepts requests without a token', async function () {
|
||||
const resp = await inject(kbnServer, {
|
||||
url: testPath,
|
||||
method: 'GET'
|
||||
});
|
||||
|
||||
expect(resp.statusCode).toBe(200);
|
||||
expect(resp.payload).toBe('ok');
|
||||
await kbnTestServer.request
|
||||
.get(root, testPath)
|
||||
.expect(200, 'ok');
|
||||
});
|
||||
|
||||
it('accepts requests with the xsrf header', async function () {
|
||||
const resp = await inject(kbnServer, {
|
||||
url: testPath,
|
||||
method: 'GET',
|
||||
headers: {
|
||||
[xsrfHeader]: 'anything',
|
||||
},
|
||||
});
|
||||
|
||||
expect(resp.statusCode).toBe(200);
|
||||
expect(resp.payload).toBe('ok');
|
||||
await kbnTestServer.request
|
||||
.get(root, testPath)
|
||||
.set(xsrfHeader, 'anything')
|
||||
.expect(200, 'ok');
|
||||
});
|
||||
});
|
||||
|
||||
describe(`nonDestructiveMethod: HEAD`, function () {
|
||||
it('accepts requests without a token', async function () {
|
||||
const resp = await inject(kbnServer, {
|
||||
url: testPath,
|
||||
method: 'HEAD'
|
||||
});
|
||||
|
||||
expect(resp.statusCode).toBe(200);
|
||||
expect(resp.payload).toHaveLength(0);
|
||||
await kbnTestServer.request
|
||||
.head(root, testPath)
|
||||
.expect(200, undefined);
|
||||
});
|
||||
|
||||
it('accepts requests with the xsrf header', async function () {
|
||||
const resp = await inject(kbnServer, {
|
||||
url: testPath,
|
||||
method: 'HEAD',
|
||||
headers: {
|
||||
[xsrfHeader]: 'anything',
|
||||
},
|
||||
});
|
||||
|
||||
expect(resp.statusCode).toBe(200);
|
||||
expect(resp.payload).toHaveLength(0);
|
||||
await kbnTestServer.request
|
||||
.head(root, testPath)
|
||||
.set(xsrfHeader, 'anything')
|
||||
.expect(200, undefined);
|
||||
});
|
||||
});
|
||||
|
||||
for (const method of destructiveMethods) {
|
||||
describe(`destructiveMethod: ${method}`, function () { // eslint-disable-line no-loop-func
|
||||
it('accepts requests with the xsrf header', async function () {
|
||||
const resp = await inject(kbnServer, {
|
||||
url: testPath,
|
||||
method: method,
|
||||
headers: {
|
||||
[xsrfHeader]: 'anything',
|
||||
},
|
||||
});
|
||||
|
||||
expect(resp.statusCode).toBe(200);
|
||||
expect(resp.payload).toBe('ok');
|
||||
await kbnTestServer.request[method.toLowerCase()](root, testPath)
|
||||
.set(xsrfHeader, 'anything')
|
||||
.expect(200, 'ok');
|
||||
});
|
||||
|
||||
// this is still valid for existing csrf protection support
|
||||
// it does not actually do any validation on the version value itself
|
||||
it('accepts requests with the version header', async function () {
|
||||
const resp = await inject(kbnServer, {
|
||||
url: testPath,
|
||||
method: method,
|
||||
headers: {
|
||||
[versionHeader]: actualVersion,
|
||||
},
|
||||
});
|
||||
|
||||
expect(resp.statusCode).toBe(200);
|
||||
expect(resp.payload).toBe('ok');
|
||||
await kbnTestServer.request[method.toLowerCase()](root, testPath)
|
||||
.set(versionHeader, actualVersion)
|
||||
.expect(200, 'ok');
|
||||
});
|
||||
|
||||
it('rejects requests without either an xsrf or version header', async function () {
|
||||
const resp = await inject(kbnServer, {
|
||||
url: testPath,
|
||||
method: method
|
||||
});
|
||||
|
||||
expect(resp.statusCode).toBe(400);
|
||||
expect(resp.result).toMatchSnapshot(`${method} reject response`);
|
||||
await kbnTestServer.request[method.toLowerCase()](root, testPath)
|
||||
.expect(400, {
|
||||
statusCode: 400,
|
||||
error: 'Bad Request',
|
||||
message: 'Request must contain a kbn-xsrf header.'
|
||||
});
|
||||
});
|
||||
|
||||
it('accepts whitelisted requests without either an xsrf or version header', async function () {
|
||||
const resp = await inject(kbnServer, {
|
||||
url: whitelistedTestPath,
|
||||
method: method
|
||||
});
|
||||
|
||||
expect(resp.statusCode).toBe(200);
|
||||
expect(resp.payload).toBe('ok');
|
||||
await kbnTestServer.request[method.toLowerCase()](root, whitelistedTestPath)
|
||||
.expect(200, 'ok');
|
||||
});
|
||||
});
|
||||
}
|
|
@ -1,70 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch B.V. under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch B.V. licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
import * as kbnTestServer from '../../test_utils/kbn_server';
|
||||
|
||||
let kbnServer;
|
||||
async function makeServer({ maxPayloadBytesDefault, maxPayloadBytesRoute }) {
|
||||
kbnServer = kbnTestServer.createServer({
|
||||
server: { maxPayloadBytes: maxPayloadBytesDefault }
|
||||
});
|
||||
|
||||
await kbnServer.ready();
|
||||
|
||||
kbnServer.server.route({
|
||||
path: '/payload_size_check/test/route',
|
||||
method: 'POST',
|
||||
config: { payload: { maxBytes: maxPayloadBytesRoute } },
|
||||
handler: function (req, reply) {
|
||||
reply(null, req.payload.data.slice(0, 5));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
async function makeRequest(opts) {
|
||||
return await kbnTestServer.makeRequest(kbnServer, opts);
|
||||
}
|
||||
|
||||
afterEach(async () => await kbnServer.close());
|
||||
|
||||
test('accepts payload with a size larger than default but smaller than route config allows', async () => {
|
||||
await makeServer({ maxPayloadBytesDefault: 100, maxPayloadBytesRoute: 200 });
|
||||
|
||||
const resp = await makeRequest({
|
||||
url: '/payload_size_check/test/route',
|
||||
method: 'POST',
|
||||
payload: { data: Array(150).fill('+').join('') },
|
||||
});
|
||||
|
||||
expect(resp.statusCode).toBe(200);
|
||||
expect(resp.payload).toBe('+++++');
|
||||
});
|
||||
|
||||
test('fails with 400 if payload size is larger than default and route config allows', async () => {
|
||||
await makeServer({ maxPayloadBytesDefault: 100, maxPayloadBytesRoute: 200 });
|
||||
|
||||
const resp = await makeRequest({
|
||||
url: '/payload_size_check/test/route',
|
||||
method: 'POST',
|
||||
payload: { data: Array(250).fill('+').join('') },
|
||||
});
|
||||
|
||||
expect(resp.statusCode).toBe(400);
|
||||
expect(resp.payload).toMatchSnapshot();
|
||||
});
|
|
@@ -21,6 +21,7 @@ import { constant, once, compact, flatten } from 'lodash';
import { fromNode } from 'bluebird';
import { isWorker } from 'cluster';
import { fromRoot, pkg } from '../utils';
import { Config } from './config';
import loggingConfiguration from './logging/configuration';
import configSetupMixin from './config/setup';
import httpMixin from './http';

@@ -30,6 +31,7 @@ import { usageMixin } from './usage';
import { statusMixin } from './status';
import pidMixin from './pid';
import { configDeprecationWarningsMixin } from './config/deprecation_warnings';
import { transformDeprecations } from './config/transform_deprecations';
import configCompleteMixin from './config/complete';
import optimizeMixin from '../optimize';
import * as Plugins from './plugins';

@@ -41,27 +43,26 @@ import { urlShorteningMixin } from './url_shortening';
import { serverExtensionsMixin } from './server_extensions';
import { uiMixin } from '../ui';
import { sassMixin } from './sass';
import { injectIntoKbnServer as newPlatformMixin } from '../core';
import { i18nMixin } from './i18n';

const rootDir = fromRoot('.');

export default class KbnServer {
constructor(settings) {
constructor(settings, core) {
this.name = pkg.name;
this.version = pkg.version;
this.build = pkg.build || false;
this.rootDir = rootDir;
this.settings = settings || {};

this.core = core;

this.ready = constant(this.mixin(
Plugins.waitForInitSetupMixin,

// sets this.config, reads this.settings
configSetupMixin,

newPlatformMixin,

// sets this.server
httpMixin,

@@ -111,13 +112,6 @@ export default class KbnServer {

// notify any deferred setup logic that plugins have initialized
Plugins.waitForInitResolveMixin,

() => {
if (this.config.get('server.autoListen')) {
this.ready = constant(Promise.resolve());
return this.listen();
}
}
));

this.listen = once(this.listen);

@@ -148,14 +142,17 @@ export default class KbnServer {
async listen() {
await this.ready();

const { server } = this;
await fromNode(cb => server.start(cb));

if (isWorker) {
// help parent process know when we are ready
process.send(['WORKER_LISTENING']);
}

const { server, config } = this;
server.log(['listening', 'info'], `Server running at ${server.info.uri}${
config.get('server.rewriteBasePath')
? config.get('server.basePath')
: ''
}`);
return server;
}

@@ -171,7 +168,12 @@ export default class KbnServer {
return await this.server.inject(opts);
}

async applyLoggingConfiguration(config) {
applyLoggingConfiguration(settings) {
const config = new Config(
this.config.getSchema(),
transformDeprecations(settings)
);

const loggingOptions = loggingConfiguration(config);
const subset = {
ops: config.get('ops'),
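The `WORKER_LISTENING` message added to `listen()` above is the worker's half of a ready handshake. A minimal parent-side sketch follows; it is illustrative only (the parent-side wiring is not part of this hunk), and only the message name and its array shape come from the diff:

import cluster from 'cluster';

// Fork a child process and wait for it to report that its HTTP server is accepting connections.
const worker = cluster.fork();

worker.on('message', (msg) => {
  // The worker sends an array whose first element is the message name.
  if (Array.isArray(msg) && msg[0] === 'WORKER_LISTENING') {
    console.log('worker is listening and ready for traffic');
  }
});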
@@ -1,23 +0,0 @@
/*
* Licensed to Elasticsearch B.V. under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch B.V. licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

export function header(user, pass) {
const encoded = new Buffer(`${user}:${pass}`).toString('base64');
return `Basic ${encoded}`;
}
@@ -1,155 +0,0 @@
/*
* Licensed to Elasticsearch B.V. under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch B.V. licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

import { resolve } from 'path';
import { defaultsDeep, set } from 'lodash';
import { header as basicAuthHeader } from './base_auth';
import { createEsTestCluster, esTestConfig, kibanaTestUser, kibanaServerTestUser } from '@kbn/test';
import KbnServer from '../../src/server/kbn_server';
import { ToolingLog } from '@kbn/dev-utils';

const DEFAULTS_SETTINGS = {
server: {
autoListen: true,
// Use the ephemeral port to make sure that tests use the first available
// port and aren't affected by the timing issues in test environment.
port: 0,
xsrf: {
disableProtection: true
}
},
logging: {
quiet: true
},
plugins: {},
optimize: {
enabled: false
},
};

const DEFAULT_SETTINGS_WITH_CORE_PLUGINS = {
plugins: {
scanDirs: [
resolve(__dirname, '../core_plugins'),
],
},
elasticsearch: {
url: esTestConfig.getUrl(),
username: kibanaServerTestUser.username,
password: kibanaServerTestUser.password
},
};

/**
* Creates an instance of KbnServer with default configuration
* tailored for unit tests
*
* @param {Object} [settings={}] Any config overrides for this instance
* @return {KbnServer}
*/
export function createServer(settings = {}) {
return new KbnServer(defaultsDeep({}, settings, DEFAULTS_SETTINGS));
}

/**
* Creates an instance of KbnServer, including all of the core plugins,
* with default configuration tailored for unit tests, and starts es.
*
* @param {Object} options
* @prop {Object} settings Any config overrides for this instance
* @prop {function} adjustTimeout A function(t) => this.timeout(t) that adjust the timeout of a test,
* ensuring the test properly waits for the server to boot without timing out.
* @return {KbnServer}
*/
export async function startTestServers({ adjustTimeout, settings = {} }) {
if (!adjustTimeout) {
throw new Error('adjustTimeout is required in order to avoid flaky tests');
}

const log = new ToolingLog({
level: 'debug',
writeTo: process.stdout
});

log.indent(6);
log.info('starting elasticsearch');
log.indent(4);

const es = createEsTestCluster({ log });

log.indent(-4);

adjustTimeout(es.getStartTimeout());

await es.start();

const kbnServer = createServerWithCorePlugins(settings);

await kbnServer.ready();
await kbnServer.server.plugins.elasticsearch.waitUntilReady();

return {
kbnServer,
es,

async stop() {
await this.kbnServer.close();
await es.cleanup();
},
};
}

/**
* Creates an instance of KbnServer, including all of the core plugins,
* with default configuration tailored for unit tests
*
* @param {Object} [settings={}] Any config overrides for this instance
* @return {KbnServer}
*/
export function createServerWithCorePlugins(settings = {}) {
return new KbnServer(defaultsDeep({}, settings, DEFAULT_SETTINGS_WITH_CORE_PLUGINS, DEFAULTS_SETTINGS));
}

/**
* Creates request configuration with a basic auth header
*/
export function authOptions() {
const { username, password } = kibanaTestUser;
const authHeader = basicAuthHeader(username, password);
return set({}, 'headers.Authorization', authHeader);
}

/**
* Makes a request with test headers via hapi server inject()
*
* The given options are decorated with default testing options, so it's
* recommended to use this function instead of using inject() directly whenever
* possible throughout the tests.
*
* @param {KbnServer} kbnServer
* @param {object} options Any additional options or overrides for inject()
*/
export async function makeRequest(kbnServer, options) {
// Since all requests to Kibana hit core http server first and only after that
// are proxied to the "legacy" Kibana we should inject requests through the top
// level Hapi server used by the core.
return await kbnServer.newPlatform.proxyListener.root.server.http.service.httpServer.server.inject(
defaultsDeep({}, authOptions(), options)
);
}
src/test_utils/kbn_server.ts (new file, 183 lines)
@@ -0,0 +1,183 @@
/*
* Licensed to Elasticsearch B.V. under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch B.V. licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

import { ToolingLog } from '@kbn/dev-utils';
// @ts-ignore: implicit any for JS file
import { createEsTestCluster, esTestConfig, kibanaServerTestUser, kibanaTestUser } from '@kbn/test';
import { defaultsDeep } from 'lodash';
import { resolve } from 'path';
import { BehaviorSubject } from 'rxjs';
import supertest from 'supertest';
import { Env } from '../core/server/config';
import { LegacyObjectToConfigAdapter } from '../core/server/legacy_compat';
import { Root } from '../core/server/root';

type HttpMethod = 'delete' | 'get' | 'head' | 'post' | 'put';

const DEFAULTS_SETTINGS = {
server: {
autoListen: true,
// Use the ephemeral port to make sure that tests use the first available
// port and aren't affected by the timing issues in test environment.
port: 0,
xsrf: { disableProtection: true },
},
logging: { silent: true },
plugins: {},
optimize: { enabled: false },
};

const DEFAULT_SETTINGS_WITH_CORE_PLUGINS = {
plugins: { scanDirs: [resolve(__dirname, '../core_plugins')] },
elasticsearch: {
url: esTestConfig.getUrl(),
username: kibanaServerTestUser.username,
password: kibanaServerTestUser.password,
},
};

export function createRootWithSettings(...settings: Array<Record<string, any>>) {
const env = Env.createDefault({
configs: [],
cliArgs: {
dev: false,
quiet: false,
silent: false,
watch: false,
basePath: false,
},
isDevClusterMaster: false,
});

return new Root(
new BehaviorSubject(
new LegacyObjectToConfigAdapter(defaultsDeep({}, ...settings, DEFAULTS_SETTINGS))
),
env
);
}

/**
* Returns supertest request attached to the core's internal native Node server.
* @param root
* @param method
* @param path
*/
function getSupertest(root: Root, method: HttpMethod, path: string) {
const testUserCredentials = new Buffer(`${kibanaTestUser.username}:${kibanaTestUser.password}`);
return supertest((root as any).server.http.service.httpServer.server.listener)
[method](path)
.set('Authorization', `Basic ${testUserCredentials.toString('base64')}`);
}

/**
* Creates an instance of Root with default configuration
* tailored for unit tests.
*
* @param {Object} [settings={}] Any config overrides for this instance.
* @returns {Root}
*/
export function createRoot(settings = {}) {
return createRootWithSettings(settings);
}

/**
* Creates an instance of Root, including all of the core plugins,
* with default configuration tailored for unit tests.
*
* @param {Object} [settings={}] Any config overrides for this instance.
* @returns {Root}
*/
export function createRootWithCorePlugins(settings = {}) {
return createRootWithSettings(settings, DEFAULT_SETTINGS_WITH_CORE_PLUGINS);
}

/**
* Returns `kbnServer` instance used in the "legacy" Kibana.
* @param root
*/
export function getKbnServer(root: Root) {
return (root as any).server.legacy.service.kbnServer;
}

export const request: Record<
HttpMethod,
(root: Root, path: string) => ReturnType<typeof getSupertest>
> = {
delete: (root, path) => getSupertest(root, 'delete', path),
get: (root, path) => getSupertest(root, 'get', path),
head: (root, path) => getSupertest(root, 'head', path),
post: (root, path) => getSupertest(root, 'post', path),
put: (root, path) => getSupertest(root, 'put', path),
};

/**
* Creates an instance of the Root, including all of the core "legacy" plugins,
* with default configuration tailored for unit tests, and starts es.
*
* @param options
* @prop settings Any config overrides for this instance.
* @prop adjustTimeout A function(t) => this.timeout(t) that adjust the timeout of a
* test, ensuring the test properly waits for the server to boot without timing out.
*/
export async function startTestServers({
adjustTimeout,
settings = {},
}: {
adjustTimeout: (timeout: number) => void;
settings: Record<string, any>;
}) {
if (!adjustTimeout) {
throw new Error('adjustTimeout is required in order to avoid flaky tests');
}

const log = new ToolingLog({
level: 'debug',
writeTo: process.stdout,
});

log.indent(6);
log.info('starting elasticsearch');
log.indent(4);

const es = createEsTestCluster({ log });

log.indent(-4);

adjustTimeout(es.getStartTimeout());

await es.start();

const root = createRootWithCorePlugins(settings);
await root.start();

const kbnServer = getKbnServer(root);
await kbnServer.server.plugins.elasticsearch.waitUntilReady();

return {
kbnServer,
root,
es,

async stop() {
await root.shutdown();
await es.cleanup();
},
};
}
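The helpers above are the `Root`-based replacement for the deleted `createServer`/`makeRequest` utilities. A minimal usage sketch in a jest-style test (the route path and import path are illustrative; the helper names and signatures come from the file above):

import { createRoot, getKbnServer, request } from '../test_utils/kbn_server';

let root;

beforeAll(async () => {
  root = createRoot();   // Root with the test defaults above (ephemeral port, silent logging)
  await root.start();    // per startTestServers() above, the legacy kbnServer comes up with it
});

afterAll(async () => await root.shutdown());

test('responds through the core http server', async () => {
  // supertest request against the core's Node listener, with the test-user auth header
  await request.get(root, '/some/route').expect(200);

  // the legacy kbnServer is still reachable when a test needs to poke at it directly
  expect(getKbnServer(root)).toBeDefined();
});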
@@ -25,10 +25,10 @@ import sinon from 'sinon';
import cheerio from 'cheerio';
import { noop } from 'lodash';

import KbnServer from '../../server/kbn_server';
import { createRoot, getKbnServer, request } from '../../test_utils/kbn_server';

const getInjectedVarsFromResponse = (resp) => {
const $ = cheerio.load(resp.payload);
const $ = cheerio.load(resp.text);
const data = $('kbn-injected-metadata').attr('data');
return JSON.parse(data).legacyMetadata.vars;
};

@@ -45,45 +45,46 @@ const injectReplacer = (kbnServer, replacer) => {
};

describe('UiExports', function () {
describe('#replaceInjectedVars', function () {
let root;
let kbnServer;
before(async () => {
this.slow(2000);
this.timeout(10000);
this.timeout(30000);

let kbnServer;
beforeEach(async () => {
kbnServer = new KbnServer({
server: { port: 0 }, // pick a random open port
logging: { silent: true }, // no logs
optimize: { enabled: false },
plugins: {
paths: [resolve(__dirname, './fixtures/test_app')] // inject an app so we can hit /app/{id}
},
});

await kbnServer.ready();

// TODO: hopefully we can add better support for something
// like this in the new platform
kbnServer.server._requestor._decorations.getUiSettingsService = {
apply: undefined,
method() {
return {
getDefaults: noop,
getUserProvided: noop
};
}
};
root = createRoot({
// inject an app so we can hit /app/{id}
plugins: { paths: [resolve(__dirname, './fixtures/test_app')] },
});

afterEach(async () => {
await kbnServer.close();
kbnServer = null;
});
await root.start();

kbnServer = getKbnServer(root);

// TODO: hopefully we can add better support for something
// like this in the new platform
kbnServer.server._requestor._decorations.getUiSettingsService = {
apply: undefined,
method: () => ({ getDefaults: noop, getUserProvided: noop })
};
});

after(async () => await root.shutdown());

let originalInjectedVarsReplacers;
beforeEach(() => {
originalInjectedVarsReplacers = kbnServer.uiExports.injectedVarsReplacers;
});

afterEach(() => {
kbnServer.uiExports.injectedVarsReplacers = originalInjectedVarsReplacers;
});

describe('#replaceInjectedVars', function () {
it('allows sync replacing of injected vars', async () => {
injectReplacer(kbnServer, () => ({ a: 1 }));

const resp = await kbnServer.inject('/app/test_app');
const resp = await request.get(root, '/app/test_app')
.expect(200);
const injectedVars = getInjectedVarsFromResponse(resp);

expect(injectedVars).to.eql({ a: 1 });

@@ -98,7 +99,8 @@ describe('UiExports', function () {
};
});

const resp = await kbnServer.inject('/app/test_app');
const resp = await request.get(root, '/app/test_app')
.expect(200);
const injectedVars = getInjectedVarsFromResponse(resp);

expect(injectedVars).to.eql({

@@ -111,7 +113,8 @@ describe('UiExports', function () {
injectReplacer(kbnServer, () => ({ foo: 'bar' }));
injectReplacer(kbnServer, stub);

await kbnServer.inject('/app/test_app');
await request.get(root, '/app/test_app')
.expect(200);

sinon.assert.calledOnce(stub);
expect(stub.firstCall.args[0]).to.eql({ foo: 'bar' }); // originalInjectedVars

@@ -126,7 +129,8 @@ describe('UiExports', function () {
injectReplacer(kbnServer, orig => ({ name: orig.name + 'a' }));
injectReplacer(kbnServer, orig => ({ name: orig.name + 'm' }));

const resp = await kbnServer.inject('/app/test_app');
const resp = await request.get(root, '/app/test_app')
.expect(200);
const injectedVars = getInjectedVarsFromResponse(resp);

expect(injectedVars).to.eql({ name: 'sam' });

@@ -138,15 +142,17 @@ describe('UiExports', function () {
throw new Error('replacer failed');
});

const resp = await kbnServer.inject('/app/test_app');
expect(resp).to.have.property('statusCode', 500);
await request.get(root, '/app/test_app')
.expect(500);
});

it('starts off with the injected vars for the app merged with the default injected vars', async () => {
const stub = sinon.stub();
injectReplacer(kbnServer, stub);

await kbnServer.inject('/app/test_app');
await request.get(root, '/app/test_app')
.expect(200);

sinon.assert.calledOnce(stub);
expect(stub.firstCall.args[0]).to.eql({ from_defaults: true, from_test_app: true });
});
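For reference, a hypothetical replacer shaped like the ones these tests register via `injectReplacer`: it receives the original injected vars as its first argument (the only argument these assertions inspect) and returns the replacement object, either synchronously or as a promise. The added key below is illustrative, not from the commit:

const replacer = async (originalInjectedVars) => ({
  ...originalInjectedVars,
  addedByReplacer: true, // illustrative extra var
});

injectReplacer(kbnServer, replacer);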
@@ -22,31 +22,31 @@ import sinon from 'sinon';

import { FieldFormat } from '../field_format';
import * as FieldFormatsServiceNS from '../field_formats_service';
import { createServer } from '../../../test_utils/kbn_server';
import { fieldFormatsMixin } from '../field_formats_mixin';

describe('server.registerFieldFormat(createFormat)', () => {
const sandbox = sinon.createSandbox();

let kbnServer;
let registerFieldFormat;
let fieldFormatServiceFactory;
const serverMock = { decorate() {} };
beforeEach(async () => {
kbnServer = createServer();
await kbnServer.ready();
sandbox.stub(serverMock);
await fieldFormatsMixin({}, serverMock);
[[,, fieldFormatServiceFactory], [,, registerFieldFormat]] = serverMock.decorate.args;
});

afterEach(async () => {
sandbox.restore();
await kbnServer.close();
});
afterEach(() => sandbox.restore());

it('throws if createFormat is not a function', () => {
expect(() => kbnServer.server.registerFieldFormat()).to.throwError(error => {
expect(() => registerFieldFormat()).to.throwError(error => {
expect(error.message).to.match(/createFormat is not a function/i);
});
});

it('calls the createFormat() function with the FieldFormat class', () => {
const createFormat = sinon.stub();
kbnServer.server.registerFieldFormat(createFormat);
registerFieldFormat(createFormat);
sinon.assert.calledOnce(createFormat);
sinon.assert.calledWithExactly(createFormat, sinon.match.same(FieldFormat));
});

@@ -61,9 +61,9 @@ describe('server.registerFieldFormat(createFormat)', () => {
class FooFormat {
static id = 'foo'
}
kbnServer.server.registerFieldFormat(() => FooFormat);
registerFieldFormat(() => FooFormat);

const fieldFormats = await kbnServer.server.fieldFormatServiceFactory({
const fieldFormats = await fieldFormatServiceFactory({
getAll: () => ({}),
getDefaults: () => ({})
});
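The destructuring of `serverMock.decorate.args` above is the crux of this rewrite: with sinon, `.args` holds one argument list per call, and assuming the mixin registers each decoration as `decorate('server', name, fn)`, the third element of each call is the function under test:

// Assumed shape of serverMock.decorate.args after fieldFormatsMixin({}, serverMock) runs:
// [
//   ['server', 'fieldFormatServiceFactory', factoryFn],
//   ['server', 'registerFieldFormat', registerFn],
// ]
const [[, , fieldFormatServiceFactory], [, , registerFieldFormat]] = serverMock.decorate.args;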
@@ -17,7 +17,7 @@
* under the License.
*/

import { createServer } from '../test_utils/kbn_server';
import { tutorialsMixin } from './tutorials_mixin';

const validTutorial = {
id: 'spec1',

@@ -42,15 +42,22 @@
};

describe('tutorial mixins', () => {

let kbnServer;
let getTutorials;
let registerTutorial;
let addScopedTutorialContextFactory;
const serverMock = { decorate: jest.fn() };
beforeEach(async () => {
kbnServer = createServer();
await kbnServer.ready();
await tutorialsMixin({}, serverMock);

[
[,, getTutorials],
[,, registerTutorial],
[,, addScopedTutorialContextFactory]
] = serverMock.decorate.mock.calls;
});

afterEach(async () => {
await kbnServer.close();
afterEach(() => {
jest.clearAllMocks();
});

describe('scoped context', () => {

@@ -70,12 +77,12 @@ describe('tutorial mixins', () => {
return tutorial;
};
beforeEach(async () => {
kbnServer.server.addScopedTutorialContextFactory(spacesContextFactory);
kbnServer.server.registerTutorial(specProvider);
addScopedTutorialContextFactory(spacesContextFactory);
registerTutorial(specProvider);
});

test('passes scoped context to specProviders', () => {
const tutorials = kbnServer.server.getTutorials(mockRequest);
const tutorials = getTutorials(mockRequest);
expect(tutorials.length).toBe(1);
expect(tutorials[0].shortDescription).toBe('I have been provided with scoped context, spaceId: my-space');
});
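The tutorials test applies the same pattern with jest instead of sinon: a `jest.fn()` records its calls on `.mock.calls`, which has the same array-of-argument-lists shape, so the decorated functions can be pulled out the same way (the `'server'` first argument is assumed, mirroring the sketch above):

// Assumed shape of serverMock.decorate.mock.calls after tutorialsMixin({}, serverMock) runs:
// [
//   ['server', 'getTutorials', getTutorialsFn],
//   ['server', 'registerTutorial', registerTutorialFn],
//   ['server', 'addScopedTutorialContextFactory', addScopedContextFn],
// ]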
@@ -69,8 +69,8 @@
"run-sequence": "^2.2.1",
"simple-git": "1.37.0",
"sinon": "^5.0.7",
"supertest": "3.0.0",
"supertest-as-promised": "4.0.2",
"supertest": "^3.1.0",
"supertest-as-promised": "^4.0.2",
"tmp": "0.0.31",
"tree-kill": "^1.1.0",
"typescript": "^2.9.2",
@@ -7475,7 +7475,7 @@ subtext@4.x.x:
pez "2.x.x"
wreck "12.x.x"

superagent@^3.0.0:
superagent@3.8.2:
version "3.8.2"
resolved "https://registry.yarnpkg.com/superagent/-/superagent-3.8.2.tgz#e4a11b9d047f7d3efeb3bbe536d9ec0021d16403"
dependencies:

@@ -7490,19 +7490,19 @@ superagent@^3.0.0:
qs "^6.5.1"
readable-stream "^2.0.5"

supertest-as-promised@4.0.2:
supertest-as-promised@^4.0.2:
version "4.0.2"
resolved "https://registry.yarnpkg.com/supertest-as-promised/-/supertest-as-promised-4.0.2.tgz#0464f2bd256568d4a59bce84269c0548f6879f1a"
dependencies:
bluebird "^3.3.1"
methods "^1.1.1"

supertest@3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/supertest/-/supertest-3.0.0.tgz#8d4bb68fd1830ee07033b1c5a5a9a4021c965296"
supertest@^3.1.0:
version "3.1.0"
resolved "https://registry.yarnpkg.com/supertest/-/supertest-3.1.0.tgz#f9ebaf488e60f2176021ec580bdd23ad269e7bc6"
dependencies:
methods "~1.1.2"
superagent "^3.0.0"
superagent "3.8.2"

supports-color@1.2.0:
version "1.2.0"
yarn.lock (18 changes)
@@ -537,9 +537,9 @@
"@types/cookiejar" "*"
"@types/node" "*"

"@types/supertest@^2.0.4":
version "2.0.4"
resolved "https://registry.yarnpkg.com/@types/supertest/-/supertest-2.0.4.tgz#28770e13293365e240a842d7d5c5a1b3d2dee593"
"@types/supertest@^2.0.5":
version "2.0.5"
resolved "https://registry.yarnpkg.com/@types/supertest/-/supertest-2.0.5.tgz#18d082a667eaed22759be98f4923e0061ae70c62"
dependencies:
"@types/superagent" "*"

@@ -12696,7 +12696,7 @@ suffix@^0.1.0:
version "0.1.1"
resolved "https://registry.yarnpkg.com/suffix/-/suffix-0.1.1.tgz#cc58231646a0ef1102f79478ef3a9248fd9c842f"

superagent@^3.0.0:
superagent@3.8.2:
version "3.8.2"
resolved "https://registry.yarnpkg.com/superagent/-/superagent-3.8.2.tgz#e4a11b9d047f7d3efeb3bbe536d9ec0021d16403"
dependencies:

@@ -12711,19 +12711,19 @@ superagent@^3.0.0:
qs "^6.5.1"
readable-stream "^2.0.5"

supertest-as-promised@4.0.2:
supertest-as-promised@^4.0.2:
version "4.0.2"
resolved "https://registry.yarnpkg.com/supertest-as-promised/-/supertest-as-promised-4.0.2.tgz#0464f2bd256568d4a59bce84269c0548f6879f1a"
dependencies:
bluebird "^3.3.1"
methods "^1.1.1"

supertest@3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/supertest/-/supertest-3.0.0.tgz#8d4bb68fd1830ee07033b1c5a5a9a4021c965296"
supertest@^3.1.0:
version "3.1.0"
resolved "https://registry.yarnpkg.com/supertest/-/supertest-3.1.0.tgz#f9ebaf488e60f2176021ec580bdd23ad269e7bc6"
dependencies:
methods "~1.1.2"
superagent "^3.0.0"
superagent "3.8.2"

supports-color@3.1.2:
version "3.1.2"