Merge branch 'master' into issues/20894

This commit is contained in:
Jonathan Budzenski 2018-10-17 08:45:45 -05:00
commit d4709f2f63
No known key found for this signature in database
GPG key ID: D28BF9418FA0F292
4690 changed files with 225701 additions and 59145 deletions

3
.browserslistrc Normal file
View file

@ -0,0 +1,3 @@
last 2 versions
> 5%
Safari 7

View file

@ -27,5 +27,8 @@ bower_components
/x-pack/coverage
/x-pack/build
/x-pack/plugins/**/__tests__/fixtures/**
/x-pack/plugins/canvas/common/lib/grammar.js
/x-pack/plugins/canvas/canvas_plugin
/x-pack/plugins/canvas/canvas_plugin_src/lib/flot-charts/*
**/*.js.snap
!/.eslintrc.js

View file

@ -2,6 +2,8 @@ const { resolve } = require('path');
const { readdirSync } = require('fs');
const dedent = require('dedent');
const restrictedModules = { paths: ['gulp-util'] };
module.exports = {
extends: ['@elastic/eslint-config-kibana', '@elastic/eslint-config-kibana/jest'],
@ -17,6 +19,11 @@ module.exports = {
},
},
rules: {
'no-restricted-imports': [2, restrictedModules],
'no-restricted-modules': [2, restrictedModules],
},
overrides: [
/**
* Prettier
@ -25,6 +32,7 @@ module.exports = {
files: [
'.eslintrc.js',
'packages/eslint-plugin-kibana-custom/**/*',
'packages/kbn-config-schema/**/*',
'packages/kbn-pm/**/*',
'packages/kbn-es/**/*',
'packages/kbn-datemath/**/*',
@ -116,7 +124,7 @@ module.exports = {
'packages/kbn-ui-framework/generator-kui/**/*',
'packages/kbn-ui-framework/Gruntfile.js',
'packages/kbn-es/src/**/*',
'x-pack/{dev-tools,gulp_helpers,scripts,test,build_chromium}/**/*',
'x-pack/{dev-tools,tasks,scripts,test,build_chromium}/**/*',
'x-pack/**/{__tests__,__test__,__jest__,__fixtures__,__mocks__}/**/*',
'x-pack/**/*.test.js',
'x-pack/gulpfile.js',
@ -323,5 +331,115 @@ module.exports = {
files: ['x-pack/plugins/monitoring/public/**/*'],
env: { browser: true },
},
/**
* Canvas overrides
*/
{
files: ['x-pack/plugins/canvas/**/*'],
plugins: ['prettier'],
rules: {
// preferences
'comma-dangle': [2, 'always-multiline'],
'no-multiple-empty-lines': [2, { max: 1, maxEOF: 1 }],
'no-multi-spaces': 2,
radix: 2,
curly: [2, 'multi-or-nest', 'consistent'],
// annoying rules that conflict with prettier
'space-before-function-paren': 0,
indent: 0,
'wrap-iife': 0,
'max-len': 0,
// module importing
'import/order': [
2,
{ groups: ['builtin', 'external', 'internal', 'parent', 'sibling', 'index'] },
],
'import/extensions': [2, 'never', { json: 'always', less: 'always', svg: 'always' }],
// prettier
'prettier/prettier': 2,
// react
'jsx-quotes': 2,
'react/no-did-mount-set-state': 2,
'react/no-did-update-set-state': 2,
'react/no-multi-comp': [2, { ignoreStateless: true }],
'react/self-closing-comp': 2,
'react/sort-comp': 2,
'react/jsx-boolean-value': 2,
'react/jsx-wrap-multilines': 2,
'react/no-unescaped-entities': [2, { forbid: ['>', '}'] }],
'react/forbid-elements': [
2,
{
forbid: [
{
element: 'EuiConfirmModal',
message: 'Use <ConfirmModal> instead',
},
{
element: 'EuiPopover',
message: 'Use <Popover> instead',
},
{
element: 'EuiIconTip',
message: 'Use <TooltipIcon> instead',
},
],
},
],
},
},
{
files: ['x-pack/plugins/canvas/*', 'x-pack/plugins/canvas/**/*'],
rules: {
'import/no-extraneous-dependencies': [
'error',
{
packageDir: './x-pack/',
},
],
},
},
{
files: [
'x-pack/plugins/canvas/gulpfile.js',
'x-pack/plugins/canvas/tasks/*.js',
'x-pack/plugins/canvas/tasks/**/*.js',
'x-pack/plugins/canvas/__tests__/**/*',
'x-pack/plugins/canvas/**/{__tests__,__test__,__jest__,__fixtures__,__mocks__}/**/*',
],
rules: {
'import/no-extraneous-dependencies': [
'error',
{
devDependencies: true,
peerDependencies: true,
packageDir: './x-pack/',
},
],
},
},
{
files: ['x-pack/plugins/canvas/canvas_plugin_src/**/*'],
globals: { canvas: true, $: true },
rules: {
'import/no-extraneous-dependencies': [
'error',
{
packageDir: './x-pack/',
},
],
'import/no-unresolved': [
'error',
{
ignore: ['!!raw-loader.+.svg$'],
},
],
},
},
],
};

4
.github/CODEOWNERS vendored Normal file
View file

@ -0,0 +1,4 @@
# GitHub CODEOWNERS definition
# See: https://help.github.com/articles/about-codeowners/
/x-pack/plugins/security/ @elastic/kibana-security

View file

@ -1,13 +1,19 @@
<!--
Thank you for your interest in and contributing to Kibana! There
are a few simple things to check before submitting your pull request
that can help with the review process. You should delete these items
from your submission, but they are here to help bring them to your
attention.
## Summary
Summarize your PR. If it involves visual changes include a screenshot or gif.
### Checklist
Use ~~strikethroughs~~ to remove checklist items you don't feel are applicable to this PR.
- [ ] This was checked for cross-browser compatibility, [including a check against IE11](https://github.com/elastic/kibana/blob/master/CONTRIBUTING.md#cross-browser-compatibility)
- [ ] Any text added follows [EUI's writing guidelines](https://elastic.github.io/eui/#/guidelines/writing), uses sentence case text and includes [i18n support](https://github.com/elastic/kibana/blob/master/packages/kbn-i18n/README.md)
- [ ] [Documentation](https://github.com/elastic/kibana/blob/master/CONTRIBUTING.md#writing-documentation) was added for features that require explanation or tutorials
- [ ] [Unit or functional tests](https://github.com/elastic/kibana/blob/master/CONTRIBUTING.md#cross-browser-compatibility) were updated or added to match the most common scenarios
- [ ] This was checked for [keyboard-only and screenreader accessibility](https://developer.mozilla.org/en-US/docs/Learn/Tools_and_testing/Cross_browser_testing/Accessibility#Accessibility_testing_checklist)
### For maintainers
- [ ] This was checked for breaking API changes and was [labeled appropriately](https://github.com/elastic/kibana/blob/master/CONTRIBUTING.md#release-notes-process)
- [ ] This includes a feature addition or change that requires a release note and was [labeled appropriately](https://github.com/elastic/kibana/blob/master/CONTRIBUTING.md#release-notes-process)
- Have you signed the [contributor license agreement](https://www.elastic.co/contributor-agreement)?
- Have you followed the [contributor guidelines](https://github.com/elastic/kibana/blob/master/CONTRIBUTING.md)?
- If submitting code, have you included unit tests that cover the changes?
- If submitting code, have you tested and built your code locally prior to submission with `yarn test && yarn build`?
- If submitting code, is your pull request against master? Unless there is a good reason otherwise, we prefer pull requests against master and will backport as needed.
-->

2
.gitignore vendored
View file

@ -42,4 +42,4 @@ package-lock.json
.vscode
npm-debug.log*
.tern-project
index.css
**/public/index.css

19
.i18nrc.json Normal file
View file

@ -0,0 +1,19 @@
{
"paths": {
"common.ui": "src/ui",
"inputControl":"src/core_plugins/input_control_vis",
"kbn": "src/core_plugins/kibana",
"kbnVislibVisTypes": "src/core_plugins/kbn_vislib_vis_types",
"markdownVis": "src/core_plugins/markdown_vis",
"metricVis": "src/core_plugins/metric_vis",
"statusPage": "src/core_plugins/status_page",
"tagCloud": "src/core_plugins/tagcloud",
"xpack.idxMgmt": "x-pack/plugins/index_management"
},
"exclude": [
"src/ui/ui_render/bootstrap/app_bootstrap.js",
"src/ui/ui_render/ui_render_mixin.js",
"x-pack/plugins/monitoring/public/components/cluster/overview/alerts_panel.js",
"x-pack/plugins/monitoring/public/directives/alerts/index.js"
]
}

View file

@ -1 +1 @@
8.11.3
8.11.4

2
.nvmrc
View file

@ -1 +1 @@
8.11.3
8.11.4

View file

@ -30,6 +30,7 @@ A high level overview of our contributing guidelines.
- [Browser Automation Notes](#browser-automation-notes)
- [Building OS packages](#building-os-packages)
- [Writing documentation](#writing-documentation)
- [Release Notes Process](#release-notes-process)
- [Signing the contributor license agreement](#signing-the-contributor-license-agreement)
- [Submitting a Pull Request](#submitting-a-pull-request)
- [Code Reviewing](#code-reviewing)
@ -189,9 +190,10 @@ node scripts/makelogs
> Make sure to execute `node scripts/makelogs` *after* elasticsearch is up and running!
Start the development server.
```bash
yarn start
```
```bash
yarn start
```
> On Windows, you'll need you use Git Bash, Cygwin, or a similar shell that exposes the `sh` command. And to successfully build you'll need Cygwin optional packages zip, tar, and shasum.
@ -271,6 +273,13 @@ You can get all build options using the following command:
yarn build --help
```
macOS users on a machine with a discrete graphics card may see significant speedups (up to 2x) when running tests by changing your terminal emulator's GPU settings. In iTerm2:
- Open Preferences (Command + ,)
- In the General tab, under the "Magic" section, ensure "GPU rendering" is checked
- Open "Advanced GPU Settings..."
- Uncheck the "Prefer integrated to discrete GPU" option
- Restart iTerm
### Debugging Server Code
`yarn debug` will start the server with Node's inspect flag. Kibana's development mode will start three processes. Chrome's developer tools can be configured to connect to all three under the connection tab.
@ -398,6 +407,31 @@ README for getting the docs tooling set up.
node scripts/docs.js --open
```
### Release Notes Process
Part of this process only applies to maintainers, since it requires access to Github labels.
Kibana publishes major, minor and patch releases periodically through the year. During this process we run a script against this repo to collect the applicable PRs against that release and generate [Release Notes](https://www.elastic.co/guide/en/kibana/current/release-notes.html). To include your change in the Release Notes:
1. In the title, summarize what the PR accomplishes in language that is meaningful to the user. In general, use present tense (for example, Adds, Fixes) in sentence case.
1. Label the PR with the targeted version (ex: 6.5).
1. Label the PR with the appropriate github labels:
* For a new feature or functionality, use `release_note:enhancement`.
* For an external-facing fix, use `release_note:fix`. Exception: docs, build, and test fixes do not go in the Release Notes.
* For a deprecated feature, use `release_note:deprecation`.
* For a breaking change, use `release-breaking:note`.
We also produce a blog post that details more important breaking API changes every minor and major release. If the PR includes a breaking API change, apply the label `release_note:dev_docs`. Additionally add a brief summary of the break at the bottom of the PR using the format below:
```
# Dev Docs
## Name the feature with the break (ex: Visualize Loader)
Summary of the change. Anything Under `#Dev Docs` will be used in the blog.
```
## Signing the contributor license agreement
Please make sure you have signed the [Contributor License Agreement](http://www.elastic.co/contributor-agreement/). We are not asking you to assign copyright to us, but to give us the right to distribute your code without restriction. We ask this of all contributors in order to assure our users of the origin and continuing existence of the code. You only need to sign the CLA once.

View file

@ -103,6 +103,33 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
---
This product bundles childnode-remove which is available under a
"MIT" license.
The MIT License (MIT)
Copyright (c) 2016-present, jszhou
https://github.com/jserz/js_piece
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
---
This product bundles geohash.js which is available under a
"MIT" license. For details, see src/ui/public/utils/decode_geo_hash.js.
@ -158,25 +185,3 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
---
This product uses "radioactive button" styles that were published on
https://zurb.com/playground/radioactive-buttons under an "MIT" License.
Copyright (c) ZURB
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in the
Software without restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the
following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View file

@ -22,3 +22,7 @@ All filenames should use `snake_case`.
*Wrong:*
- `src/kibana/IndexPatterns/IndexPattern.js`
## TypeScript vs JavaScript
Whenever possible, write code in TypeScript instead of javascript, especially if it's new code. Check out [TYPESCRIPT.md](TYPESCRIPT.md) for help with this process.

243
TYPESCRIPT.md Normal file
View file

@ -0,0 +1,243 @@
## TypeScriptifying Kibana Tips
### Converting existing code
To convert existing code over to TypeScript:
1. rename the file from `.js` to either `.ts` (if there is no html or jsx in the file) or `.tsx` (if there is).
2. Ensure tslint is running and installed in the IDE of your choice. There will usually be some linter errors after the file rename.
3. Auto-fix what you can. This will save you a lot of time! VSCode can be set to auto fix tslint errors when files are saved.
### How to fix common TypeScript errors
The first thing that will probably happen when you convert a `.js` file in our system to `.ts` is that some imports will be lacking types.
#### EUI component is missing types
1. Check https://github.com/elastic/eui/issues/256 to see if they know its missing, if its not on there, add it.
2. Temporarily get around the issue by using a declared module and exporting the missing types with the most basic types available. Bonus points if you write a PR yourself to the EUI repo to add the types, but having them available back in Kibana will take some time, as a new EUI release will need to be generated, then that new release pointed to in Kibana. Best, to make forward progress, to do a temporary workaround.
```ts
declare module '@elastic/eui' {
export const EuiPopoverTitle: React.SFC<any>;
}
import { EuiPopoverTitle } from '@elastic/eui';
```
Some background on the differences between module declaration and augmentation:
In TypeScript module declarations can not be merged, which means each module can only be declared once. But it is possible to augment previously declared modules. The documentation about the distinction between module declaration and augmentation is sparse. The observed rules for `declare module '...' {}` in a `.d.ts` file seem to be:
* it is treated as a module declaration when the file itself is not a module
* it is treated as a module augmentation when the file itself is module
A `.d.ts` file is treated as a module if it contains any top-level `import` or `export` statements. That means that in order to write a module declaration the `import`s must be contained within the `declare` block and none must be located on the topmost level. Conversely, to write a module augmentation there must be at least one top-level `import` or `export` and the `declare` block must not contain any `import` statements.
Since `@elastic/eui` already ships with a module declaration, any local additions must be performed using module augmentation, e.g.
```typescript
// file `my_plugin/types/eui.d.ts`
import { CommonProps } from '@elastic/eui';
import { SFC } from 'react';
declare module '@elastic/eui' {
export type EuiNewComponentProps = CommonProps & {
additionalProp: string;
};
export const EuiNewComponent: SFC<EuiNewComponentProps>;
}
```
#### Internal dependency is missing types.
1. Open up the file and see how easy it would be to convert to TypeScript.
2. If it's very straightforward, go for it.
3. If it's not and you wish to stay focused on your own PR, get around the error by adding a type definition file in the same folder as the dependency, with the same name.
4. Minimally you will need to type what you are using in your PR. No need to go crazy to fully type the thing or you might be there for awhile depending on what's available.
For example:
metadata.js:
```js
export let metadata = null;
export function __newPlatformInit__(legacyMetadata) {
...
}
```
documentation_links.js:
```js
import { metadata } from './metadata';
export const DOC_LINK_VERSION = metadata.branch;
```
To TypeScript `documentation_links.js` you'll need to add a type definition for `metadata.js`
metadata.d.ts
```
declare interface Metadata {
public branch: string;
}
declare const metadata: Metadata;
export { metadata };
```
#### External dependency is missing types
1. See if types exist for this module and can be installed, by doing something like:
`yarn add -D @types/markdown-it@8.4.1`
Use the version number that we have installed in package.json. This may not always work, and you might get something like:
`Please choose a version of "@types/markdown-it" from this list:`
If that happens, just pick the closest one.
If yarn doesn't find the module it may not have types. For example, our `rison_node` package doesn't have types. In this case you have a few options:
1. Contribute types into the DefinitelyTyped repo itself, or
2. Create a top level `types` folder and point to that in the tsconfig. For example, Infra team already handled this for `rison_node` and added: `x-pack/plugins/infra/types/rison_node.d.ts`. Other code uses it too so we will need to pull it up. Or,
3. Add a `// @ts-ignore` line above the import. This should be used minimally, the above options are better. However, sometimes you have to resort to this method. For example, the `expect.js` module will require this line. We don't have type definitions installed for this library. Installing these types would conflict with the jest typedefs for expect, and since they aren't API compatible with each other, it's not possible to make both test frameworks happy. Since we are moving from mocha => jest, we don't see this is a big issue.
### TypeScripting react files
React has it's own concept of runtime types via `proptypes`. TypeScript gives you compile time types so I prefer those.
Before:
```jsx
import PropTypes from 'prop-types';
export class Button extends Component {
state = {
buttonWasClicked = false
};
render() {
return <button onClick={() => setState({ buttonWasClicked: true })}>{this.props.text}</button>
}
}
Button.proptypes = {
text: PropTypes.string,
}
```
After:
```tsx
interface Props {
text: string;
}
interface State {
buttonWasClicked: boolean;
}
export class Button extends Component<Props, State> {
state = {
buttonWasClicked = false
};
render() {
return <button onClick={() => setState({ buttonWasClicked: true })}>{this.props.text}</button>
}
}
```
Note that the name of `Props` and `State` doesn't matter, the order does. If you are exporting those interfaces to be used elsewhere, you probably should give them more fleshed out names, such as `ButtonProps` and `ButtonState`.
### Typing functions
In react proptypes, we often will use `PropTypes.func`. In TypeScript, a function is `() => void`, or you can more fully flesh it out, for example:
- `(inputParamName: string) => string`
- `(newLanguage: string) => void`
- `() => Promise<string>`
### Typing destructured object parameters
Especially since we often use the spread operator, this syntax is a little different and probably worth calling out.
Before:
```js
function ({ title, description }) {
...
}
```
After:
```ts
function ({ title, description }: {title: string, description: string}) {
...
}
or, use an interface
interface Options {
title: string;
description: string;
}
function ({ title, description }: Options) {
...
}
```
## Use `any` as little as possible
Using any is sometimes valid, but should rarely be used, even if to make quicker progress. Even `Unknown` is better than using `any` if you aren't sure of an input parameter.
If you use a variable that isn't initially defined, you should give it a type or it will be `any` by default (and strangely this isn't a warning, even though I think it should be)
Before - `color` will be type `any`:
```js
let color;
if (danger) {
color = 'red';
} else {
color = 'green';
}
```
After - `color` will be type `string`:
```ts
let color: string;
if (danger) {
color = 'red';
} else {
color = 'green';
}
```
Another quirk, default `Map\WeakMap\Set` constructors use any-based type signature like `Map<any, any>\WeakMap<any, any>\Set<any>`. That means that TS won't complain about the piece of code below:
```ts
const anyMap = new Map();
anyMap.set('1', 2);
anyMap.set('2', '3');
anyMap.set(3, '4');
const anySet = new Set();
anySet.add(1);
anySet.add('2');
```
So we should explicitly define types for default constructors whenever possible:
```ts
const typedMap = new Map<string, number>();
typedMap.set('1', 2);
typedMap.set('2', '3'); // TS error
typedMap.set(3, '4'); // TS error
const typedSet = new Set<number>();
typedSet.add(1);
typedSet.add('2'); // TS error
```

View file

@ -26,13 +26,18 @@ entirely.
[float]
== APIs
* <<spaces-api>>
* <<role-management-api>>
* <<saved-objects-api>>
* <<dashboard-import-api>>
* <<logstash-configuration-management-api>>
* <<url-shortening-api>>
--
include::api/spaces-management.asciidoc[]
include::api/role-management.asciidoc[]
include::api/saved-objects.asciidoc[]
include::api/dashboard-import.asciidoc[]
include::api/logstash-configuration-management.asciidoc[]
include::api/url-shortening.asciidoc[]

View file

@ -0,0 +1,17 @@
[[dashboard-import-api]]
== Dashboard Import API
The dashboard import/export APIs allow people to import dashboards along with
all of their corresponding saved objects such as visualizations, saved
searches, and index patterns.
Traditionally, developers would perform this level of integration by writing
documents directly to the `.kibana` index. *Do not do this!* Writing directly
to the `.kibana` index is not safe and it _will_ result in corrupted data that
permanently breaks Kibana in a future version.
* <<dashboard-import-api-import>>
* <<dashboard-import-api-export>>
include::dashboard-import/import.asciidoc[]
include::dashboard-import/export.asciidoc[]

View file

@ -0,0 +1,38 @@
[[dashboard-import-api-export]]
=== Export Dashboard
experimental[This functionality is *experimental* and may be changed or removed completely in a future release.]
The dashboard export API allows people to export dashboards along with all of
their corresponding saved objects such as visualizations, saved searches, and
index patterns.
==== Request
`GET /api/kibana/dashboards/export`
==== Query Parameters
`dashboard` (optional)::
(array|string) The id(s) of the dashboard(s) to export
==== Response body
The response body will have a top level `objects` property that contains an
array of saved objects. The order of these objects is not guaranteed. You
should use this exact response body as the request body for the corresponding
<<dashboard-import-api-import, Import Dashboard API>>.
==== Examples
The following example exports all saved objects associated with and including
the dashboard with id `942dcef0-b2cd-11e8-ad8e-85441f0c2e5c`.
[source,js]
--------------------------------------------------
GET api/kibana/dashboards/export?dashboard=942dcef0-b2cd-11e8-ad8e-85441f0c2e5c
--------------------------------------------------
// KIBANA
A successful call returns a response code of `200` along with the exported
objects as the response body.

View file

@ -0,0 +1,96 @@
[[dashboard-import-api-import]]
=== Import Dashboard
experimental[This functionality is *experimental* and may be changed or removed completely in a future release.]
The dashboard import API allows people to import dashboards along with all of
their corresponding saved objects such as visualizations, saved searches, and
index patterns.
==== Request
`POST /api/kibana/dashboards/import`
==== Query Parameters
`force` (optional)::
(boolean) Overwrite any existing objects on id conflict
`exclude` (optional)::
(array) Saved object types that should not be imported
==== Request Body
The request body is JSON, but you should not manually construct a payload to
this endpoint. Instead, use the complete response body from the
<<dashboard-import-api-export, Export Dashboard API>> as the request body to
this import API.
==== Response body
The response body will have a top level `objects` property that contains an
array of the saved objects that were created.
==== Examples
The following example imports saved objects associated with and including the
dashboard with id `942dcef0-b2cd-11e8-ad8e-85441f0c2e5c`.
[source,js]
--------------------------------------------------
POST api/kibana/dashboards/import?exclude=index-pattern
{
"objects": [
{
"id": "80b956f0-b2cd-11e8-ad8e-85441f0c2e5c",
"type": "visualization",
"updated_at": "2018-09-07T18:40:33.247Z",
"version": 1,
"attributes": {
"title": "Count Example",
"visState": "{\"title\":\"Count Example\",\"type\":\"metric\",\"params\":{\"addTooltip\":true,\"addLegend\":false,\"type\":\"metric\",\"metric\":{\"percentageMode\":false,\"useRanges\":false,\"colorSchema\":\"Green to Red\",\"metricColorMode\":\"None\",\"colorsRange\":[{\"from\":0,\"to\":10000}],\"labels\":{\"show\":true},\"invertColors\":false,\"style\":{\"bgFill\":\"#000\",\"bgColor\":false,\"labelColor\":false,\"subText\":\"\",\"fontSize\":60}}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}}]}",
"uiStateJSON": "{}",
"description": "",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"90943e30-9a47-11e8-b64d-95841ca0b247\",\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"filter\":[]}"
}
}
},
{
"id": "90943e30-9a47-11e8-b64d-95841ca0b247",
"type": "index-pattern",
"updated_at": "2018-09-07T18:39:47.683Z",
"version": 1,
"attributes": {
"title": "kibana_sample_data_logs",
"timeFieldName": "timestamp",
"fields": "<truncated for example>",
"fieldFormatMap": "{\"hour_of_day\":{}}"
}
},
{
"id": "942dcef0-b2cd-11e8-ad8e-85441f0c2e5c",
"type": "dashboard",
"updated_at": "2018-09-07T18:41:05.887Z",
"version": 1,
"attributes": {
"title": "Example Dashboard",
"hits": 0,
"description": "",
"panelsJSON": "[{\"gridData\":{\"w\":24,\"h\":15,\"x\":0,\"y\":0,\"i\":\"1\"},\"version\":\"7.0.0-alpha1\",\"panelIndex\":\"1\",\"type\":\"visualization\",\"id\":\"80b956f0-b2cd-11e8-ad8e-85441f0c2e5c\",\"embeddableConfig\":{}}]",
"optionsJSON": "{\"darkTheme\":false,\"useMargins\":true,\"hidePanelTitles\":false}",
"version": 1,
"timeRestore": false,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"query\":{\"query\":\"\",\"language\":\"lucene\"},\"filter\":[]}"
}
}
}
]
}
--------------------------------------------------
// KIBANA
A response code of `200` will be returned even if there are errors importing
individual saved objects. In that case, error information will be returned in
the response body on an object-by-object basis.

View file

@ -0,0 +1,18 @@
[role="xpack"]
[[role-management-api]]
== Kibana Role Management API
experimental[This API is *experimental* and may be changed or removed completely in a future release. The underlying mechanism of enforcing role based access control is stable, but the APIs for managing the roles are currently experimental.]
The role management API allows people to manage roles that grant <<kibana-privileges>>.
It is *not* supported to do so using the
{ref}/security-api.html#security-role-apis[{es} role management APIs], and doing
so will likely cause {kib}'s authorization to behave unexpectedly.
* <<role-management-api-put>>
* <<role-management-api-get>>
* <<role-management-api-delete>>
include::role-management/put.asciidoc[]
include::role-management/get.asciidoc[]
include::role-management/delete.asciidoc[]

View file

@ -0,0 +1,24 @@
[[role-management-api-delete]]
=== Delete role
experimental[This API is experimental and may be changed or removed completely in a future release. Although the underlying mechanism of enforcing role-based access control is stable, the APIs for managing the roles are currently experimental.]
==== Authorization
To use this API, you must have at least the `manage_security` cluster privilege.
==== Request
To delete a role, submit a DELETE request to the `/api/security/role/<rolename>`
endpoint:
[source,js]
--------------------------------------------------
DELETE /api/security/role/my_admin_role
--------------------------------------------------
// KIBANA
==== Response
If the role is successfully deleted, the response code is `204`; otherwise, the response
code is 404.

View file

@ -0,0 +1,111 @@
[[role-management-api-get]]
=== Get Role
experimental[This API is experimental and may be changed or removed completely in a future release. Although the underlying mechanism of enforcing role-based access control is stable, the APIs for managing the roles are currently experimental.]
Retrieves all {kib} roles, or a specific role.
==== Authorization
To use this API, you must have at least the `manage_security` cluster privilege.
==== Get all {kib} roles
===== Request
To retrieve all roles, issue a GET request to the
/api/security/role endpoint.
[source,js]
--------------------------------------------------
GET /api/security/role
--------------------------------------------------
// KIBANA
===== Response
A successful call returns a response code of `200` and a response body containing a JSON
representation of the roles.
[source,js]
--------------------------------------------------
[
{
"name": "my_kibana_role",
"metadata" : {
"version" : 1
},
"transient_metadata": {
"enabled": true
},
"elasticsearch": {
"indices": [ ],
"cluster": [ ],
"run_as": [ ]
},
"kibana": [ {
"privileges": [ "all" ]
} ],
},
{
"name": "my_admin_role",
"metadata" : {
"version" : 1
},
"transient_metadata": {
"enabled": true
},
"elasticsearch": {
"cluster" : [ "all" ],
"indices" : [ {
"names" : [ "index1", "index2" ],
"privileges" : [ "all" ],
"field_security" : {
"grant" : [ "title", "body" ]
},
"query" : "{\"match\": {\"title\": \"foo\"}}"
} ],
},
"kibana": [ ]
}
]
--------------------------------------------------
==== Get a specific role
===== Request
To retrieve a specific role, issue a GET request to
the `/api/security/role/<rolename>` endpoint:
[source,js]
--------------------------------------------------
GET /api/security/role/my_kibana_role
--------------------------------------------------
// KIBANA
===== Response
A successful call returns a response code of `200` and a response body containing a JSON
representation of the role.
[source,js]
--------------------------------------------------
{
"name": "my_kibana_role",
"metadata" : {
"version" : 1
},
"transient_metadata": {
"enabled": true
},
"elasticsearch": {
"cluster": [ ],
"indices": [ ],
"run_as": [ ]
},
"kibana": [ {
"privileges": [ "all" ]
} ]
}
--------------------------------------------------

View file

@ -0,0 +1,98 @@
[[role-management-api-put]]
=== Create or Update Role
experimental[This API is experimental and may be changed or removed completely in a future release. Although the underlying mechanism of enforcing role-based access control is stable, the APIs for managing the roles are currently experimental.]
Creates a new {kib} role or updates the attributes of an existing role. {kib} roles are stored in the
{es} native realm.
==== Authorization
To use this API, you must have at least the `manage_security` cluster privilege.
==== Request
To create or update a role, issue a PUT request to the
`/api/security/role/<rolename>` endpoint.
[source,js]
--------------------------------------------------
PUT /api/security/role/my_kibana_role
--------------------------------------------------
==== Request Body
The following parameters can be specified in the body of a PUT request to add or update a role:
`metadata`:: (object) Optional meta-data. Within the `metadata` object, keys
that begin with `_` are reserved for system usage.
`elasticsearch`:: (object) Optional {es} cluster and index privileges, valid keys are
`cluster`, `indices` and `run_as`. For more information, see {xpack-ref}/defining-roles.html[Defining Roles].
`kibana`:: (object) An object that specifies the <<kibana-privileges>>. Valid keys are `global` and `space`. Privileges defined in the `global` key will apply to all spaces within Kibana, and will take precedent over any privileges defined in the `space` key. For example, specifying `global: ["all"]` will grant full access to all spaces within Kibana, even if the role indicates that a specific space should only have `read` privileges.
===== Example
[source,js]
--------------------------------------------------
PUT /api/security/role/my_kibana_role
{
"metadata" : {
"version" : 1
},
"elasticsearch": {
"cluster" : [ "all" ],
"indices" : [ {
"names" : [ "index1", "index2" ],
"privileges" : [ "all" ],
"field_security" : {
"grant" : [ "title", "body" ]
},
"query" : "{\"match\": {\"title\": \"foo\"}}"
} ]
},
"kibana": {
"global": ["all"]
}
}
--------------------------------------------------
// KIBANA
==== Response
A successful call returns a response code of `204` and no response body.
==== Granting access to specific spaces
To grant access to individual spaces within {kib}, specify the space identifier within the `kibana` object.
Note: privileges defined in the `global` key take precedent over any privileges granted to individual spaces.
[source,js]
--------------------------------------------------
PUT /api/security/role/my_kibana_role
{
"metadata" : {
"version" : 1
},
"elasticsearch": {
"cluster" : [ "all" ],
"indices" : [ {
"names" : [ "index1", "index2" ],
"privileges" : [ "all" ],
"field_security" : {
"grant" : [ "title", "body" ]
},
"query" : "{\"match\": {\"title\": \"foo\"}}"
} ]
},
"kibana": {
"global": [],
"space": {
"marketing": ["all"],
"engineering": ["read"]
}
}
}
--------------------------------------------------

View file

@ -11,13 +11,12 @@ saved objects by various conditions.
`GET /api/saved_objects/_find`
==== Query Parameters
`type` (required)::
(array|string) The saved object type(s) that the response should be limited to
`per_page` (optional)::
(number) The number of objects to return per page
`page` (optional)::
(number) The page of objects to return
`type` (optional)::
(array|string) The saved object type(s) that the response should be limited to
`search` (optional)::
(string) A {ref}/query-dsl-simple-query-string-query.html[simple_query_string] Elasticsearch query to filter the objects in the response
`search_fields` (optional)::

View file

@ -0,0 +1,17 @@
[role="xpack"]
[[spaces-api]]
== Kibana Spaces API
experimental[This API is *experimental* and may be changed or removed completely in a future release. The underlying Spaces concepts are stable, but the APIs for managing Spaces are currently experimental.]
The spaces API allows people to manage their spaces within {kib}.
* <<spaces-api-post>>
* <<spaces-api-put>>
* <<spaces-api-get>>
* <<spaces-api-delete>>
include::spaces-management/post.asciidoc[]
include::spaces-management/put.asciidoc[]
include::spaces-management/get.asciidoc[]
include::spaces-management/delete.asciidoc[]

View file

@ -0,0 +1,25 @@
[[spaces-api-delete]]
=== Delete space
experimental[This API is *experimental* and may be changed or removed completely in a future release. The underlying Spaces concepts are stable, but the APIs for managing Spaces are currently experimental.]
[WARNING]
==================================================
Deleting a space will automatically delete all saved objects that belong to that space. This operation cannot be undone!
==================================================
==== Request
To delete a space, submit a DELETE request to the `/api/spaces/space/<space_id>`
endpoint:
[source,js]
--------------------------------------------------
DELETE /api/spaces/space/marketing
--------------------------------------------------
// KIBANA
==== Response
If the space is successfully deleted, the response code is `204`; otherwise, the response
code is `404`.

View file

@ -0,0 +1,77 @@
[[spaces-api-get]]
=== Get Space
experimental[This API is *experimental* and may be changed or removed completely in a future release. The underlying Spaces concepts are stable, but the APIs for managing Spaces are currently experimental.]
Retrieves all {kib} spaces, or a specific space.
==== Get all {kib} spaces
===== Request
To retrieve all spaces, issue a GET request to the
`/api/spaces/space` endpoint.
[source,js]
--------------------------------------------------
GET /api/spaces/space
--------------------------------------------------
// KIBANA
===== Response
A successful call returns a response code of `200` and a response body containing a JSON
representation of the spaces.
[source,js]
--------------------------------------------------
[
{
"id": "default",
"name": "Default",
"description" : "This is the Default Space",
"_reserved": true
},
{
"id": "marketing",
"name": "Marketing",
"description" : "This is the Marketing Space",
"color": "#aabbcc",
"initials": "MK"
},
{
"id": "sales",
"name": "Sales",
"initials": "MK"
}
]
--------------------------------------------------
==== Get a specific space
===== Request
To retrieve a specific space, issue a GET request to
the `/api/spaces/space/<space_id>` endpoint:
[source,js]
--------------------------------------------------
GET /api/spaces/space/marketing
--------------------------------------------------
// KIBANA
===== Response
A successful call returns a response code of `200` and a response body containing a JSON
representation of the space.
[source,js]
--------------------------------------------------
{
"id": "marketing",
"name": "Marketing",
"description" : "This is the Marketing Space",
"color": "#aabbcc",
"initials": "MK"
}
--------------------------------------------------

View file

@ -0,0 +1,50 @@
[[spaces-api-post]]
=== Create Space
experimental[This API is *experimental* and may be changed or removed completely in a future release. The underlying Spaces concepts are stable, but the APIs for managing Spaces are currently experimental.]
Creates a new {kib} space. To update an existing space, use the PUT command.
==== Request
To create a space, issue a POST request to the
`/api/spaces/space` endpoint.
[source,js]
--------------------------------------------------
POST /api/spaces/space
--------------------------------------------------
==== Request Body
The following parameters can be specified in the body of a POST request to create a space:
`id`:: (string) Required identifier for the space. This identifier becomes part of Kibana's URL when inside the space. This cannot be changed by the update operation.
`name`:: (string) Required display name for the space.
`description`:: (string) Optional description for the space.
`initials`:: (string) Optionally specify the initials shown in the Space Avatar for this space. By default, the initials will be automatically generated from the space name.
If specified, initials should be either 1 or 2 characters.
`color`:: (string) Optionally specify the hex color code used in the Space Avatar for this space. By default, the color will be automatically generated from the space name.
===== Example
[source,js]
--------------------------------------------------
POST /api/spaces/space
{
"id": "marketing",
"name": "Marketing",
"description" : "This is the Marketing Space",
"color": "#aabbcc",
"initials": "MK"
}
--------------------------------------------------
// KIBANA
==== Response
A successful call returns a response code of `200` with the created Space.

View file

@ -0,0 +1,50 @@
[[spaces-api-put]]
=== Update Space
experimental[This API is *experimental* and may be changed or removed completely in a future release. The underlying Spaces concepts are stable, but the APIs for managing Spaces are currently experimental.]
Updates an existing {kib} space. To create a new space, use the POST command.
==== Request
To update a space, issue a PUT request to the
`/api/spaces/space/<space_id>` endpoint.
[source,js]
--------------------------------------------------
PUT /api/spaces/space/<space_id>
--------------------------------------------------
==== Request Body
The following parameters can be specified in the body of a PUT request to update a space:
`id`:: (string) Required identifier for the space. This identifier becomes part of Kibana's URL when inside the space. This cannot be changed by the update operation.
`name`:: (string) Required display name for the space.
`description`:: (string) Optional description for the space.
`initials`:: (string) Optionally specify the initials shown in the Space Avatar for this space. By default, the initials will be automatically generated from the space name.
If specified, initials should be either 1 or 2 characters.
`color`:: (string) Optionally specify the hex color code used in the Space Avatar for this space. By default, the color will be automatically generated from the space name.
===== Example
[source,js]
--------------------------------------------------
PUT /api/spaces/space/marketing
{
"id": "marketing",
"name": "Marketing",
"description" : "This is the Marketing Space",
"color": "#aabbcc",
"initials": "MK"
}
--------------------------------------------------
// KIBANA
==== Response
A successful call returns a response code of `200` with the updated Space.

View file

@ -16,12 +16,7 @@ configuration is required.
If you also use Elastic Stack for logging and server-level metrics, you can
optionally import the APM dashboards that come with the APM Server. You can use
these APM-specific visualizations to correlate APM data with other data sources.
To get the dashboards, run the following command on the APM server:
[source,shell]
----------------------------------------------------------
./apm-server setup
----------------------------------------------------------
To get the dashboards, click the "Load Kibana objects" button at the bottom of the Getting Started guides for APM in Kibana.
For more setup information, see
{apm-get-started-ref}/index.html[Getting Started with APM].

View file

@ -119,4 +119,4 @@ TIP: You can create a link to a dashboard by title by doing this: +
TIP: When sharing a link to a dashboard snapshot, use the *Short URL*. Snapshot
URLs are long and can be problematic for Internet Explorer and other
tools. To create a short URL, you must have write access to `.kibana`.
tools. To create a short URL, you must have write access to {kib}.

View file

@ -16,6 +16,9 @@ endif::gs-mini[]
TIP: See the documentation about the ingest node
{ref}/grok-processor.html[grok processor] and the Logstash {logstash-ref}/plugins-filters-grok.html[grok filter] for more info about grok.
NOTE: If you're using {security}, you must have the `manage_pipeline`
permission in order to use the Grok Debugger.
The Grok Debugger is automatically enabled in {kib}. It is located under the *DevTools* tab in {kib}.
To start debugging grok patterns:

View file

@ -15,4 +15,7 @@ include::development/core-development.asciidoc[]
include::development/plugin-development.asciidoc[]
include::development/security/index.asciidoc[]
include::development/pr-review.asciidoc[]

View file

@ -48,15 +48,15 @@ $http.get(chrome.addBasePath('/api/plugin/things'));
[float]
==== Server side
Append `config.get('server.basePath')` to any absolute URL path.
Append `request.getBasePath()` to any absolute URL path.
["source","shell"]
-----------
const basePath = server.config().get('server.basePath');
server.route({
path: '/redirect',
handler(req, reply) {
reply.redirect(`${basePath}/otherLocation`);
handler(request, reply) {
reply.redirect(`${request.getBasePath()}/otherLocation`);
}
});
-----------

View file

@ -0,0 +1,12 @@
[[development-security]]
== Security
Kibana has generally been able to implement security transparently to core and plugin developers, and this largely remains the case. {kib} relies on two methods that the <<development-elasticsearch, elasticsearch plugin>>'s `Cluster` provides: `callWithRequest` and `callWithInternalUser`.
`callWithRequest` executes requests against Elasticsearch using the authentication credentials of the Kibana end-user. So, if you log into Kibana as the user `foo`, when `callWithRequest` is used {kib} executes the request against Elasticsearch as the user `foo`. Historically, `callWithRequest` has been used extensively to perform actions that are initiated at the request of Kibana end-users.
`callWithInternalUser` executes requests against Elasticsearch using the internal Kibana server user, and has historically been used for performing actions that aren't initiated by Kibana end users; for example, creating the initial `.kibana` index or performing health checks against Elasticsearch.
However, with the changes that role-based access control (RBAC) introduces, this is no longer cut and dry. {kib} now requires all access to the `.kibana` index goes through the `SavedObjectsClient`. This used to be a best practice, as the `SavedObjectsClient` was responsible for translating the documents stored in Elasticsearch to and from Saved Objects, but RBAC is now taking advantage of this abstraction to implement access control and determine when to use `callWithRequest` versus `callWithInternalUser`.
include::rbac.asciidoc[]

View file

@ -0,0 +1,174 @@
[[development-security-rbac]]
=== Role-based access control
Role-based access control (RBAC) in {kib} relies upon the {xpack-ref}/security-privileges.html#application-privileges[application privileges] that Elasticsearch exposes. This allows {kib} to define the privileges that {kib} wishes to grant to users, assign them to the relevant users using roles, and then authorize the user to perform a specific action. This is handled within a secured instance of the `SavedObjectsClient` and available transparently to consumers when using `request.getSavedObjectsClient()` or `savedObjects.getScopedSavedObjectsClient()`.
[[development-rbac-privileges]]
==== {kib} Privileges
When {kib} first starts up, it executes the following `POST` request against {es}. This synchronizes the definition of the privileges with various `actions` which are later used to authorize a user:
[source,js]
----------------------------------
POST /_xpack/security/privilege
Content-Type: application/json
Authorization: Basic kibana changeme
{
"kibana-.kibana":{
"all":{
"application":"kibana-.kibana",
"name":"all",
"actions":[
"version:7.0.0-alpha1-SNAPSHOT",
"action:login",
"action:*"
],
"metadata":{}
},
"read":{
"application":"kibana-.kibana",
"name":"read",
"actions":[
"version:7.0.0-alpha1-SNAPSHOT",
"action:login",
"action:saved_objects/dashboard/get",
"action:saved_objects/dashboard/bulk_get",
"action:saved_objects/dashboard/find",
...
],"metadata":{}}
}
}
----------------------------------
[NOTE]
==============================================
The application is created by concatenating the prefix of `kibana-` with the value of `kibana.index` from the `kibana.yml`, so different {kib} tenants are isolated from one another.
==============================================
[[development-rbac-assigning-privileges]]
==== Assigning {kib} Privileges
{kib} privileges are assigned to specific roles using the `applications` element. For example, the following role assigns the <<kibana-privileges-all, all>> privilege at `*` `resources` (which will in the future be used to secure spaces) to the default {kib} `application`:
[source,js]
----------------------------------
"new_kibana_user": {
"applications": [
{
"application": "kibana-.kibana",
"privileges": [
"all"
],
"resources": [
"*"
]
}
]
}
----------------------------------
Roles that grant <<kibana-privileges>> should be managed using the <<role-management-api>> or the *Management* / *Security* / *Roles* page, not directly using the {es} {ref}/security-api.html#security-role-apis[role management API]. This role can then be assigned to users using the {es}
{ref}/security-api.html#security-user-apis[user management APIs].
[[development-rbac-authorization]]
==== Authorization
The {es} {ref}/security-api-has-privileges.html[has privileges API] determines whether the user is authorized to perform a specific action:
[source,js]
----------------------------------
POST /_xpack/security/user/_has_privileges
Content-Type: application/json
Authorization: Basic foo_read_only_user password
{
"applications":[
{
"application":"kibana-.kibana",
"resources":["*"],
"privileges":[
"action:saved_objects/dashboard/save",
]
}
]
}
----------------------------------
{es} checks if the user is granted a specific action. If the user is assigned a role that grants a privilege, {es} uses the <<development-rbac-privileges, {kib} privileges>> definition to associate this with the actions, which makes authorizing users more intuitive and flexible programmatically.
Once we have authorized the user to perform a specific action, we can execute the request using `callWithInternalUser`.
[[development-rbac-legacy-fallback]]
==== Legacy Fallback
Users have existing roles that rely on index privileges to the `.kibana` index. The legacy fallback uses the `callWithRequest` method when the user doesn't have any application privileges. This relies on the user having index privileges on `.kibana`. The legacy fallback will be available until 7.0.
Within the secured instance of the `SavedObjectsClient` the `_has_privileges` check determines if the user has any index privileges on the `.kibana` index:
[source,js]
----------------------------------
POST /_xpack/security/user/_has_privileges
Content-Type: application/json
Authorization: Basic foo_legacy_user password
{
"applications":[
{
"application":"kibana-.kibana",
"resources":["*"],
"privileges":[
"action:saved_objects/dashboard/save"
]
}
],
"index": [
{
"names": ".kibana",
"privileges": ["create", "delete", "read", "view_index_metadata"]
}
]
}
----------------------------------
Here is an example response if the user does not have application privileges, but does have privileges on the `.kibana` index:
[source,js]
----------------------------------
{
"username": "foo_legacy_user",
"has_all_requested": false,
"cluster": {},
"index": {
".kibana": {
"read": true,
"view_index_metadata": true,
"create": true,
"delete": true
}
},
"application": {
"kibana-.kibana": {
"*": {
"action:saved_objects/dashboard/save": false
}
}
}
}
----------------------------------
{kib} automatically detects that the request could be executed against `.kibana` using `callWithRequest` and does so.
When the user first logs into {kib}, if they have no application privileges and will have to rely on the legacy fallback, {kib} logs a deprecation warning similar to the following:
[source,js]
----------------------------------
${username} relies on index privileges on the {kib} index. This is deprecated and will be removed in {kib} 7.0
----------------------------------
[[development-rbac-reserved-roles]]
==== Reserved roles
Ideally, the `kibana_user` and `kibana_dashboard_only_user` roles should only use application privileges, and no longer have index privileges on the `.kibana` index. However, making this switch forces the user to incur downtime if Elasticsearch is upgraded to >= 6.4, and {kib} is running < 6.4. To mitigate this downtime, for the 6.x releases the `kibana_user` and `kibana_dashboard_only_user` roles have both application privileges and index privileges. When {kib} is running >= 6.4 it uses the application privileges to authorize the user, but when {kib} is running < 6.4 {kib} relies on the direct index privileges.

View file

@ -55,8 +55,9 @@ The list of common parameters:
- *name*: unique visualization name, only lowercase letters and underscore
- *title*: title of your visualization as displayed in kibana
- *icon*: the icon class to use (font awesome)
- *image*: instead of icon you can provide svg image (imported)
- *icon*: <string> the https://elastic.github.io/eui/#/display/icons[EUI icon] type to use for this visualization
- *image*: instead of an icon you can provide a SVG image (imported)
- *legacyIcon*: (DEPRECATED) <string> provide a class name (e.g. for a font awesome icon)
- *description*: description of your visualization as shown in kibana
- *category*: the category your visualization falls into (one of `ui/vis/vis_category` values)
- *visConfig*: object holding visualization parameters
@ -179,7 +180,7 @@ VisTypesRegistryProvider.register(MyNewVisType);
[[development-react-visualization-type]]
==== React Visualization Type
React visualization type assumes you are using React as your rendering technology.
Just pass in a React component to `visConfig.template`.
Just pass in a React component to `visConfig.component`.
The visualization will receive `vis`, `appState`, `updateStatus` and `visData` as props.
It also has a `renderComplete` property, which needs to be called once the rendering has completed.
@ -197,7 +198,7 @@ const MyNewVisType = (Private) => {
icon: 'my_icon',
description: 'Cool new chart',
visConfig: {
template: ReactComponent
component: ReactComponent
}
});
}
@ -421,7 +422,6 @@ The `vis` object holds the visualization state and is the window into kibana:
- *vis.isEditorMode()*: returns true if in editor mode
- *vis.API.timeFilter*: allows you to access time picker
- *vis.API.queryFilter*: gives you access to queryFilter
- *vis.API.queryManager*: gives you access to add filters to the filter bar
- *vis.API.events.click*: default click handler
- *vis.API.events.brush*: default brush handler

View file

@ -35,7 +35,7 @@ For a more complex use-case you usually want to use that method.
`params` is a parameter object specifying several parameters, that influence rendering.
You will find a detailed description of all the parameters in the inline docs
in the {repo}blob/{branch}/src/ui/public/visualize/loader/loader.js[loader source code].
in the {repo}blob/{branch}/src/ui/public/visualize/loader/types.ts[loader source code].
Both methods return an `EmbeddedVisualizeHandler`, that gives you some access
to the visualization. The `embedVisualizationWithSavedObject` method will return

View file

@ -13,9 +13,9 @@ what's new.
============
Starting in 6.3, you can choose to opt-in to a number of exciting experimental query language enhancements under the
options menu in the query bar. Currently, opting in will enable autocomplete functionality, scripted field support,
and a simplified, easier to use syntax. We're hard at work building even more features for you to try out. Take
these features for a spin and let us know what you think!
options menu in the query bar. Currently, opting in will enable scripted field support and a simplified, easier to
use syntax. If you have a Basic license or above, autocomplete functionality will also be enabled. We're hard at
work building even more features for you to try out. Take these features for a spin and let us know what you think!
==== New Simplified Syntax

View file

@ -3,14 +3,17 @@
[partintro]
--
Ready to get some hands-on experience with Kibana?
This tutorial shows you how to:
* Load a sample data set into Elasticsearch
* Define an index pattern
* Discover and explore the sample data
* Visualize the data
* Assemble visualizations into a dashboard
Ready to get some hands-on experience with {kib}? There are two ways to start:
* <<tutorial-sample-data, Explore {kib} using the Flights dashboard>>
+
Load the Flights sample data and dashboard with one click and start
interacting with {kib} visualizations in seconds.
* <<tutorial-build-dashboard, Build your own dashboard>>
+
Manually load a data set and build your own visualizations and dashboard.
Before you begin, make sure you've <<install, installed Kibana>> and established
a {kibana-ref}/connect-to-elasticsearch.html[connection to Elasticsearch].
@ -22,6 +25,22 @@ If you are running our https://cloud.elastic.co[hosted Elasticsearch Service]
on Elastic Cloud, you can access Kibana with a single click.
--
include::getting-started/tutorial-sample-data.asciidoc[]
include::getting-started/tutorial-sample-filter.asciidoc[]
include::getting-started/tutorial-sample-query.asciidoc[]
include::getting-started/tutorial-sample-discover.asciidoc[]
include::getting-started/tutorial-sample-edit.asciidoc[]
include::getting-started/tutorial-sample-inspect.asciidoc[]
include::getting-started/tutorial-sample-remove.asciidoc[]
include::getting-started/tutorial-full-experience.asciidoc[]
include::getting-started/tutorial-load-dataset.asciidoc[]
include::getting-started/tutorial-define-index.asciidoc[]
@ -32,4 +51,6 @@ include::getting-started/tutorial-visualizing.asciidoc[]
include::getting-started/tutorial-dashboard.asciidoc[]
include::getting-started/tutorial-inspect.asciidoc[]
include::getting-started/wrapping-up.asciidoc[]

View file

@ -1,14 +1,14 @@
[[tutorial-dashboard]]
== Putting it Together in a Dashboard
=== Displaying your visualizations in a dashboard
A dashboard is a collection of visualizations that you can arrange and share.
Here you'll build a dashboard that contains the visualizations you saved during
You'll build a dashboard that contains the visualizations you saved during
this tutorial.
. Open *Dashboard*.
. Click *Create new dashboard*.
. Click *Add*.
. Click *Bar Example*, *Map Example*, *Markdown Example*, and *Pie Example*.
. Add *Bar Example*, *Map Example*, *Markdown Example*, and *Pie Example*.
Your sample dashboard look like this:

View file

@ -1,5 +1,5 @@
[[tutorial-define-index]]
== Defining Your Index Patterns
=== Defining your index patterns
Index patterns tell Kibana which Elasticsearch indices you want to explore.
An index pattern can match the name of a single index, or include a wildcard
@ -10,7 +10,7 @@ series of indices in the format `logstash-YYYY.MMM.DD`. To explore all
of the log data from May 2018, you could specify the index pattern
`logstash-2018.05*`.
Create patterns for the Shakespeare data set, which has an
You'll create patterns for the Shakespeare data set, which has an
index named `shakespeare,` and the accounts data set, which has an index named
`bank.` These data sets don't contain time-series data.

View file

@ -1,12 +1,16 @@
[[tutorial-discovering]]
== Discovering Your Data
=== Discovering your data
Using the Discover application, you can enter
an {ref}/query-dsl-query-string-query.html#query-string-syntax[Elasticsearch
query] to search your data and filter the results.
. Open *Discover*. The `shakes*` pattern is the current index pattern.
. Click the caret to the right of `shakes*`, and select `ba*`.
. Open *Discover*.
+
The current index pattern appears below the filter bar, in this case `shakes*`.
You might need to click *New* in the menu bar to refresh the data.
. Click the caret to the right of the current index pattern, and select `ba*`.
. In the search field, enter the following string:
+
[source,text]
@ -19,8 +23,8 @@ excess of 47,500. It returns results for account numbers 8, 32, 78, 85, and 97.
image::images/tutorial-discover-2.png[]
By default, all fields are shown for each matching document. To choose which
fields to display, hover the mouse over the the list of *Available Fields*
and then click *add* next to each field you want include.
fields to display, hover the pointer over the list of *Available Fields*
and then click *add* next to each field you want to include as a column in the table.
For example, if you add the `account_number` field, the display changes to a list of five
account numbers.

View file

@ -0,0 +1,12 @@
[[tutorial-build-dashboard]]
== Building your own dashboard
Ready to load some data and build a dashboard? This tutorial shows you how to:
* Load a data set into Elasticsearch
* Define an index pattern
* Discover and explore the data
* Visualize the data
* Add visualizations to a dashboard
* Inspect the data behind a visualization

View file

@ -0,0 +1,24 @@
[[tutorial-inspect]]
=== Inspecting the data
Seeing visualizations of your data is great,
but sometimes you need to look at the actual data to
understand what's really going on. You can inspect the data behind any visualization
and view the {es} query used to retrieve it.
. In the dashboard, hover the pointer over the pie chart.
. Click the icon in the upper right.
. From the *Options* menu, select *Inspect*.
+
[role="screenshot"]
image::images/tutorial-full-inspect1.png[]
You can also look at the query used to fetch the data for the visualization.
. Open the *View:Data* menu and select *Requests*.
. Click the tabs to look at the request statistics, the Elasticsearch request,
and the response in JSON.
. To close the Inspector, click X in the upper right.
+
[role="screenshot"]
image::images/tutorial-full-inspect2.png[]

View file

@ -1,5 +1,5 @@
[[tutorial-load-dataset]]
== Loading Sample Data
=== Loading sample data
This tutorial requires three data sets:
@ -16,6 +16,8 @@ Two of the data sets are compressed. To extract the files, use these commands:
unzip accounts.zip
gunzip logs.jsonl.gz
==== Structure of the data sets
The Shakespeare data set has this structure:
[source,json]
@ -54,11 +56,18 @@ The logs data set has dozens of different fields. Here are the notable fields fo
"@timestamp": "date"
}
==== Set up mappings
Before you load the Shakespeare and logs data sets, you must set up {ref}/mapping.html[_mappings_] for the fields.
Mappings divide the documents in the index into logical groups and specify the characteristics
of the fields. These characteristics include the searchability of the field
and whether it's _tokenized_, or broken up into separate words.
NOTE: If security is enabled, you must have the `all` Kibana privilege to run this tutorial.
You must also have the `create`, `manage`, `read`, `write`, and `delete`
index privileges. See {xpack-ref}/security-privileges.html[Security Privileges]
for more information.
In Kibana *Dev Tools > Console*, set up a mapping for the Shakespeare data set:
[source,js]
@ -149,6 +158,8 @@ PUT /logstash-2015.05.20
The accounts data set doesn't require any mappings.
==== Load the data sets
At this point, you're ready to use the Elasticsearch {ref}/docs-bulk.html[bulk]
API to load the data sets:

View file

@ -0,0 +1,32 @@
[[tutorial-sample-data]]
== Explore {kib} using the Flight dashboard
You're new to {kib} and want to try it out. With one click, you can install
the Flights sample data and start interacting with Kibana.
The Flights data set contains data for four airlines.
You can load the data and preconfigured dashboard from the {kib} home page.
. On the home page, click the link next to *Sample data*.
. On the *Sample flight data* card, click *Add*.
. Click *View data*.
You're taken to the *Global Flight* dashboard, a collection of charts, graphs,
maps, and other visualizations of the data in the `kibana_sample_data_flights` index.
[role="screenshot"]
image::images/tutorial-sample-dashboard.png[]
In this tutorial, you'll learn to:
* Filter the data
* Query the data
* Discover the data
* Edit a visualization
* Inspect the data behind the scenes
NOTE: If security is enabled, you must have the `all` Kibana privilege.
You must also have access to the `kibana_sample_data_flights` index with
the `read`, `write`, and `manage` privileges. See {xpack-ref}/security-privileges.html[Security Privileges]
for more information.

View file

@ -0,0 +1,27 @@
[[tutorial-sample-discover]]
=== Using Discover
In the Discover application, the Flight data is presented in a table. You can
interactively explore the data, including searching and filtering.
* In the side navigation, select *Discover*.
The current index pattern appears below the filter bar. An
<<index-patterns, index pattern>> tells {kib} which {es} indices you want to
explore.
The `kibana_sample_data_flights` index contains a time field. A histogram
shows the distribution of documents over time.
[role="screenshot"]
image::images/tutorial-sample-discover1.png[]
By default, all fields are shown for each matching document. To choose which fields to display,
hover the pointer over the list of *Available Fields* and then click *add* next
to each field you want to include as a column in the table.
For example, if you add the `DestAirportID` and `DestWeather` fields,
the display includes columns for those two fields:
[role="screenshot"]
image::images/tutorial-sample-discover2.png[]

View file

@ -0,0 +1,45 @@
[[tutorial-sample-edit]]
=== Editing a visualization
You have edit permissions for the *Global Flight* dashboard so you can change
the appearance and behavior of the visualizations. For example, you might want
to see which airline has the lowest average fares.
. Go to the *Global Flight* dashboard.
. In the menu bar, click *Edit*.
. In the *Average Ticket Price* visualization, click the gear icon in
the upper right.
. From the *Options* menu, select *Edit visualization*.
==== Edit a metric visualization
*Average Ticket Price* is a metric visualization.
To specify which groups to display
in this visualization, you use an {es} {ref}/search-aggregations.html[bucket aggregation].
This aggregation sorts the documents that match your search criteria into different
categories, or buckets.
. In the *Buckets* pane, select *Split Group*.
. In the *Aggregation* dropdown menu, select *Terms*.
. In the *Field* dropdown, select *Carrier*.
. Set *Descending* to *4*.
. Click *Apply changes* image:images/apply-changes-button.png[].
You now see the average ticket price for all four airlines.
[role="screenshot"]
image::images/tutorial-sample-edit1.png[]
==== Save the changes
. In the menu bar, click *Save*.
. Leave the visualization name unchanged and click *Save*.
. Go to the *Global Flight* dashboard.
. Resize the panel for the *Average Ticket Price* visualization by dragging the
handle in the lower right.
You can also rearrange the visualizations by clicking the header and dragging.
. In the menu bar, click *Save* and then confirm the save.
+
[role="screenshot"]
image::images/tutorial-sample-edit2.png[]

View file

@ -0,0 +1,23 @@
[[tutorial-sample-filter]]
=== Filtering the data
Many visualizations in the *Global Flight* dashboard are interactive. You can
apply filters to modify the view of the data across all visualizations.
. In the *Controls* visualization, set an *Origin City* and a *Destination City*.
. Click *Apply changes*.
+
The `OriginCityName` and the `DestCityName` fields are filtered to match
the data you specified.
+
For example, this dashboard shows the data for flights from London to Newark
and Pittsburgh.
+
[role="screenshot"]
image::images/tutorial-sample-filter.png[]
+
. To remove the filters, in the *Controls* visualization, click *Clear form*, and then
*Apply changes*.
You can also add filters manually. In the filter bar, click *Add a Filter*
and specify the data you want to view.

View file

@ -0,0 +1,24 @@
[[tutorial-sample-inspect]]
=== Inspecting the data
Seeing visualizations of your data is great,
but sometimes you need to look at the actual data to
understand what's really going on. You can inspect the data behind any visualization
and view the {es} query used to retrieve it.
. Hover the pointer over the *Flight Count and Average Ticket Price* visualization.
. Click the icon in the upper right.
. From the *Options* menu, select *Inspect*.
+
[role="screenshot"]
image::images/tutorial-sample-inspect1.png[]
You can also look at the query used to fetch the data for the visualization.
. Open the *View: Data* menu and select *Requests*.
. Click the tabs to look at the request statistics, the Elasticsearch request,
and the response in JSON.
. To close the editor, click X in the upper right.
+
[role="screenshot"]
image::images/tutorial-sample-inspect2.png[]

View file

@ -0,0 +1,30 @@
[[tutorial-sample-query]]
=== Querying the data
You can enter an {es} query to narrow the view of the data.
. To find all flights out of Rome, submit this query:
+
[source,text]
OriginCityName:Rome
. For a more complex query with AND and OR, try this:
+
[source,text]
OriginCityName:Rome AND (Carrier:JetBeats OR "Kibana Airlines")
+
The dashboard updates to show data for the flights out of Rome on JetBeats and
{kib} Airlines.
+
[role="screenshot"]
image::images/tutorial-sample-query.png[]
. When you are finished exploring the dashboard, remove the query by
clearing the contents in the query bar and pressing Enter.
In general, filters are faster than queries. For more information, see {ref}/query-filter-context.html[Query and filter context].
TIP: {kib} has an experimental autocomplete feature that can
help jumpstart your queries. To turn on this feature, click *Options* on the
right of the query bar and opt in. With autocomplete enabled,
search suggestions are displayed when you start typing your query.

View file

@ -0,0 +1,18 @@
[[tutorial-sample-remove]]
=== Wrapping up
When you're done experimenting with the sample data set, you can remove it.
. Go to the {kib} home page and click the link next to *Sample data*.
. On the *Sample flight data* card, click *Remove*.
Now that you have a handle on the {kib} basics, you might be interested in:
* <<tutorial-build-dashboard, Building your own dashboard>>. You'll learn how to load your own
data, define an index pattern, and create visualizations and dashboards.
* <<visualize>>. You'll find information about all the visualization types
{kib} has to offer.
* <<dashboard>>. You have the ability to share a dashboard, or embed the dashboard in a web page.
* <<discover>>. You'll learn more about searching data and filtering by field.

View file

@ -1,5 +1,5 @@
[[tutorial-visualizing]]
== Visualizing Your Data
=== Visualizing your data
In the Visualize application, you can shape your data using a variety
of charts, tables, and maps, and more. You'll create four
@ -19,7 +19,7 @@ gain insight into the account balances in the bank account data.
[role="screenshot"]
image::images/tutorial-visualize-wizard-step-2.png[]
=== Pie Chart
=== Pie chart
Initially, the pie contains a single "slice."
That's because the default search matched all documents.
@ -73,15 +73,17 @@ in a ring around the balance ranges.
[role="screenshot"]
image::images/tutorial-visualize-pie-3.png[]
To save this chart so you can use it later, click *Save* in the top menu bar
and enter `Pie Example`.
To save this chart so you can use it later:
=== Bar Chart
* Click *Save* in the top menu bar and enter `Pie Example`.
=== Bar chart
You'll use a bar chart to look at the Shakespeare data set and compare
the number of speaking parts in the plays.
Create a *Vertical Bar* chart and set the search source to `shakes*`.
* Create a *Vertical Bar* chart and set the search source to `shakes*`.
Initially, the chart is a single bar that shows the total count
of documents that match the default wildcard query.
@ -120,32 +122,12 @@ that play.
Notice how the individual play names show up as whole phrases, instead of
broken into individual words. This is the result of the mapping
you did at the beginning of the tutorial, when your marked the `play_name` field
you did at the beginning of the tutorial, when you marked the `play_name` field
as `not analyzed`.
////
You might
also be curious to see which plays make the greatest demands on an
individual actor. Let's show the maximum number of speeches for a given part.
. Click *Add metrics* to add a Y-axis aggregation.
. Set *Aggregation* to `Max` and *Field* to `speech_number`.
. Click *Metrics & Axes* and then change *Mode* from `stacked` to `normal`.
. Click *Apply changes* image:images/apply-changes-button.png[].
[role="screenshot"]
image::images/tutorial-visualize-bar-3.png[]
The play Love's Labours Lost has an unusually high maximum speech number compared to the other plays.
Note how the *Number of speaking parts* Y-axis starts at zero, but the bars don't begin to differentiate until 18. To
make the differences stand out, starting the Y-axis at a value closer to the minimum, go to Options and select
*Scale Y-Axis to data bounds*.
////
*Save* this chart with the name `Bar Example`.
=== Coordinate Map
=== Coordinate map
Using a coordinate map, you can visualize geographic information in the log file sample data.
@ -175,18 +157,6 @@ You can navigate the map by clicking and dragging. The controls
on the top left of the map enable you to zoom the map and set filters.
Give them a try.
////
- Zoom image:images/viz-zoom.png[] buttons,
- *Fit Data Bounds*
image:images/viz-fit-bounds.png[] button to zoom to the lowest level that
includes all the points.
- Include or exclude a rectangular area
by clicking the *Latitude/Longitude Filter* image:images/viz-lat-long-filter.png[]
button and drawing a bounding box on the map. Applied filters are displayed
below the query bar. Hovering over a filter displays controls to toggle,
pin, invert, or delete the filter.
////
[role="screenshot"]
image::images/tutorial-visualize-map-3.png[]

View file

@ -1,5 +1,5 @@
[[wrapping-up]]
== Wrapping Up
=== Wrapping up
Now that you have a handle on the basics, you're ready to start exploring
your own data with Kibana.

Binary file not shown.

After

Width:  |  Height:  |  Size: 204 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 222 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 378 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 947 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 386 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 191 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 335 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 302 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 268 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 230 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 309 KiB

View file

@ -24,14 +24,12 @@ include::introduction.asciidoc[]
include::setup.asciidoc[]
include::monitoring/monitoring-xkib.asciidoc[]
include::monitoring/monitoring-kibana.asciidoc[]
include::security/securing-kibana.asciidoc[]
include::settings/settings-xkb.asciidoc[]
include::migration.asciidoc[]
include::getting-started.asciidoc[]
include::discover.asciidoc[]
@ -54,6 +52,8 @@ include::monitoring/index.asciidoc[]
include::management.asciidoc[]
include::spaces/index.asciidoc[]
include::security/index.asciidoc[]
include::management/watcher-ui/index.asciidoc[]
@ -74,4 +74,8 @@ include::limitations.asciidoc[]
include::release-notes/highlights.asciidoc[]
include::CHANGELOG.asciidoc[]
include::migration.asciidoc[]
include::CHANGELOG.asciidoc[]
include::redirects.asciidoc[]

View file

@ -43,6 +43,7 @@ working on big documents. Set this property to `false` to disable highlighting.
the Elasticsearch cluster. This setting constrains the length of the segment list. Long segment lists can significantly
increase request processing time.
`courier:ignoreFilterIfFieldNotInIndex`:: Set this property to `true` to skip filters that apply to fields that don't exist in a visualization's index. Useful when dashboards consist of visualizations from multiple index patterns.
`courier:maxConcurrentShardRequests`:: Controls the {ref}/search-multi-search.html[max_concurrent_shard_requests] setting used for _msearch requests sent by Kibana. Set to 0 to disable this config and use the Elasticsearch default.
`fields:popularLimit`:: This setting governs how many of the top most popular fields are shown.
`histogram:barTarget`:: When date histograms use the `auto` interval, Kibana attempts to generate this number of bars.
`histogram:maxBars`:: Date histograms are not generated with more bars than the value of this property, scaling values

View file

@ -3,7 +3,7 @@
=== Advanced Configuration for Dashboard Only Mode
If {security} is enabled, Kibana has a built-in `kibana_dashboard_only_user`
role that grants read only access to the `.kibana` index. This role is sufficient
role that grants read-only access to {kib}. This role is sufficient
for most use cases. However, if your setup requires a custom {kib} index, you can create
your own roles and tag them as *Dashboard only mode*.
@ -15,8 +15,8 @@ Here you can add as many roles as you like.
image:management/dashboard_only_mode/images/advanced_dashboard_mode_role_setup.png["Advanced dashboard mode role setup"]
By default, a *dashboard only mode* user doesn't have access to any data indices.
To grant read only access to your custom {kib}
index, you must assign `view_index_metadata` and `read` privileges.
To grant read-only access to your custom {kib} instance,
you must assign the read <<kibana-privileges, Kibana privilege>>.
These privileges are available under *Management > Security > Roles*.
For more information on roles and privileges, see {xpack-ref}/authorization.html[User Authorization].

Binary file not shown.

Before

Width:  |  Height:  |  Size: 145 KiB

After

Width:  |  Height:  |  Size: 281 KiB

Before After
Before After

View file

@ -3,7 +3,11 @@
== Kibana Dashboard Only Mode
If {security} is enabled, you can use the `kibana_dashboard_only_user` built-in role to limit
what users see when they log in to {kib}.
what users see when they log in to {kib}. The `kibana_dashboard_only_user` role is
preconfigured with read-only permissions to {kib}.
IMPORTANT: You must also assign roles that grant the user appropriate access to the data indices.
For information on roles and privileges, see {xpack-ref}/authorization.html[User Authorization].
Users assigned this role are only able to see the Dashboard app in the navigation
pane. When users open a dashboard, they will have a limited visual experience.
@ -13,15 +17,10 @@ All edit and create controls are hidden.
image:management/dashboard_only_mode/images/view_only_dashboard.png["View Only Dashboard"]
To assign this role, go to *Management > Security > Users*, add or edit
a user, and add the `kibana_dashboard_only_user` role. You must assign roles
that grant the user appropriate data access. For information on roles
and privileges, see {xpack-ref}/authorization.html[User Authorization].
The `kibana_dashboard_only_user` role is
preconfigured with read only permissions on the `.kibana` index.
a user, and add the `kibana_dashboard_only_user` role.
IMPORTANT: If you assign users the `kibana_dashboard_only_user` role, along with a role
with write permissions on the `.kibana` index, they *will* have write access,
with write permissions to {kib}, they *will* have write access,
even though the controls remain hidden in the {kib} UI.
IMPORTANT: If you also assign users the reserved `superuser` role, they will be able to see

View file

@ -26,4 +26,8 @@ At the end of the trial period, the Platinum features operate in a
license, extend the trial, or purchase a subscription.
For a comparison of the Elastic license levels,
see https://www.elastic.co/subscriptions[the subscription page].
see https://www.elastic.co/subscriptions[the subscription page].
TIP: If {security} is enabled, before you can install a gold or platinum
license, you must configure Transport Layer Security (TLS) in {es}. See
{stack-ov}/encrypting-communications.html[Encrypting communications].

View file

@ -1,5 +1,8 @@
[[breaking-changes-7.0]]
== Breaking changes in 7.0
++++
<titleabbrev>7.0</titleabbrev>
++++
This section discusses the changes that you need to be aware of when migrating
your application to Kibana 7.0.
@ -49,4 +52,23 @@ to Kibana's advanced setting.
*Impact:* This setting is no longer necessary. If you enable {monitoring} across the Elastic Stack, a monitoring agent runs
on each Elasticsearch node, Logstash node, Kibana instance, and Beat to collect and index metrics. Each node and instance is
considered unique based on its persistent UUID, which is written to the path.data directory when the node or instance starts.
considered unique based on its persistent UUID, which is written to the path.data directory when the node or instance starts.
[float]
=== Deprecated API `/shorten` has been removed
*Details:* The `/shorten` API has been deprecated since 6.5, when it was replaced by the `/api/shorten_url` API.
*Impact:* The `/shorten` API has been removed. Use the `/api/shorten_url` API instead.
[float]
=== Deprecated kibana.yml setting logging.useUTC has been replaced with logging.timezone
*Details:* Any timezone can now be specified by canonical id.
*Impact:* The `logging.useUTC` flag will have to be replaced with a timezone id. If set to true the id is `UTC`.
[float]
=== kibana.yml setting `elasticsearch.url` sets port based on protocol
*Details:* In prior versions of Kibana, if no port was specified in `elasticsearch.url` a default of 9200 was chosen.
The port is now protocol dependent: https ports will use 443, and http ports will use 80.
*Impact:* If your `elasticsearch.url` was dependent on an unspecified port set to 9200, `:9200` will have to be appended to the url.

View file

@ -46,8 +46,6 @@ To receive email notifications for the Cluster Alerts:
1. Configure an email account as described in
{xpack-ref}/actions-email.html#configuring-email[Configuring Email Accounts].
2. Navigate to the *Management* page in {kib}.
3. Go to the *Advanced Settings* page, find the `xpack:defaultAdminEmail`
setting, and enter your email address.
2. Configure the `xpack.monitoring.cluster_alerts.email_notifications.email_address` setting in `kibana.yml` with your email address.
Email notifications are sent only when Cluster Alerts are triggered and resolved.

View file

@ -1,4 +0,0 @@
[[configuring-monitoring]]
== Configuring Monitoring
See {ref}/configuring-monitoring.html[Configuring Monitoring in {es}].

View file

@ -1,4 +0,0 @@
[[monitoring-getting-started]]
== Getting Started
See {kibana-ref}/monitoring-data.html[Viewing Monitoring Data in {kib}].

Binary file not shown.

After

Width:  |  Height:  |  Size: 115 KiB

View file

@ -19,9 +19,10 @@ metrics. Each node and instance is considered unique based on its persistent
UUID, which is written to the <<settings,`path.data`>> directory when the node
or instance starts.
* <<monitoring-xpack-kibana>>
* <<monitoring-kibana>>
* <<monitoring-data>>
--
include::viewing-metrics.asciidoc[]
include::monitoring-troubleshooting.asciidoc[]

View file

@ -1,4 +0,0 @@
[[monitoring-cluster]]
== Monitoring Elasticsearch
See {ref}/es-monitoring.html[Monitoring {es}].

View file

@ -1,5 +1,100 @@
[role="xpack"]
[[monitoring-kibana]]
== Monitoring Kibana
== Configuring monitoring in {kib}
++++
<titleabbrev>Configuring monitoring</titleabbrev>
++++
See
{kibana-ref}/monitoring-xpack-kibana.html[Monitoring {kib}].
If you enable the collection of monitoring data in your cluster, you can
optionally collect metrics about {kib}.
The following method involves sending the metrics to the production cluster,
which ultimately routes them to the monitoring cluster. For an alternative
method, see <<monitoring-metricbeat>>.
To learn about monitoring in general, see
{stack-ov}/xpack-monitoring.html[Monitoring the {stack}].
. Enable the collection of monitoring data in {stack}. Set
`xpack.monitoring.collection.enabled` to `true` on the production cluster. +
+
--
For example, you can use the following APIs to review and change this setting:
[source,js]
----------------------------------
GET _cluster/settings
PUT _cluster/settings
{
"persistent": {
"xpack.monitoring.collection.enabled": true
}
}
----------------------------------
For more information, see {ref}/monitoring-settings.html[Monitoring settings in {es}]
and {ref}/cluster-update-settings.html[Cluster update settings].
--
. Verify that `xpack.monitoring.enabled` and
`xpack.monitoring.kibana.collection.enabled` are set to `true` in the
`kibana.yml` file. These are the default values. For
more information, see <<monitoring-settings-kb,Monitoring settings in {kib}>>.
. Identify where to send monitoring data. {kib} automatically
sends metrics to the {es} cluster specified in the `elasticsearch.url` setting
in the `kibana.yml` file. This property has a default value of
`http://localhost:9200`. +
+
--
[TIP]
===============================
In production environments, we strongly recommend using a separate cluster
(referred to as the _monitoring cluster_) to store the data. Using a separate
monitoring cluster prevents production cluster outages from impacting your
ability to access your monitoring data. It also prevents monitoring activities
from impacting the performance of your production cluster.
If {security} is enabled on the production cluster, use an HTTPS URL such
as `https://<your_production_cluster>:9200` in this setting.
===============================
--
. If {security} is enabled on the production cluster:
.. Verify that there is a
valid user ID and password in the `elasticsearch.username` and
`elasticsearch.password` settings in the `kibana.yml` file. These values are
used when {kib} sends monitoring data to the production cluster.
.. Configure {kib} to encrypt communications between the {kib} server and the
production cluster. This set up involves generating a server certificate and
setting `server.ssl.*` and `elasticsearch.ssl.certificateAuthorities` settings
in the `kibana.yml` file on the {kib} server. For example:
+
--
[source,yaml]
--------------------------------------------------------------------------------
server.ssl.key: /path/to/your/server.key
server.ssl.certificate: /path/to/your/server.crt
--------------------------------------------------------------------------------
If you are using your own certificate authority to sign certificates, specify
the location of the PEM file in the `kibana.yml` file:
[source,yaml]
--------------------------------------------------------------------------------
elasticsearch.ssl.certificateAuthorities: /path/to/your/cacert.pem
--------------------------------------------------------------------------------
For more information, see <<using-kibana-with-security>>.
--
. <<start-stop,Start {kib}>>.
. <<monitoring-data,View the monitoring data in {kib}>>.
include::monitoring-metricbeat.asciidoc[]
include::{kib-repo-dir}/settings/monitoring-settings.asciidoc[]

View file

@ -1,5 +0,0 @@
[[monitoring-logstash]]
== Monitoring Logstash
See
{logstash-ref}/monitoring-logstash.html[Monitoring Logstash].

View file

@ -0,0 +1,125 @@
[role="xpack"]
[[monitoring-metricbeat]]
=== Monitoring {kib} with {metricbeat}
beta[] In 6.4 and later, you can use {metricbeat} to collect data about {kib}
and ship it to the monitoring cluster, rather than routing it through the
production cluster as described in <<monitoring-kibana>>.
image::monitoring/images/metricbeat.png[Example monitoring architecture]
To learn about monitoring in general, see
{stack-ov}/xpack-monitoring.html[Monitoring the {stack}].
. Enable the collection of monitoring data. Set
`xpack.monitoring.collection.enabled` to `true` on the production cluster. +
+
--
For example, you can use the following APIs to review and change this setting:
[source,js]
----------------------------------
GET _cluster/settings
PUT _cluster/settings
{
"persistent": {
"xpack.monitoring.collection.enabled": true
}
}
----------------------------------
For more information, see {ref}/monitoring-settings.html[Monitoring settings in {es}]
and {ref}/cluster-update-settings.html[Cluster update settings].
--
. Disable the default collection of {kib} monitoring metrics. +
+
--
Add the following setting in the {kib} configuration file (`kibana.yml`):
[source,yaml]
----------------------------------
xpack.monitoring.kibana.collection.enabled: false
----------------------------------
Leave the `xpack.monitoring.enabled` set to its default value (`true`).
For more information, see
<<monitoring-settings-kb,Monitoring settings in {kib}>>.
--
. {metricbeat-ref}/metricbeat-installation.html[Install {metricbeat}] on the
same server as {kib}.
. Enable the {kib} module in {metricbeat}. +
+
--
For example, to enable the default configuration in the `modules.d` directory,
run the following command:
["source","sh",subs="attributes,callouts"]
----------------------------------------------------------------------
metricbeat modules enable kibana
----------------------------------------------------------------------
For more information, see
{metricbeat-ref}/configuration-metricbeat.html[Specify which modules to run] and
{metricbeat-ref}/metricbeat-module-kibana.html[{kib} module].
--
. Configure the {kib} module in {metricbeat}. +
+
--
You must specify the following settings in the `modules.d/kibana.yml` file:
[source,yaml]
----------------------------------
- module: kibana
metricsets:
- stats
period: 10s
hosts: ["http://localhost:5601"] <1>
xpack.enabled: true
----------------------------------
<1> This setting identifies the host and port number that are used to access {kib}.
NOTE: If you configured {kib} to use <<configuring-tls,encrypted communications>>,
you must access it via HTTPS. For example, `https://localhost:5601`.
--
. Identify where to send the monitoring data. +
+
--
TIP: In production environments, we strongly recommend using a separate cluster
(referred to as the _monitoring cluster_) to store the data. Using a separate
monitoring cluster prevents production cluster outages from impacting your
ability to access your monitoring data. It also prevents monitoring activities
from impacting the performance of your production cluster.
For example, specify the {es} output information in the {metricbeat}
configuration file (`metricbeat.yml`):
[source,yaml]
----------------------------------
output.elasticsearch:
hosts: ["http://es-mon-1:9200", "http://es-mon2:9200"] <1>
----------------------------------
<1> In this example, the data is stored on a monitoring cluster with nodes
`es-mon-1` and `es-mon-2`.
NOTE: If you configured the monitoring cluster to use
{ref}/configuring-tls.html[encrypted communications], you must access it via
HTTPS. For example, `https://es-mon-1:9200`.
For more information about these configuration options, see
{metricbeat-ref}/elasticsearch-output.html[Configure the {es} output].
--
. <<start-stop,Start {kib}>>.
. {metricbeat-ref}/metricbeat-starting.html[Start {metricbeat}].
. <<monitoring-data,View the monitoring data in {kib}>>.

View file

@ -1,8 +1,24 @@
[[monitoring-troubleshooting]]
== {monitoring} Troubleshooting
[role="xpack"]
[[monitor-troubleshooting]]
== Troubleshooting monitoring in {kib}
++++
<titleabbrev>{monitoring}</titleabbrev>
<titleabbrev>Troubleshooting</titleabbrev>
++++
See
{logstash-ref}/monitoring-troubleshooting.html[Troubleshooting {monitoring} in Logstash].
Use the information in this section to troubleshoot common problems and find
answers for frequently asked questions related to {monitoring}.
[float]
=== Cannot view the cluster because the license information is invalid
*Symptoms:*
The following error appears in a banner at the top of the screen in {kib} on the
*Monitoring > Clusters* page:
`You can't view the "<my_cluster>" cluster because the license information is invalid.`
*Resolution:*
You cannot monitor a version 6.3 or later cluster from {kib} version 6.2 or earlier.
To resolve this issue, upgrade {kib} to 6.3 or later. See
{stack-ref}/upgrading-elastic-stack.html[Upgrading the {stack}].

View file

@ -1,134 +0,0 @@
[role="xpack"]
[[monitoring-xpack-kibana]]
== Configuring Monitoring in {kib}
++++
<titleabbrev>Configuring Monitoring</titleabbrev>
++++
{monitoring} gives you insight into the operation of your {stack}. For more
information, see <<xpack-monitoring,{monitoring}>> and
{stack-ov}/xpack-monitoring.html[Monitoring the {stack}].
. To monitor {kib}:
.. Verify that the `xpack.monitoring.collection.enabled` setting is `true` on
the production cluster. If that setting is `false`, which is the default value,
the collection of monitoring data is disabled in {es} and data is ignored from
all other sources. For more information, see
{ref}/monitoring-settings.html[Monitoring Settings in {es}].
.. Verify that `xpack.monitoring.enabled` and
`xpack.monitoring.kibana.collection.enabled` are set to `true`, which are the
default values. For more information, see <<monitoring-settings-kb>>.
.. Identify where to send monitoring data. {kib} automatically
sends metrics to the {es} cluster specified in the `elasticsearch.url` setting
in the `kibana.yml` file. This property has a default value of
`http://localhost:9200`. This cluster is often referred to as the
_production cluster_.
+
--
TIP: If {security} is enabled on the production cluster, use an HTTPS URL such
as `https://<your_production_cluster>:9200` in this setting.
--
. To visualize monitoring data:
.. Verify that `xpack.monitoring.ui.enabled` is set to `true`, which is the
default value. For more information, see <<monitoring-settings-kb>>.
.. Identify where to retrieve monitoring data from. If you want to use a
separate _monitoring cluster_, set `xpack.monitoring.elasticsearch.url` in the
`kibana.yml` file. Otherwise, the monitoring data is stored in the production
cluster.
+
--
TIP: If {security} is enabled on the monitoring cluster, use an HTTPS URL such
as `https://<your_monitoring_cluster>:9200` in this setting.
To learn more about typical monitoring architectures with separate
production and monitoring clusters, see
{xpack-ref}/how-monitoring-works.html[How Monitoring Works].
--
. If {security} is enabled on the production cluster:
.. Verify that there is a
valid user ID and password in the `elasticsearch.username` and
`elasticsearch.password` settings in the `kibana.yml` file. These values are
used when {kib} sends monitoring data to the production cluster.
.. Configure {kib} to encrypt communications between the {kib} server and the
production cluster. This set up involves generating a server certificate and
setting `server.ssl.*` and `elasticsearch.ssl.certificateAuthorities` settings
in the `kibana.yml` file on the {kib} server. For example:
+
--
[source,yaml]
--------------------------------------------------------------------------------
server.ssl.key: /path/to/your/server.key
server.ssl.certificate: /path/to/your/server.crt
--------------------------------------------------------------------------------
If you are using your own certificate authority to sign certificates, specify
the location of the PEM file in the `kibana.yml` file:
[source,yaml]
--------------------------------------------------------------------------------
elasticsearch.ssl.certificateAuthorities: /path/to/your/cacert.pem
--------------------------------------------------------------------------------
For more information, see <<using-kibana-with-security>>.
--
. If {security} is enabled on the monitoring cluster:
.. Identify a user ID and password that {kib} can use to retrieve monitoring
data. Specify these values in the `xpack.monitoring.elasticsearch.username` and
`xpack.monitoring.elasticsearch.password` settings in the `kibana.yml` file.
If these settings are omitted, {kib} uses the `elasticsearch.username` and
`elasticsearch.password` setting values.
.. Configure {kib} to encrypt communications between the {kib} server and the
monitoring cluster. Specify the `xpack.monitoring.elasticsearch.ssl.*` settings
in the `kibana.yml` file on the {kib} server.
+
--
For example, if you are using your own certificate authority to sign
certificates, specify the location of the PEM file in the `kibana.yml` file:
[source,yaml]
--------------------------------------------------------------------------------
xpack.monitoring.elasticsearch.ssl.certificateAuthorities: /path/to/your/cacert.pem
--------------------------------------------------------------------------------
--
. Restart {kib}.
. If {security} is enabled on your {kib} server:
.. Log in to {kib} as a user who has both the `kibana_user` and
`monitoring_user` roles. These roles have the necessary privileges to view the
monitoring dashboards. For example:
+
--
[source,js]
--------------------------------------------------
POST /_xpack/security/user/stack-monitor
{
"password" : "changeme",
"roles" : [ "kibana_user", "monitoring_user" ]
}
--------------------------------------------------
// CONSOLE
--
.. If you are accessing a remote monitoring cluster, you must log in to {kib}
with username and password credentials that are valid on both the {kib} server
and the monitoring cluster.
See also <<monitoring-data>>.
include::{kib-repo-dir}/settings/monitoring-settings.asciidoc[]

View file

@ -1,40 +1,89 @@
[role="xpack"]
[[monitoring-data]]
== Viewing Monitoring Data in {kib}
== Viewing monitoring data in {kib}
++++
<titleabbrev>Viewing Monitoring Data</titleabbrev>
<titleabbrev>Viewing monitoring data</titleabbrev>
++++
You can enable {monitoring} in {es}, Logstash, {kib}, and Beats. By default, the
monitoring agents on {es} index data within the same cluster.
You can use {kib} to monitor the health and performance of {es}, {ls}, {kib},
and Beats.
TIP: If you have a dedicated monitoring cluster, the information is accessible
even if the {es} cluster you're monitoring is not. You can send data from
multiple clusters to the same monitoring cluster and view them all through the
same instance of {kib}. For more information, see
same instance of {kib}. To learn more about typical monitoring architectures
with separate production and monitoring clusters, see
{xpack-ref}/how-monitoring-works.html[How monitoring works].
To view and analyze the health and performance of {es}, Logstash, {kib}, and
Beats:
. {ref}/configuring-monitoring.html[Configure monitoring in {es}]. If you want
. Optional: {ref}/configuring-monitoring.html[Configure monitoring in {es}]. If you want
to use a separate monitoring cluster, see
{xpack-ref}/monitoring-production.html[Monitoring in a Production Environment].
{xpack-ref}/monitoring-production.html[Monitoring in a production environment].
. <<monitoring-xpack-kibana,Configure monitoring in {kib}>>.
. Optional: <<monitoring-kibana,Configure monitoring in {kib}>>.
. {logstash-ref}/configuring-logstash.html[Configure monitoring in Logstash].
. Optional: {logstash-ref}/configuring-logstash.html[Configure monitoring in Logstash].
. Configure monitoring in {auditbeat-ref}/monitoring.html[Auditbeat],
. Optional: Configure monitoring in {auditbeat-ref}/monitoring.html[Auditbeat],
{filebeat-ref}/monitoring.html[Filebeat],
{heartbeat-ref}/monitoring.html[Heartbeat],
{metricbeat-ref}/monitoring.html[Metricbeat],
{packetbeat-ref}/monitoring.html[Packetbeat], and
{winlogbeat-ref}/monitoring.html[Winlogbeat].
. Open {kib} in your web browser and log in. If you are running {kib}
locally, go to `http://localhost:5601/`. To access {kib} and view the
monitoring dashboards, you must log in as a user who has the `kibana_user`
and `monitoring_user` roles.
. Configure {kib} to visualize monitoring data:
.. Verify that `xpack.monitoring.ui.enabled` is set to `true`, which is the
default value. For more information, see <<monitoring-settings-kb>>.
.. Identify where to retrieve monitoring data from. If you want to use a
separate _monitoring cluster_, set `xpack.monitoring.elasticsearch.url` in the
`kibana.yml` file. Otherwise, the monitoring data is stored in the production
cluster.
+
--
TIP: If {security} is enabled on the monitoring cluster, use an HTTPS URL such
as `https://<your_monitoring_cluster>:9200` in this setting.
--
.. If {security} is enabled on the monitoring cluster, identify a user ID and
password that {kib} can use to retrieve monitoring data. Specify these values in
the `xpack.monitoring.elasticsearch.username` and
`xpack.monitoring.elasticsearch.password` settings in the `kibana.yml` file.
If these settings are omitted, {kib} uses the `elasticsearch.username` and
`elasticsearch.password` setting values.
.. If {security} is enabled on the monitoring cluster, configure {kib} to
encrypt communications between the {kib} server and the monitoring cluster.
Specify the `xpack.monitoring.elasticsearch.ssl.*` settings in the `kibana.yml`
file on the {kib} server.
+
--
For example, if you are using your own certificate authority to sign
certificates, specify the location of the PEM file in the `kibana.yml` file:
[source,yaml]
--------------------------------------------------------------------------------
xpack.monitoring.elasticsearch.ssl.certificateAuthorities: /path/to/your/cacert.pem
--------------------------------------------------------------------------------
--
. Open {kib} in your web browser and log in.
+
--
If you are running {kib} locally, go to `http://localhost:5601/`.
If {security} is enabled on the {kib} server, to access {kib} and view the
monitoring dashboards, you must log in as a user who has the `kibana_user` and
`monitoring_user` roles. These roles have the necessary privileges to view the
monitoring dashboards. For more information, see
{stack-ov}/built-in-roles.html[Built-in roles].
If you are accessing a remote monitoring cluster, you must log in to {kib}
with credentials that are valid on both the {kib} server and the monitoring
cluster.
--
. In the side navigation, click *Monitoring*. The first time you open {kib}
monitoring, data collection is

View file

@ -68,6 +68,13 @@ If plugins were installed as a different user and the server is not starting, th
[source,shell]
$ chown -R kibana:kibana /path/to/kibana/optimize
[float]
=== Installing plugins while deferring optimization
The majority of the time spent installing a plugin is running the optimizer. If you're installing multiple plugins, it can make sense to omit that step and only run it once.
This can be done by providing `--no-optimize` to the plugin installation command. You can then either execute `bin/kibana --optimize` to run the optimizer,
or it will be run the first time Kibana is started.
[float]
=== Proxy support for plugin installation

View file

@ -17,6 +17,8 @@ This list of plugins is not guaranteed to work on your version of Kibana. Instea
* https://github.com/samtecspg/conveyor[Conveyor] - Simple (GUI) interface for importing data into Elasticsearch.
* https://github.com/TrumanDu/indices_view[Indices View] - View indices related information.
* https://github.com/johtani/analyze-api-ui-plugin[Analyze UI] (johtani) - UI for elasticsearch _analyze API
* https://github.com/TrumanDu/cleaner[Cleaner] (TrumanDu) - Setting index TTL.
* https://github.com/bitsensor/elastalert-kibana-plugin[ElastAlert Kibana Plugin] (BitSensor) - UI to create, test and edit ElastAlert rules
[float]
=== Timelion Extensions
@ -28,6 +30,7 @@ This list of plugins is not guaranteed to work on your version of Kibana. Instea
* https://github.com/JuanCarniglia/area3d_vis[3D Graph] (JuanCarniglia)
* https://github.com/TrumanDu/bmap[Bmap](TrumanDu) - integrated echarts for map visualization
* https://github.com/mstoyano/kbn_c3js_vis[C3JS Visualizations] (mstoyano)
* https://github.com/aaronoah/kibana_calendar_vis[Calendar Visualization] (aaronoah)
* https://github.com/elo7/cohort[Cohort analysis] (elo7)
* https://github.com/DeanF/health_metric_vis[Colored Metric Visualization] (deanf)
* https://github.com/JuanCarniglia/dendrogram_vis[Dendrogram] (JuanCarniglia)

14
docs/redirects.asciidoc Normal file
View file

@ -0,0 +1,14 @@
[role="exclude",id="redirects"]
= Deleted pages
[partintro]
--
The following pages have moved or been deleted.
--
[role="exclude",id="monitoring-xpack-kibana"]
== Configuring monitoring in {kib}
See <<monitoring-kibana>>.

View file

@ -0,0 +1,36 @@
[role="xpack"]
[[xpack-security-audit-logging]]
=== Audit Logging
You can enable auditing to keep track of security-related events such as
authorization successes and failures. Logging these events enables you
to monitor {kib} for suspicious activity and provides evidence in the
event of an attack.
Use the {kib} audit logs in conjunction with {es}'s
audit logging to get a holistic view of all security related events.
{kib} defers to {es}'s security model for authentication, data
index authorization, and features that are driven by cluster-wide privileges.
For more information on enabling audit logging in {es}, see
{stack-ov}/auditing.html[Auditing Security Events].
[IMPORTANT]
============================================================================
Audit logs are **disabled** by default. To enable this functionality, you
must set `xpack.security.audit.enabled` to `true` in `kibana.yml`.
============================================================================
Audit logging uses the standard {kib} logging output, which can be configured
in the `kibana.yml` and is discussed in <<settings>>.
==== Audit event types
When you are auditing security events, each request can generate
multiple audit events. The following is a list of the events that can be generated:
|======
| `saved_objects_authorization_success` | Logged when a user is authorized to access a saved
object when using a role with <<kibana-privileges>>
| `saved_objects_authorization_failure` | Logged when a user isn't authorized to access a saved
object when using a role with <<kibana-privileges>>
|======

View file

@ -0,0 +1,36 @@
[role="xpack"]
[[xpack-security-authorization]]
=== Authorization
Authorizing users to use {kib} in simple configurations is as easy as assigning the user
either the `kibana_user` or `kibana_dashboard_only_user` reserved role. If you're running
a single tenant of {kib} against your {es} cluster, and you're not controlling access to individual spaces, then this is sufficient and no other action is required.
==== Spaces
If you want to control individual spaces in {kib}, do **not** use the `kibana_user` or `kibana_dashboard_only_user` roles. Users with these roles are able to access all spaces in Kibana. Instead, create your own roles that grant access to specific spaces.
==== Multi-tenant {kib}
When running multiple tenants of {kib}, and changing the `kibana.index` in your `kibana.yml`, you
must create custom roles that authorize the user for that specific tenant. You can use
either the *Management / Security / Roles* page in {kib} or the <<role-management-api>>
to assign a specific <<kibana-privileges, Kibana privilege>> at that tenant. After creating the
custom role, you should assign this role to the user(s) that you wish to have access.
While multi-tenant installations are supported, the recommended approach to securing access to segments of {kib} is to grant users access to specific spaces.
==== Legacy roles
Prior to {kib} 6.4, {kib} users required index privileges to the `kibana.index`
in {es}. This approach is deprecated starting in 6.4, and you will need to switch to using
<<kibana-privileges>> before 7.0. When a user logs into {kib} and they're using
a legacy role, the following is logged to your {kib} logs:
[source,js]
----------------------------------
<username> relies on index privileges on the Kibana index. This is deprecated and will be removed in Kibana 7.0
----------------------------------
To disable legacy roles from being authorized in {kib}, set `xpack.security.authorization.legacyFallback` to `false`
in your `kibana.yml`.

View file

@ -0,0 +1,15 @@
[role="xpack"]
[[kibana-privileges]]
=== Kibana privileges
This section lists the Kibana privileges that you can assign to a role.
[horizontal]
[[kibana-privileges-all]]
`all`::
All Kibana privileges, can read, write and delete saved searches, dashboards, visualizations,
short URLs, Timelion sheets, graph workspaces, index patterns and advanced settings.
`read`::
Can read saved searches, dashboards, visualizations, short URLs, Timelion sheets, graph workspaces,
index patterns, and advanced settings.

View file

@ -6,7 +6,7 @@
password-protect your data as well as implement more advanced security measures
such as encrypting communications, role-based access control, IP filtering, and
auditing. For more information, see
{xpack-ref}/xpack-security.html[Securing {es} and {kib}] and
{xpack-ref}/elasticsearch-security.html[Securing {es} and {kib}] and
<<using-kibana-with-security,Configuring Security in {kib}>>.
[float]
@ -20,6 +20,19 @@ authentication and built-in users, see
[float]
=== Roles
You can manage roles on the *Management* / *Security* / *Roles* page. For more
information, see
{xpack-ref}/authorization.html[Configuring Role-based Access Control].
You can manage roles on the *Management* / *Security* / *Roles* page, or use
{kib}'s <<role-management-api>>. For more information on configuring roles for
{kib} see <<xpack-security-authorization, {kib} Authorization>>.
For a more holistic overview of configuring roles for the entire stack,
see {xpack-ref}/authorization.html[Configuring Role-based Access Control].
[NOTE]
============================================================================
Managing roles that grant <<kibana-privileges>> using the {es}
{ref}/security-api.html#security-role-apis[role management APIs] is not supported. Doing so will likely
cause Kibana's authorization to behave unexpectedly.
============================================================================
include::authorization/index.asciidoc[]
include::authorization/kibana-privileges.asciidoc[]

View file

@ -85,8 +85,9 @@ You can manage privileges on the *Management / Security / Roles* page in {kib}.
If you're using the native realm with Basic Authentication, you can assign roles
using the *Management / Security / Users* page in {kib} or the
{ref}/security-api-users.html[User Management API]. For example, the following
creates a user named `jacknich` and assigns it the `kibana_user` role:
{ref}/security-api.html#security-user-apis[user management APIs]. For example,
the following creates a user named `jacknich` and assigns it the `kibana_user`
role:
[source,js]
--------------------------------------------------------------------------------
@ -124,4 +125,5 @@ NOTE: This must be a user who has been assigned the `kibana_user` role.
include::authentication/index.asciidoc[]
include::securing-communications/index.asciidoc[]
include::audit-logging.asciidoc[]
include::{kib-repo-dir}/settings/security-settings.asciidoc[]

View file

@ -17,12 +17,12 @@ xpack.apm.enabled:: Set to `false` to disable the APM plugin in {kib}. Defaults to
xpack.apm.ui.enabled:: Set to `false` to hide the APM plugin in {kib} from the menu. Defaults to
`true`.
apm_oss.indexPattern:: Index pattern is used for integrations with Machine Learning and Kuery Bar. It must match all apm indices. Defaults to `apm-&#42;`.
apm_oss.indexPattern:: Index pattern is used for integrations with Machine Learning and Kuery Bar. It must match all apm indices. Defaults to `apm-*`.
apm_oss.errorIndices:: Matcher for indices containing error documents. Defaults to `apm-&#42;-error-&#42;`.
apm_oss.errorIndices:: Matcher for indices containing error documents. Defaults to `apm-*`.
apm_oss.onboardingIndices:: Matcher for indices containing onboarding documents. Defaults to `apm-&#42;-onboarding-&#42;`.
apm_oss.onboardingIndices:: Matcher for indices containing onboarding documents. Defaults to `apm-*`.
apm_oss.spanIndices:: Matcher for indices containing span documents. Defaults to `apm-&#42;-span-&#42;`.
apm_oss.spanIndices:: Matcher for indices containing span documents. Defaults to `apm-*`.
apm_oss.transactionIndices:: Matcher for indices containing transaction documents. Defaults to `apm-&#42;-transaction-&#42;`.
apm_oss.transactionIndices:: Matcher for indices containing transaction documents. Defaults to `apm-*`.

View file

@ -76,8 +76,9 @@ Defaults to `3000` (3 seconds).
[[xpack-reporting-browser]]`xpack.reporting.capture.browser.type`::
Specifies the browser to use to capture screenshots. Valid options are `phantom`
and `chromium`. When `chromium` is set, the settings specified in the <<reporting-chromium-settings, Chromium settings>>
are respected.
Defaults to `phantom`.
are respected. This setting will be deprecated in 7.0, when Phantom support is removed.
Defaults to `chromium`.
[float]
[[reporting-chromium-settings]]

View file

@ -14,11 +14,19 @@ It is enabled by default.
`xpack.security.enabled`::
Set to `true` (default) to enable {security}. +
+
If set to `false` in `kibana.yml`, the user and role management options are
hidden in this {kib} instance. If `xpack.security.enabled` is set to `true` in
`elasticsearch.yml`, however, you can still use the {security} APIs. To disable
{security} entirely, see the
{ref}/security-settings.html[{es} Security Settings].
Do not set this to `false`. To disable {security} entirely, see
{ref}/security-settings.html[{es} Security Settings]. +
+
If set to `false` in `kibana.yml`, the login form, user and role management screens, and
authorization using <<kibana-privileges>> are disabled. +
+
`xpack.security.audit.enabled`::
Set to `true` to enable audit logging for security events. This is set to `false` by default.
For more details see <<xpack-security-audit-logging>>.
`xpack.security.authorization.legacyFallback`::
Set to `true` (default) to enable the legacy fallback. See <<xpack-security-authorization>>
for more details.
[float]
[[security-ui-settings]]

View file

@ -14,3 +14,4 @@ include::dev-settings.asciidoc[]
include::graph-settings.asciidoc[]
include::ml-settings.asciidoc[]
include::reporting-settings.asciidoc[]
include::spaces-settings.asciidoc[]

View file

@ -0,0 +1,22 @@
[role="xpack"]
[[spaces-settings-kb]]
=== Spaces settings in {kib}
++++
<titleabbrev>Spaces settings</titleabbrev>
++++
By default, Spaces is enabled in Kibana, and you can secure Spaces using
roles when Security is enabled.
[float]
[[spaces-settings]]
==== Spaces settings
`xpack.spaces.enabled`::
Set to `true` (default) to enable Spaces in {kib}.
`xpack.spaces.maxSpaces`::
The maximum number of Spaces that can be used with this instance of Kibana. Some operations
in Kibana return all spaces using a single `_search` from Elasticsearch, so this must be
set lower than the `index.max_result_window` in Elasticsearch.
Defaults to `1000`.

View file

@ -4,7 +4,7 @@
[float]
=== Hosted Kibana
If you are running our https://cloud.elastic.co[hosted Elasticsearch Service]
If you are running our https://cloud.elastic.co[hosted Elasticsearch Service]
on Elastic Cloud, you can access Kibana with a single click.
[float]
@ -48,7 +48,7 @@ downloaded from the Elastic Docker Registry.
<<docker,Running Kibana on Docker>>
IMPORTANT: If your Elasticsearch installation is protected by
{xpack-ref}/xpack-security.html[{security}] see
{xpack-ref}/elasticsearch-security.html[{security}] see
{kibana-ref}/using-kibana-with-security.html[Configuring Security in Kibana] for
additional setup instructions.

View file

@ -21,7 +21,7 @@ and an Elasticsearch client node on the same machine. For more information, see
[[configuring-kibana-shield]]
=== Using Kibana with {security}
You can use {stack-ov}/xpack-security.html[{security}] to control what
You can use {stack-ov}/elasticsearch-security.html[{security}] to control what
Elasticsearch data users can access through Kibana.
When {security} is enabled, Kibana users have to log in. They need to
@ -40,7 +40,7 @@ For information about setting up Kibana users, see
[[enabling-ssl]]
=== Enabling SSL
See <<configuring-tls>>.
See <<configuring-tls>>.
[float]
[[load-balancing]]

View file

@ -22,7 +22,7 @@ you'll need to update your `kibana.yml` file. You can also enable SSL and set a
`elasticsearch.customHeaders:`:: *Default: `{}`* Header names and values to send to Elasticsearch. Any custom headers
cannot be overwritten by client-side headers, regardless of the `elasticsearch.requestHeadersWhitelist` configuration.
`elasticsearch.logQueries:`:: *Default: `false`* Logs queries sent to Elasticsearch. Requires `logging.verbose` set to `true`. This is useful for seeing the query DSL generated by applications that currently do not have a spy panel, for example Timelion and Monitoring.
`elasticsearch.logQueries:`:: *Default: `false`* Logs queries sent to Elasticsearch. Requires `logging.verbose` set to `true`. This is useful for seeing the query DSL generated by applications that currently do not have an inspector, for example Timelion and Monitoring.
`elasticsearch.pingTimeout:`:: *Default: the value of the `elasticsearch.requestTimeout` setting* Time in milliseconds to
wait for Elasticsearch to respond to pings.
@ -73,7 +73,7 @@ error messages.
[[logging-verbose]]`logging.verbose:`:: *Default: false* Set the value of this setting to `true` to log all events, including system usage information and all requests. Supported on Elastic Cloud Enterprise.
`logging.useUTC`:: *Default: true* Set the value of this setting to `false` to log events using the timezone of the server, rather than UTC.
`logging.timezone`:: *Default: UTC* Set to the canonical timezone id (e.g. `US/Pacific`) to log events using that timezone. A list of timezones can be referenced at https://en.wikipedia.org/wiki/List_of_tz_database_time_zones.
`map.includeElasticMapsService:`:: *Default: true* Turns on or off whether layers from the Elastic Maps Service should be included in the vector and tile layer option list.
By turning this off, only the layers that are configured here will be included.
@ -157,7 +157,7 @@ The minimum value is 100.
`server.ssl.redirectHttpFromPort:`:: Kibana will bind to this port and redirect all http requests to https over the port configured as `server.port`.
`server.ssl.supportedProtocols:`:: *Default: TLSv1, TLSv1.1, TLSv1.2* Supported protocols with versions. Valid protocols: `TLSv1`, `TLSv1.1`, `TLSv1.2`
`server.ssl.supportedProtocols:`:: *Default: TLSv1, TLSv1.1, TLSv1.2* An array of supported protocols with versions. Valid protocols: `TLSv1`, `TLSv1.1`, `TLSv1.2`
`status.allowAnonymous:`:: *Default: false* If authentication is enabled, setting this to `true` allows
unauthenticated users to access the Kibana server status API and status page.
unauthenticated users to access the Kibana server status API and status page.

View file

@ -0,0 +1,8 @@
[role="xpack"]
[[spaces-getting-started]]
=== Getting Started
Spaces are automatically enabled in {kib}. If you don't wish to use this feature, you can disable it
by setting `xpack.spaces.enabled` to `false` in your `kibana.yml` configuration file.
{kib} automatically creates a default space for you. If you are upgrading from another version of {kib}, then the default space will contain all of your existing saved objects. Although you can't delete the default space, you can customize it to your liking.

Some files were not shown because too many files have changed in this diff Show more