Merge branch 'master' of github.com:elastic/kibana into feature-secops
@@ -12,9 +12,7 @@ node scripts/es snapshot --download-only;
 node scripts/es snapshot --license=oss --download-only;

 # download reporting browsers
-cd "x-pack";
-yarn gulp prepare;
-cd -;
+(cd "x-pack" && yarn gulp prepare);

 # cache the chromedriver bin
 chromedriverDistVersion="$(node -e "console.log(require('chromedriver').version)")"
@@ -7,6 +7,7 @@ cd "$(dirname "$0")/.."

+source src/dev/ci_setup/extract_bootstrap_cache.sh
 source src/dev/ci_setup/setup.sh
 source src/dev/ci_setup/checkout_sibling_es.sh

 case "$JOB" in
   kibana-intake)
@@ -18,7 +18,6 @@ bower_components
 /src/core/lib/kbn_internal_native_observable
 /packages/*/target
 /packages/eslint-config-kibana
 /packages/eslint-plugin-kibana-custom
 /packages/kbn-es-query/src/kuery/ast/kuery.js
 /packages/kbn-es-query/src/kuery/ast/legacy_kuery.js
 /packages/kbn-pm/dist
.eslintrc.js

@@ -34,6 +34,7 @@ const ELASTIC_LICENSE_HEADER = `

 module.exports = {
   extends: ['@elastic/eslint-config-kibana', '@elastic/eslint-config-kibana/jest'],
+  plugins: ['@kbn/eslint-plugin-eslint'],

   settings: {
     'import/resolver': {
@@ -41,15 +42,20 @@ module.exports = {
         forceNode: true,
       },
     },

     react: {
       version: '16.3',
     },
   },

   rules: {
     'no-restricted-imports': [2, restrictedModules],
     'no-restricted-modules': [2, restrictedModules],
+    '@kbn/eslint/module_migration': [
+      'error',
+      [
+        {
+          from: 'expect.js',
+          to: '@kbn/expect',
+        },
+      ],
+    ],
   },

   overrides: [
@@ -59,7 +65,7 @@ module.exports = {
     {
       files: [
         '.eslintrc.js',
-        'packages/eslint-plugin-kibana-custom/**/*',
+        'packages/kbn-eslint-plugin-eslint/**/*',
         'packages/kbn-config-schema/**/*',
         'packages/kbn-pm/**/*',
         'packages/kbn-es/**/*',
@@ -71,6 +77,7 @@ module.exports = {
         'packages/kbn-test-subj-selector/**/*',
         'packages/kbn-test/**/*',
         'packages/kbn-eslint-import-resolver-kibana/**/*',
+        'src/legacy/server/saved_objects/**/*',
         'x-pack/plugins/apm/**/*',
         'x-pack/plugins/canvas/**/*',
       ],
@@ -90,7 +97,7 @@ module.exports = {
     {
       files: ['x-pack/test/functional/apps/**/*', 'x-pack/plugins/apm/**/*'],
       rules: {
-        'kibana-custom/no-default-export': 'off',
+        '@kbn/eslint/no-default-export': 'off',
         'import/no-named-as-default': 'off',
       },
     },
@@ -158,6 +165,7 @@ module.exports = {
         'x-pack/{dev-tools,tasks,scripts,test,build_chromium}/**/*',
         'x-pack/**/{__tests__,__test__,__jest__,__fixtures__,__mocks__}/**/*',
         'x-pack/**/*.test.js',
         'x-pack/test_utils/**/*',
         'x-pack/gulpfile.js',
         'x-pack/plugins/apm/public/utils/testHelpers.js',
       ],
@@ -244,7 +252,7 @@ module.exports = {
         'packages/kbn-plugin-generator/**/*',
         'packages/kbn-plugin-helpers/**/*',
         'packages/kbn-eslint-import-resolver-kibana/**/*',
-        'packages/kbn-eslint-plugin-license-header/**/*',
+        'packages/kbn-eslint-plugin-eslint/**/*',
         'x-pack/gulpfile.js',
         'x-pack/dev-tools/mocha/setup_mocha.js',
         'x-pack/scripts/*',
@@ -269,15 +277,14 @@ module.exports = {
      */
     {
       files: ['**/*.js'],
-      plugins: ['@kbn/eslint-plugin-license-header'],
       rules: {
-        '@kbn/license-header/require-license-header': [
+        '@kbn/eslint/require-license-header': [
           'error',
           {
             license: APACHE_2_0_LICENSE_HEADER,
           },
         ],
-        '@kbn/license-header/disallow-license-headers': [
+        '@kbn/eslint/disallow-license-headers': [
           'error',
           {
             licenses: [ELASTIC_LICENSE_HEADER],
@@ -291,15 +298,14 @@ module.exports = {
      */
     {
       files: ['x-pack/**/*.js'],
-      plugins: ['@kbn/eslint-plugin-license-header'],
       rules: {
-        '@kbn/license-header/require-license-header': [
+        '@kbn/eslint/require-license-header': [
           'error',
           {
             license: ELASTIC_LICENSE_HEADER,
           },
         ],
-        '@kbn/license-header/disallow-license-headers': [
+        '@kbn/eslint/disallow-license-headers': [
           'error',
           {
             licenses: [APACHE_2_0_LICENSE_HEADER],
@@ -454,7 +454,8 @@ node scripts/docs.js --open

 Part of this process only applies to maintainers, since it requires access to Github labels.

-Kibana publishes major, minor and patch releases periodically through the year. During this process we run a script against this repo to collect the applicable PRs against that release and generate [Release Notes](https://www.elastic.co/guide/en/kibana/current/release-notes.html). To include your change in the Release Notes:
+Kibana publishes major, minor and patch releases periodically through the year. During this process we run a script against this repo to collect the applicable PRs against that release and generate [Release Notes](https://www.elastic.co/guide/en/kibana/current/release-notes.html).
+To include your change in the Release Notes:

 1. In the title, summarize what the PR accomplishes in language that is meaningful to the user. In general, use present tense (for example, Adds, Fixes) in sentence case.
 1. Label the PR with the targeted version (ex: 6.5).
@@ -462,7 +463,13 @@ Kibana publishes major, minor and patch releases periodically through the year.

 * For a new feature or functionality, use `release_note:enhancement`.
 * For an external-facing fix, use `release_note:fix`. Exception: docs, build, and test fixes do not go in the Release Notes.
 * For a deprecated feature, use `release_note:deprecation`.
-* For a breaking change, use `release-breaking:note`.
+* For a breaking change, use `release_note:breaking`.

-To NOT include your changes in the Release Notes, please use the label `non-issue`. PRs with the following labels also won't be included in the Release Notes:
-`build`, `docs`, `test`, `non-issue`, `jenkins`, `backport`, and `chore`.
+To NOT include your changes in the Release Notes, please use the label `non-issue`. PRs with the following labels also won't be included in the Release Notes:
+`build`, `docs`, `test_*`, `test-*`, `non-issue`, `jenkins`, `backport`, and `chore`.

 We also produce a blog post that details more important breaking API changes every minor and major release. If the PR includes a breaking API change, apply the label `release_note:dev_docs`. Additionally add a brief summary of the break at the bottom of the PR using the format below:
NOTICE.txt

@@ -80,32 +80,6 @@ used. Logarithmic ticks are placed at powers of ten and at half those
 values if there are not too many ticks already (e.g. [1, 5, 10, 50, 100]).
 For details, see https://github.com/flot/flot/pull/1328

----
-This product bundles angular-ui-bootstrap@0.12.1 which is available under a
-"MIT" license.
-
-The MIT License
-
-Copyright (c) 2012-2014 the AngularUI Team, https://github.com/organizations/angular-ui/teams/291112
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
 ---
 This product bundles bootstrap@3.3.6 which is available under a
 "MIT" license.
@@ -111,7 +111,7 @@ If yarn doesn't find the module it may not have types. For example, our `rison_

 1. Contribute types into the DefinitelyTyped repo itself, or
 2. Create a top level `types` folder and point to that in the tsconfig. For example, Infra team already handled this for `rison_node` and added: `x-pack/plugins/infra/types/rison_node.d.ts`. Other code uses it too so we will need to pull it up. Or,
-3. Add a `// @ts-ignore` line above the import. This should be used minimally, the above options are better. However, sometimes you have to resort to this method. For example, the `expect.js` module will require this line. We don't have type definitions installed for this library. Installing these types would conflict with the jest typedefs for expect, and since they aren't API compatible with each other, it's not possible to make both test frameworks happy. Since we are moving from mocha => jest, we don't see this is a big issue.
+3. Add a `// @ts-ignore` line above the import. This should be used minimally, the above options are better. However, sometimes you have to resort to this method.

 ### TypeScripting react files
@@ -20,7 +20,7 @@ NOTE: You cannot access these endpoints via the Console in Kibana.

 * <<saved-objects-api-delete>>
 * <<saved-objects-api-export>>
 * <<saved-objects-api-import>>
-* <<saved-objects-api-resolve-import-conflicts>>
+* <<saved-objects-api-resolve-import-errors>>

 include::saved-objects/get.asciidoc[]
 include::saved-objects/bulk_get.asciidoc[]

@@ -31,4 +31,4 @@ include::saved-objects/update.asciidoc[]
 include::saved-objects/delete.asciidoc[]
 include::saved-objects/export.asciidoc[]
 include::saved-objects/import.asciidoc[]
-include::saved-objects/resolve_import_conflicts.asciidoc[]
+include::saved-objects/resolve_import_errors.asciidoc[]
@@ -23,6 +23,9 @@ contains the following properties:

 `id` (required)::
   (string) ID of object to retrieve

+`fields` (optional)::
+  (array) The fields to return in the object's response
+
 ==== Response body

 The response body will have a top level `saved_objects` property that contains
@@ -1,15 +1,15 @@
-[[saved-objects-api-resolve-import-conflicts]]
-=== Resolve Import Conflicts
+[[saved-objects-api-resolve-import-errors]]
+=== Resolve Import Errors

 experimental[This functionality is *experimental* and may be changed or removed completely in a future release.]

-The resolve import conflicts API enables you to resolve conflicts given by the import API by either overwriting specific saved objects or changing references to a newly created object.
+The resolve import errors API enables you to resolve errors given by the import API by either overwriting specific saved objects or changing references to a newly created object.

 Note: You cannot access this endpoint via the Console in Kibana.

 ==== Request

-`POST /api/saved_objects/_resolve_import_conflicts`
+`POST /api/saved_objects/_resolve_import_errors`

 ==== Request body
@@ -35,12 +35,12 @@ In the scenario the import wasn't successful a top level `errors` array will con

 ==== Examples

-The following example resolves conflicts for an index pattern and dashboard but indicates to skip the index pattern.
+The following example resolves errors for an index pattern and dashboard but indicates to skip the index pattern.
 This will cause the index pattern to not be in the system and the dashboard to overwrite the existing saved object.

 [source,js]
 --------------------------------------------------
-POST api/saved_objects/_resolve_import_conflicts
+POST api/saved_objects/_resolve_import_errors
 Content-Type: multipart/form-data; boundary=EXAMPLE
 --EXAMPLE
 Content-Disposition: form-data; name="file"; filename="export.ndjson"
@@ -71,12 +71,12 @@ containing a JSON structure similar to the following example:
 }
 --------------------------------------------------

-The following example resolves conflicts for a visualization and dashboard but indicates
+The following example resolves errors for a visualization and dashboard but indicates
 to replace the dashboard references to another visualization.

 [source,js]
 --------------------------------------------------
-POST api/saved_objects/_resolve_import_conflicts
+POST api/saved_objects/_resolve_import_errors
 Content-Type: multipart/form-data; boundary=EXAMPLE
 --EXAMPLE
 Content-Disposition: form-data; name="file"; filename="export.ndjson"
@@ -8,14 +8,15 @@ the *Setup Instructions* will get you started.
 [role="screenshot"]
 image::apm/images/apm-setup.png[Installation instructions on the APM page in Kibana]

-After you install the Elastic APM agent library in your application,
+Index patterns tell Kibana which Elasticsearch indices you want to explore.
+An APM index pattern is necessary for certain features in the APM UI, like the query bar.
+To set up the correct index pattern,
+simply click *Load Kibana objects* at the bottom of the Setup Instructions.
+
+After you install an Elastic APM agent library in your application,
 the application automatically appears in the APM UI in {kib}.
 No further configuration is required.

-If you also use the Elastic Stack for logging and server-level metrics,
-you can import the APM dashboards that come with the APM Server.
-You can use these APM specific visualizations to correlate APM data with other data sources.
-To get the dashboards, click *Load Kibana objects* at the bottom of the Setup Instructions.
-
 [role="screenshot"]
-image::apm/images/apm-setup-dashboards.png[Install dashboards for APM in Kibana]
+image::apm/images/apm-index-pattern.png[Setup index pattern for APM in Kibana]
docs/apm/images/apm-index-pattern.png (new binary image, 48 KiB)
(the remaining APM screenshots are binary changes: most updated in place, two removed)
@@ -37,4 +37,4 @@ After exploring these traces,
 you can return to the full trace by clicking *View full trace* in the upper right hand corner of the page.

 [role="screenshot"]
-image::apm/images/apm-view-full-trace.png[Example of distributed trace colors in the APM UI in Kibana]
+image::apm/images/apm-transaction-sample.png[Example of distributed trace colors in the APM UI in Kibana]
@@ -4,8 +4,6 @@
 [partintro]
 --

-beta[]
-
 Congratulations on finding the Canvas application in {kib}. You are in for a treat.
 Canvas is a whole new way of making data look amazing. Canvas combines data with
 colors, shapes, text, and your own imagination to bring dynamic, multi-page,
@@ -1,7 +1,7 @@
 [[canvas-client-functions]]
 === Canvas client functions

-beta[]These functions must execute in a browser. They are only available
+These functions must execute in a browser. They are only available
 from within the Canvas application, not via the Canvas HTTP API. These functions must
 execute in the browser because they use browser specific APIs, such as location,
 or interact with the workpad to read filters.
@@ -1,7 +1,7 @@
 [[canvas-common-functions]]
 === Canvas common functions

-beta[]The common functions can run anywhere, which means they'll execute wherever
+The common functions can run anywhere, which means they'll execute wherever
 the expression is currently executing. For example, if the engine is currently
 running on the server, the functions will run on the server.
@@ -1,7 +1,7 @@
 [[canvas-function-reference]]
 == Canvas function reference

-beta[] Behind the scenes, Canvas is driven by a powerful expression language,
+Behind the scenes, Canvas is driven by a powerful expression language,
 with dozens of functions and other capabilities, including table transforms,
 type casting, and sub-expressions.
@@ -1,7 +1,7 @@
 [[canvas-getting-started]]
 == Getting started with Canvas

-beta[]Your best bet to getting started with Canvas is to check out one
+Your best bet to getting started with Canvas is to check out one
 (or all) of the sample data sets that ship with {kib}.

 . Click the {kib} logo in the upper left hand corner of your browser to navigate
@@ -1,7 +1,7 @@
 [[canvas-server-functions]]
 === Canvas server functions

-beta[]These functions can only execute on the server. This may be for performance
+These functions can only execute on the server. This may be for performance
 or security reasons, or because the function uses an API only available on the
 {kib} server. If the expression is executing in the browser, it will transfer to
 the server when it hits one of these functions.
@@ -2,7 +2,7 @@
 [[canvas-tinymath-functions]]
 === TinyMath functions

-beta[]TinyMath provides a set of functions that can be used with the Canvas expression
+TinyMath provides a set of functions that can be used with the Canvas expression
 language to perform complex math calculations. Read on for detailed information about
 the functions available in TinyMath, including what parameters each function accepts,
 the return value of that function, and examples of how each function behaves.
@@ -1,7 +1,7 @@
 [[canvas-workpad]]
 === Using the workpad

-beta[]Now that you have a workpad with sample data that you can mess with, let’s mess with it.
+Now that you have a workpad with sample data that you can mess with, let’s mess with it.
 We’ll start out by making a few stylistic changes.

 . Click the gauge chart in the top left of the workpad (fun fact, these are actually pie charts).
@@ -45,18 +45,6 @@ with dashboard*.
 [[loading-a-saved-dashboard]]
 To import, export, and delete dashboards, see <<managing-saved-objects>>.

-[float]
-=== Changing the Color Theme ===
-
-By default, Kibana dashboards use the light color theme. To use the dark color theme:
-
-. Go to the menu bar and click *Edit*.
-. Click *Options* in the menu bar.
-. Select *Use dark theme*.
-
-To set the dark theme as the default, go to *Management > Advanced Settings*.
-Scroll down to *Dashboard* and toggle *Dark Theme* to *On*.
-
 [[customizing-your-dashboard]]
 == Arranging Dashboard Elements
@@ -82,7 +82,7 @@ Use the `--help` flag for more options.
 [float]
 ===== Environment

-The tests are written in https://mochajs.org[mocha] using https://github.com/Automattic/expect.js[expect] for assertions.
+The tests are written in https://mochajs.org[mocha] using https://github.com/elastic/kibana/tree/master/packages/kbn-expect[@kbn/expect] for assertions.

 We use https://sites.google.com/a/chromium.org/chromedriver/[chromedriver], https://theintern.github.io/leadfoot[leadfoot], and https://github.com/theintern/digdug[digdug] for automating Chrome. When the `FunctionalTestRunner` launches, digdug opens a `Tunnel` which starts chromedriver and a stripped-down instance of Chrome. It also creates an instance of https://theintern.github.io/leadfoot/module-leadfoot_Command.html[Leadfoot's `Command`] class, which is available via the `remote` service. The `remote` communicates to Chrome through the digdug `Tunnel`. See the https://theintern.github.io/leadfoot/module-leadfoot_Command.html[leadfoot/Command API] docs for all the commands you can use with `remote`.
@@ -122,11 +122,11 @@ A test suite is a collection of tests defined by calling `describe()`, and then
 [float]
 ===== Anatomy of a test file

-The annotated example file below shows the basic structure every test suite uses. It starts by importing https://github.com/Automattic/expect.js[`expect.js`] and defining its default export: an anonymous Test Provider. The test provider then destructures the Provider API for the `getService()` and `getPageObjects()` functions. It uses these functions to collect the dependencies of this suite. The rest of the test file will look pretty normal to mocha.js users. `describe()`, `it()`, `before()` and the lot are used to define suites that happen to automate a browser via services and objects of type `PageObject`.
+The annotated example file below shows the basic structure every test suite uses. It starts by importing https://github.com/elastic/kibana/tree/master/packages/kbn-expect[`@kbn/expect`] and defining its default export: an anonymous Test Provider. The test provider then destructures the Provider API for the `getService()` and `getPageObjects()` functions. It uses these functions to collect the dependencies of this suite. The rest of the test file will look pretty normal to mocha.js users. `describe()`, `it()`, `before()` and the lot are used to define suites that happen to automate a browser via services and objects of type `PageObject`.

 ["source","js"]
 ----
-import expect from 'expect.js';
+import expect from '@kbn/expect';
 // test files must `export default` a function that defines a test suite
 export default function ({ getService, getPageObject }) {
@@ -1,22 +1,23 @@
 [[add-sample-data]]
 == Get up and running with sample data

-{kib} has three sample data sets that you can use to explore {kib} before loading your own data
-source. Each set is prepackaged with a dashboard of visualizations and a
-{kibana-ref}/canvas-getting-started.html[Canvas workpad].
+{kib} has several sample data sets that you can use to explore {kib} before loading your own data.
+Sample data sets install prepackaged visualizations, dashboards,
+{kibana-ref}/canvas-getting-started.html[Canvas workpads],
+and {kibana-ref}/maps.html[Maps].

-The sample data sets address common use cases:
+The sample data sets showcase a variety of use cases:

 * *eCommerce orders* includes visualizations for product-related information,
 such as cost, revenue, and price.
 * *Web logs* lets you analyze website traffic.
 * *Flight data* enables you to view and interact with flight routes for four airlines.

 To get started, go to the home page and click the link next to *Add sample data*.

 Once you have loaded a data set, click *View data* to view visualizations in *Dashboard*.

 *Note:* The timestamps in the sample data sets are relative to when they are installed.
 If you uninstall and reinstall a data set, the timestamps will change to reflect the most recent installation.
docs/images/follower_indices.png (new binary image, 182 KiB)
@@ -50,6 +50,8 @@ include::logs/index.asciidoc[]

 include::apm/index.asciidoc[]

+include::uptime/index.asciidoc[]
+
 include::graph/index.asciidoc[]

 include::dev-tools.asciidoc[]
docs/infrastructure/images/infrastructure-configure-source.png (new binary image, 38 KiB)
(two more new binary images: 127 KiB and 748 B)
@@ -4,8 +4,6 @@

 [partintro]
 --
-beta[]
-
 Use the interactive Infrastructure UI to monitor your infrastructure and
 identify problems in real time. You can explore metrics and logs for common
 servers, containers, and services.
@@ -15,15 +13,59 @@ image::infrastructure/images/infra-sysmon.jpg[Infrastructure Overview in Kibana]

 [float]
-== Add data sources
-Kibana provides step-by-step instructions to help you add your data sources.
-The {infra-guide}[Infrastructure Monitoring Guide] is a good source for more detailed
-instructions and information.
+== Add data
+
+Kibana provides step-by-step instructions to help you add log data. The
+{infra-guide}[Infrastructure Monitoring Guide] is a good source for more
+detailed information and instructions.

 [float]
 == Configure data sources

-By default the Infrastructure UI uses the `metricbeat-*` index pattern to query the data. If you configured Metricbeat to export data to a different set of indices, you will need to set `xpack.infra.sources.default.metricAlias` in `config/kibana.yml` to match your index pattern. You can also configure the timestamp field by overriding `xpack.infra.sources.default.fields.timestamp`. See <<infrastructure-ui-settings-kb>> for a complete list.
+The `metricbeat-*` index pattern is used to query the data by default.
+If your metrics are located in a different set of indices, or use a
+different timestamp field, you can adjust the source configuration via the user
+interface or the {kib} configuration file.
+
+NOTE: Logs and Infrastructure share a common data source definition in
+each space. Changes in one of them can influence the data displayed in the
+other.
+
+[float]
+=== Configure source
+
+Configure source can be accessed via the corresponding
+image:logs/images/logs-configure-source-gear-icon.png[Configure source icon]
+button in the toolbar:
+
+[role="screenshot"]
+image::infrastructure/images/infrastructure-configure-source.png[Configure Infrastructure UI source button in Kibana]
+
+This opens the source configuration fly-out dialog, in which the following
+configuration items can be inspected and adjusted:
+
+* *Name*: The name of the source configuration.
+* *Indices*: The patterns of the elasticsearch indices to read metrics and logs
+from.
+* *Fields*: The names of particular fields in the indices that need to be known
+to the Infrastructure and Logs UIs in order to query and interpret the data
+correctly.
+
+[role="screenshot"]
+image::infrastructure/images/infrastructure-configure-source-dialog.png[Configure Infrastructure UI source dialog in Kibana]
+
+TIP: If <<xpack-spaces>> are enabled in your Kibana instance, any configuration
+changes performed via Configure source are specific to that space. You can
+therefore easily make different subsets of the data available by creating
+multiple spaces with different data source configurations.
+
+[float]
+=== Configuration file
+
+The settings in the configuration file are used as a fallback when no other
+configuration for that space has been defined. They are located in the
+configuration namespace `xpack.infra.sources.default`. See
+<<infrastructure-ui-settings-kb>> for a complete list of the possible entries.

 --
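As a sketch, such a fallback in `config/kibana.yml` might look like the following (the index pattern shown is illustrative, not a default):

[source,yaml]
----
# Fallback source configuration for the Infrastructure UI
xpack.infra.sources.default.metricAlias: "custom-metrics-*"
xpack.infra.sources.default.fields.timestamp: "@timestamp"
----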
@@ -59,4 +59,10 @@ Use the time selector to focus on a specific timeframe.
 Set auto-refresh to keep up-to-date information coming in, or stop
 refreshing to focus on historical data without new distractions.
+
+[float]
+[[infra-configure-source]]
+=== Adapt to your metric source
+
+Using a custom index pattern to store the metrics, or want to limit the entries
+presented in a space? Use configure source to change the index pattern and
+other settings.
docs/logs/images/logs-configure-source-dialog.png (new binary image, 279 KiB)
docs/logs/images/logs-configure-source-gear-icon.png (new binary image, 748 B)
docs/logs/images/logs-configure-source.png (new binary image, 45 KiB)
@@ -5,7 +5,6 @@
 [partintro]
 --

-beta[]
 Use the Logs UI to explore logs for common servers, containers, and services.
 {kib} provides a compact, console-like display that you can customize.
@@ -14,17 +13,60 @@ image::logs/images/logs-console.png[Log Console in Kibana]

 [float]
-== Add data sources
+== Add data

-Kibana provides step-by-step instructions to help you add your data sources.
-The {infra-guide}[Infrastructure Monitoring Guide] is a good source for more detailed information and
-instructions.
+Kibana provides step-by-step instructions to help you add log data. The
+{infra-guide}[Infrastructure Monitoring Guide] is a good source for more
+detailed information and instructions.

 [float]
 == Configure data sources

-By default the Logs UI uses the `filebeat-*` index pattern to query the data. If your logs are located in a different set of indices, you will need to set `xpack.infra.sources.default.logAlias` in `config/kibana.yml` to match your log's index pattern. You can also configure the timestamp field by overriding `xpack.infra.sources.default.fields.timestamp`, by default it is set to `@timestamp`. See <<logs-ui-settings-kb>> for a complete list.
+The `filebeat-*` index pattern is used to query data by default.
+If your logs are located in a different set of indices, or use a different
+timestamp field, you can adjust the source configuration via the user interface
+or the {kib} configuration file.
+
+NOTE: Logs and Infrastructure share a common data source definition in
+each space. Changes in one of them can influence the data displayed in the
+other.
+
+[float]
+=== Configure source
+
+Configure source can be accessed via the corresponding
+image:logs/images/logs-configure-source-gear-icon.png[Configure source icon]
+button in the toolbar.
+
+[role="screenshot"]
+image::logs/images/logs-configure-source.png[Configure Logs UI source button in Kibana]
+
+This opens the source configuration fly-out dialog, in which the following
+configuration items can be inspected and adjusted:
+
+* *Name*: The name of the source configuration.
+* *Indices*: The patterns of the elasticsearch indices to read metrics and logs
+from.
+* *Fields*: The names of particular fields in the indices that need to be known
+to the Infrastructure and Logs UIs in order to query and interpret the data
+correctly.
+
+[role="screenshot"]
+image::logs/images/logs-configure-source-dialog.png[Configure logs UI source dialog in Kibana]
+
+TIP: If <<xpack-spaces>> are enabled in your Kibana instance, any configuration
+changes performed via Configure source are specific to that space. You can
+therefore easily make different subsets of the data available by creating
+multiple spaces with different data source configurations.
+
+[float]
+=== Configuration file
+
+The settings in the configuration file are used as a fallback when no other
+configuration for that space has been defined. They are located in the
+configuration namespace `xpack.infra.sources.default`. See
+<<logs-ui-settings-kb>> for a complete list of the possible entries.

 --

 include::logs-ui.asciidoc[]
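The analogous sketch for the Logs UI points `logAlias` at the custom log indices (again, the pattern below is illustrative):

[source,yaml]
----
# Fallback source configuration for the Logs UI
xpack.infra.sources.default.logAlias: "custom-logs-*"
----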
@@ -12,6 +12,13 @@ image::logs/images/logs-console.png[Log Console in Kibana]
 === Use the power of Search
 The Search bar is always available. Use it to perform ad hoc and structured searches.

+[float]
+[[logs-configure-source]]
+=== Adapt to your log source
+Using a custom index pattern to store the log entries, or want to limit the
+entries presented in a space? Use configure source to change the index pattern
+and other settings.
+
 [float]
 [[logs-time]]
 === Jump to a specific time period
@@ -31,7 +38,4 @@ Use *Customize* to adjust your console view and to set the time scale of the log
 === Stream or pause logs
 You can stream data for live log tailing, or pause streaming to focus on historical log data.
 When you are streaming logs, the most recent log appears at the bottom of the console.
 Historical data offers infinite scrolling.
-
-
-
@@ -24,17 +24,16 @@ image::images/index-lifecycle-policies-create.png[][UI for creating an index lifecycle policy]
 ==== Defining the phases of the index lifecycle

 You can define up to four phases in the index lifecycle. For each phase, you
-can enable actions to optimize performance for that phase. Transitioning
-between phases is based on the age of the index.
+can enable actions to optimize performance for that phase.

 The four phases in the index lifecycle are:

-* *Hot.* The index is actively being queried and written to. You can optionally
+* *Hot.* The index is actively being queried and written to. You can
 roll over to a new index when the
-original index reaches a specified size or age. When a rollover occurs, a new
+original index reaches a specified size, document count, or age. When a rollover occurs, a new
 index is created, added to the index alias, and designated as the new “hot”
 index. You can still query the previous indices, but you only ever write to
-the “hot” index. See {ref}/indices-rollover-index.html[Rollover index] for more information.
+the “hot” index. See <<setting-a-rollover-action>>.

 * *Warm.* The index is typically searched at a lower rate than when the data is
 hot. The index is not used for storing new data, but might occasionally add
@@ -60,6 +59,27 @@ delete phases are optional. For example, you might define all four phases for
 one policy and only a hot and delete phase for another. See {ref}/_actions.html[Actions]
 for more information on the actions available in each phase.

+[[setting-a-rollover-action]]
+==== Setting a rollover action
+
+The {ref}/indices-rollover-index.html[rollover] action enables you to automatically roll over to a new index based
+on the index size, document count, or age. Rolling over to a new index based on
+these criteria is preferable to time-based rollovers. Rolling over at an arbitrary
+time often results in many small indices, which can have a negative impact on performance and resource usage.
+
+When you create an index lifecycle policy, the rollover action is enabled
+by default. The default size for triggering the rollover is 50 gigabytes, and
+the default age is 30 days. The rollover occurs when any of the criteria are met.
+
+With the rollover action enabled, you can move to the warm phase on rollover or you can
+time the move for a specified number of hours or days after the rollover. The
+move to the cold and delete phases is based on the time from the rollover.
+
+If you are using daily indices (created by Logstash or another client) and you
+want to use the index lifecycle policy to manage aging data, you can
+disable the rollover action in the hot phase. You can then
+transition to the warm, cold, and delete phases based on the time of index creation.
+
 ==== Setting the index priority

 For the hot, warm, and cold phases, you can set a priority for recovering
@@ -1,31 +1,30 @@
 [[management-cross-cluster-search]]
-=== Cross Cluster Search
+=== {ccs-cap}

-Elasticsearch supports the ability to run search and aggregation requests across multiple
-clusters using a module called _cross cluster search_.
+{es} supports the ability to run search and aggregation requests across multiple
+clusters using a module called _{ccs}_.

-In order to take advantage of cross cluster search, you must configure your Elasticsearch
-clusters accordingly. Review the corresponding Elasticsearch
-{ref}/modules-cross-cluster-search.html[documentation] before attempting to use cross cluster
-search in Kibana.
+In order to take advantage of {ccs}, you must configure your {es}
+clusters accordingly. Review the corresponding {es}
+{ref}/modules-cross-cluster-search.html[documentation] before attempting to use {ccs} in {kib}.

-Once your Elasticsearch clusters are configured for cross cluster search, you can create
-specific index patterns in Kibana to search across the clusters of your choosing. Using the
-same syntax that you'd use in a raw cross cluster search request in Elasticsearch, create your
-index pattern in Kibana with the convention `<cluster-names>:<pattern>`.
+Once your {es} clusters are configured for {ccs}, you can create
+specific index patterns in {kib} to search across the clusters of your choosing. Using the
+same syntax that you'd use in a raw {ccs} request in {es}, create your
+index pattern in {kib} with the convention `<cluster-names>:<pattern>`.

-For example, if you want to query logstash indices across two of the Elasticsearch clusters
-that you set up for cross cluster search, which were named `cluster_one` and `cluster_two`,
-you would use `cluster_one:logstash-*,cluster_two:logstash-*` as your index pattern in Kibana.
+For example, if you want to query {ls} indices across two of the {es} clusters
+that you set up for {ccs}, which were named `cluster_one` and `cluster_two`,
+you would use `cluster_one:logstash-*,cluster_two:logstash-*` as your index pattern in {kib}.

-Just like in raw search requests in Elasticsearch, you can use wildcards in your cluster names
-to match any number of clusters, so if you wanted to search logstash indices across any
+Just like in raw search requests in {es}, you can use wildcards in your cluster names
+to match any number of clusters, so if you wanted to search {ls} indices across any
 clusters named `cluster_foo`, `cluster_bar`, and so on, you would use `cluster_*:logstash-*`
-as your index pattern in Kibana.
+as your index pattern in {kib}.

-If you want to query across all Elasticsearch clusters that have been configured for cross
-cluster search, then use a standalone wildcard for your cluster name in your Kibana index
+If you want to query across all {es} clusters that have been configured for {ccs},
+then use a standalone wildcard for your cluster name in your {kib} index
 pattern: `*:logstash-*`.

-Once an index pattern is configured using the cross cluster search syntax, all searches and
-aggregations using that index pattern in Kibana take advantage of cross cluster search.
+Once an index pattern is configured using the {ccs} syntax, all searches and
+aggregations using that index pattern in {kib} take advantage of {ccs}.
@@ -1,26 +1,67 @@
+[[working-remote-clusters]]
+== Working with remote clusters
+
+{kib} *Management* provides user interfaces for working with data from remote
+clusters and managing the {ccr} process. You can replicate indices from a
+leader remote cluster to a follower index in a local cluster. The local follower indices
+can be used to provide remote backups for disaster recovery or for geo-proximate copies of data.
+
+Before using these features, you should be familiar with the following concepts:
+
+* {stack-ov}/xpack-ccr.html[{ccr-cap}]
+* {ref}/modules-cross-cluster-search.html[{ccs-cap}]
+* {stack-ov}/cross-cluster-configuring.html[Cross-cluster security requirements]
+
+[float]
 [[managing-remote-clusters]]
-== Managing Remote Clusters
+== Managing remote clusters

-{kib} *Management* provides two user interfaces for working with data from remote
-clusters.
+*Remote clusters* helps you manage remote clusters for use with
+{ccs} and {ccr}. You can add and remove remote clusters and check their connectivity.

-*Remote Clusters* helps you manage remote clusters for use with
-{ref}/modules-cross-cluster-search.html[cross cluster search] and
-{xpack-ref}/xpack-ccr.html[cross cluster replication]. You can add and remove remote
-clusters and check their connectivity.
+Before you use this feature, you should be familiar with the concept of
+{ref}/modules-remote-clusters.html[remote clusters].

-Go to *Management > Elasticsearch > Remote Clusters* to get started.
+Go to *Management > Elasticsearch > Remote clusters* to create or manage your remotes.
+
+To set up a new remote, click *Add a remote cluster*. Give the cluster a unique name
+and define the seed nodes for cluster discovery. You can edit or remove your remote clusters
+from the *Remote clusters* list view.

 [role="screenshot"]
 image::images/add_remote_cluster.png[][UI for adding a remote cluster]

-*Cross Cluster Replication* includes tools to help you create and manage the remote
-replication process. You can follow an index pattern on the remote cluster for
-auto-discovery and then replicate new indices in the local cluster that match the
-auto-follow pattern.
+Once a remote cluster is registered, you can use the tools under *{ccr-cap}*
+to add and manage follower indices on the local cluster, and replicate data from
+indices on the remote cluster based on an auto-follow index pattern.

-Go to *Management > Elasticsearch > Cross Cluster Replication* to get started.
+[float]
+[[managing-cross-cluster-replication]]
+== [xpack]#Managing {ccr}#
+
+*{ccr-cap}* helps you create and manage the {ccr} process.
+If you want to replicate data from existing indices, or set up
+local followers on a case-by-case basis, go to *Follower indices*.
+If you want to automatically detect and follow new indices when they are created
+on a remote cluster, you can do so from *Auto-follow patterns*.
+
+Creating an auto-follow pattern is useful when you have time-series data, like a logs index, on the
+remote cluster that is created or rolled over on a daily basis. Once you have configured an
+auto-follow pattern, any time a new index with a name that matches the pattern is
+created in the remote cluster, a follower index is automatically configured in the local cluster.
+
+From the same view, you can also see a list of your saved auto-follow patterns for
+a given remote cluster, and monitor whether the replication is active.
+
+Before you use these features, you should be familiar with the following concepts:
+
+* {stack-ov}/ccr-requirements.html[Requirements for leader indices]
+* {stack-ov}/ccr-auto-follow.html[Automatically following indices]
+
+To get started, go to *Management > Elasticsearch > {ccr-cap}*.

 [role="screenshot"]
 image::images/auto_follow_pattern.png[][UI for adding an auto-follow pattern]
+
+[role="screenshot"]
+image::images/follower_indices.png[][UI for adding follower indices]
@@ -38,10 +38,13 @@ continue by reindexing fewer indices at a time.

 Additional considerations:

-* During a reindex of a Watcher (`.watches`) index, the Watcher process
-pauses and no alerts are triggered.
+* If you use {alert-features}, when you reindex the internal indices
+(`.watches`), the {watcher} process pauses and no alerts are triggered.

-* During a reindex of a Machine Learning (`.ml-state`) index, the Machine
-Learning job pauses and models are not trained or updated.
+* If you use {ml-features}, when you reindex the internal indices (`.ml-state`),
+the {ml} jobs pause and models are not trained or updated.

 * If you use {security-features}, before you reindex the internal indices
 (`.security*`), it is a good idea to create a temporary superuser account in the
 `file` realm. For more information, see
 {ref}/configuring-file-realm.html[Configuring a file realm].
docs/maps/images/grid_metrics_both.png (new binary image, 113 KiB)
docs/maps/images/gs_add_cloropeth_layer.png (new binary image, 967 KiB)
docs/maps/images/gs_add_es_layer.png (new binary image, 809 KiB)
docs/maps/images/gs_create_new_map.png (new binary image, 731 KiB)
docs/maps/images/gs_link_icon.png (new binary image, 665 B)
docs/maps/images/gs_plus_icon.png (new binary image, 629 B)
docs/maps/images/sample_data_web_logs.png (new binary image, 698 KiB)
@@ -13,6 +13,7 @@ image::maps/images/sample_data_ecommerce.png[]

 --

+include::maps-getting-started.asciidoc[]
 include::heatmap-layer.asciidoc[]
 include::tile-layer.asciidoc[]
 include::vector-layer.asciidoc[]
docs/maps/maps-getting-started.asciidoc (new file, 171 lines)

[[maps-getting-started]]
== Getting started with Maps

You work with *Maps* by adding layers. The data for a layer can come from
sources such as {es} documents, vector sources, tile map services, web map
services, and more. You can symbolize the data in different ways.
For example, you might show which airports have the longest flight
delays by using circles from small to big. Or,
you might show the amount of web log traffic by shading countries from
light to dark.

[role="screenshot"]
image::maps/images/sample_data_web_logs.png[]

[float]
=== Prerequisites
Before you start this tutorial, <<add-sample-data, add the web logs sample data set>>. Each
sample data set includes a map to go along with the data. Once you've added the data, open *Maps* and
explore the different layers of the *[Logs] Total Requests and Bytes* map.
You'll re-create this map in this tutorial.

[float]
=== Take-away skills
In this tutorial, you'll learn to:

* Create a multi-layer map
* Connect a layer to a data source
* Use symbols, colors, and labels to style a layer
* Create layers for {es} data


=== Creating a new map

The first thing to do is to create a new map.

. If you haven't already, open *Maps*.
. On the maps list page, click *Create map*.
+
A new map is created using a base tile layer.
+
[role="screenshot"]
image::maps/images/gs_create_new_map.png[]


=== Adding a choropleth layer

Now that you have a map, you'll want to add layers to it.
The first layer you'll add is a choropleth layer to shade world countries
by web log traffic. Darker shades symbolize countries with more web log traffic,
and lighter shades symbolize countries with less traffic.

==== Add a vector layer from the Elastic Maps Service source

. In the map legend, click *Add layer*.
. Click the *Vector shapes* data source.
. From the *Layer* dropdown menu, select *World Countries*.
. Click the *Add layer* button.
. Set *Layer name* to `Total Requests by Country`.
. Set *Layer transparency* to 0.5.

===== Join the vector layer with the sample web log index

You must add the web log traffic property to the world countries so
that the property is available for styling.
You'll create a <<terms-join, terms join>> to link the vector source *World Countries* to
the {es} index `kibana_sample_data_logs` on the shared key iso2 = geo.src.

. Click plus image:maps/images/gs_plus_icon.png[] to the right of the *Term Joins* label.
. Click *Join --select--*.
. Set *Left field* to *ISO 3166-1 alpha-2 code*.
. Set *Right source* to *kibana_sample_data_logs*.
. Set *Right field* to *geo.src*.

===== Set the vector style

The final step is to set the vector fill color to shade
the countries by web log traffic.

. Click image:maps/images/gs_link_icon.png[] to the right of *Fill color*.
. Select the grey color ramp.
. In the field select input, select *count of kibana_sample_data_logs:geo.src*.
. Click *Save & close*.
+
Your map now looks like this:
+
[role="screenshot"]
image::maps/images/gs_add_cloropeth_layer.png[]

=== Adding layers for {es} data

You'll add two layers for {es} data. The first layer displays documents, and the
second layer displays aggregated data.
The raw documents appear when you zoom in the map to show smaller regions.
The aggregated data
appears when you zoom out the map to show larger amounts of the globe.

==== Add a vector layer from the document source

This layer displays web log documents as points.
The layer is only visible when you zoom in the map past zoom level 9.

. In the map legend, click *Add layer*.
. Click the *Documents* data source.
. Set *Index pattern* to *kibana_sample_data_logs*.
. Click the *Add layer* button.
. Set *Layer name* to `Actual Requests`.
. Set *Min zoom* to 9 and *Max zoom* to 24.
. Set *Layer transparency* to 1.
. Set *Fill color* to *#2200ff*.
. Click *Save & close*.

==== Add a vector layer from the grid aggregation source

Aggregations group {es} documents into grids. You can calculate metrics
for each gridded cell.

You'll create a layer for aggregated data and make it visible only when the map
is zoomed out past zoom level 9. Darker colors will symbolize grids
with more web log traffic, and lighter colors will symbolize grids with less
traffic. Larger circles will symbolize grids with
more total bytes transferred, and smaller circles will symbolize
grids with fewer bytes transferred.

[role="screenshot"]
image::maps/images/grid_metrics_both.png[]

===== Add the layer

. In the map legend, click *Add layer*.
. Click the *Grid aggregation* data source.
. Set *Index pattern* to *kibana_sample_data_logs*.
. Click the *Add layer* button.
. Set *Layer name* to `Total Requests and Bytes`.
. Set *Min zoom* to 0 and *Max zoom* to 9.
. Set *Layer transparency* to 1.

===== Configure the aggregation metrics

. Click plus image:maps/images/gs_plus_icon.png[] to the right of the *Metrics* label.
. Select *Sum* in the aggregation select.
. Select *bytes* in the field select.

===== Set the vector style

. In *Vector style*, change *Symbol size*:
.. Set *Min size* to 1.
.. Set *Max size* to 25.
.. In the field select, select *sum of bytes*.
. Click the *Save & close* button.
+
Your map now looks like this:
+
[role="screenshot"]
image::maps/images/gs_add_es_layer.png[]

=== Saving the map
Now that your map is complete, you'll want to save it so others can use it.

. In the application toolbar, click *Save*.
. Enter `Tutorial web logs map` for the title.
. Click *Confirm Save*.

You're now ready to start creating maps using your own data. You might find
these resources helpful:

* <<heatmap-layer, Heat map layer>>
* <<tile-layer, Tile layer>>
* <<vector-layer, Vector layer>>
@@ -6,4 +6,12 @@

 coming[8.0.0]

 See also <<breaking-changes-8.0,breaking changes>> and <<release-notes>>.
+
+//NOTE: The notable-highlights tagged regions are re-used in the
+//Installation and Upgrade Guide
+
+// tag::notable-highlights[]
+
+// end::notable-highlights[]
@@ -1,16 +1,16 @@
 [[cross-cluster-kibana]]
-==== Cross Cluster Search and Kibana
+==== {ccs-cap} and {kib}

-When Kibana is used to search across multiple clusters, a two-step authorization
+When {kib} is used to search across multiple clusters, a two-step authorization
 process determines whether or not the user can access indices on a remote
 cluster:

 * First, the local cluster determines if the user is authorized to access remote
-clusters. (The local cluster is the cluster Kibana is connected to.)
+clusters. (The local cluster is the cluster {kib} is connected to.)
 * If they are, the remote cluster then determines if the user has access
 to the specified indices.

-To grant Kibana users access to remote clusters, assign them a local role
+To grant {kib} users access to remote clusters, assign them a local role
 with read privileges to indices on the remote clusters. You specify remote
 cluster indices as `<remote_cluster_name>:<index_name>`.
@ -18,10 +18,10 @@ To enable users to actually read the remote indices, you must create a matching
|
|||
role on the remote clusters that grants the `read_cross_cluster` privilege
|
||||
and access to the appropriate indices.
|
||||
|
||||
For example, if Kibana is connected to the cluster where you're actively
|
||||
indexing Logstash data (your _local cluster_) and you're periodically
|
||||
For example, if {kib} is connected to the cluster where you're actively
|
||||
indexing {ls} data (your _local cluster_) and you're periodically
|
||||
offloading older time-based indices to an archive cluster
|
||||
(your _remote cluster_) and you want to enable Kibana users to search both
|
||||
(your _remote cluster_) and you want to enable {kib} users to search both
|
||||
clusters:
|
||||
|
||||
. On the local cluster, create a `logstash_reader` role that grants
|
||||
|
@ -31,7 +31,7 @@ NOTE: If you configure the local cluster as another remote in {es}, the
|
|||
`logstash_reader` role on your local cluster also needs to grant the
|
||||
`read_cross_cluster` privilege.
|
||||
|
||||
. Assign your Kibana users the `kibana_user` role and your `logstash_reader`
|
||||
. Assign your {kib} users the `kibana_user` role and your `logstash_reader`
|
||||
role.
|
||||
|
||||
. On the remote cluster, create a `logstash_reader` role that grants the
|
||||
|
|
|
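As a minimal sketch, the remote-cluster `logstash_reader` role described above could be created like this (the index pattern and exact privilege list here are illustrative, not the documented steps verbatim):

["source","sh",subs="attributes,callouts"]
---------------------------------------------------------------
PUT /_security/role/logstash_reader
{
  "indices" : [
    {
      "names" : [ "logstash-*" ],
      "privileges" : [ "read", "read_cross_cluster", "view_index_metadata" ]
    }
  ]
}
---------------------------------------------------------------
// CONSOLE
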
@ -1,6 +1,6 @@
`xpack.infra.enabled`:: Set to `false` to disable the Logs and Infrastructure UI plugin in {kib}. Defaults to `true`.

`xpack.infra.sources.default.logAlias`:: Index pattern for matching indices that contain log data. Defaults to `filebeat-*`.
`xpack.infra.sources.default.logAlias`:: Index pattern for matching indices that contain log data. Defaults to `filebeat-*,kibana_sample_data_logs*`.

`xpack.infra.sources.default.metricAlias`:: Index pattern for matching indices that contain Metricbeat data. Defaults to `metricbeat-*`.

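For instance, a `kibana.yml` fragment that overrides these defaults might look like this (the log index pattern is illustrative):

["source","yaml"]
---------------------------------------------------------------
xpack.infra.enabled: true
xpack.infra.sources.default.logAlias: "filebeat-*,my-app-logs-*"
xpack.infra.sources.default.metricAlias: "metricbeat-*"
---------------------------------------------------------------
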
@ -68,8 +68,8 @@ security is enabled, `xpack.security.encryptionKey`.
============

`xpack.reporting.queue.pollInterval`::
Specifies the number of milliseconds that idle workers wait between polling the
index for pending jobs. Defaults to `3000` (3 seconds).
Specifies the number of milliseconds that the reporting poller waits between polling the
index for any pending Reporting jobs. Defaults to `3000` (3 seconds).

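As a sketch, tuning the queue in `kibana.yml` might look like this (the poll interval shown is the documented default; the timeout value is illustrative):

["source","yaml"]
---------------------------------------------------------------
xpack.reporting.queue.pollInterval: 3000
xpack.reporting.queue.timeout: 120000
---------------------------------------------------------------
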
[[xpack-reporting-q-timeout]]`xpack.reporting.queue.timeout`::
How long each worker has to produce a report. If your machine is slow or under

@ -39,6 +39,13 @@ Alternatively, you can download other Docker images that contain only features
available under the Apache 2.0 license. To download the images, go to
https://www.docker.elastic.co[www.docker.elastic.co].

[float]
=== Running Kibana on Docker for development
Kibana can be quickly started and connected to a local Elasticsearch container for development
or testing with the following command:

--------------------------------------------
docker run --link YOUR_ELASTICSEARCH_CONTAINER_NAME_OR_ID:elasticsearch -p 5601:5601 {docker-repo}:{version}
--------------------------------------------
endif::[]

[float]

@ -122,7 +122,8 @@ which is proxied through the Kibana server.

`kibana.index:`:: *Default: ".kibana"* Kibana uses an index in Elasticsearch to
store saved searches, visualizations and dashboards. Kibana creates a new index
if the index doesn’t already exist.
if the index doesn’t already exist. If you configure a custom index, the name must
be lowercase, and conform to {es} {ref}/indices-create-index.html[index name limitations].

`logging.dest:`:: *Default: `stdout`* Enables you to specify a file where Kibana
stores log output.

@ -134,7 +135,7 @@ suppress all logging output other than error messages.
suppress all logging output.

`logging.timezone`:: *Default: UTC* Set to the canonical timezone id
(e.g. `US/Pacific`) to log events using that timezone. A list of timezones can
(for example, `America/Los_Angeles`) to log events using that timezone. A list of timezones can
be referenced at https://en.wikipedia.org/wiki/List_of_tz_database_time_zones.

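For instance, these settings might be combined in `kibana.yml` like this (the custom index name and log path are illustrative):

["source","yaml"]
---------------------------------------------------------------
kibana.index: ".kibana-custom"
logging.dest: "/var/log/kibana/kibana.log"
logging.timezone: "America/Los_Angeles"
---------------------------------------------------------------
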
[[logging-verbose]]`logging.verbose:`:: *Default: false* Set the value of this

BIN docs/uptime/images/check-history.png (new file, 295 KiB)
BIN docs/uptime/images/crosshair-example.png (new file, 97 KiB)
BIN docs/uptime/images/error-list.png (new file, 88 KiB)
BIN docs/uptime/images/filter-bar.png (new file, 27 KiB)
BIN docs/uptime/images/monitor-charts.png (new file, 118 KiB)
BIN docs/uptime/images/monitor-list.png (new file, 208 KiB)
BIN docs/uptime/images/snapshot-view.png (new file, 82 KiB)
BIN docs/uptime/images/status-bar.png (new file, 33 KiB)

28 docs/uptime/index.asciidoc (new file)
@ -0,0 +1,28 @@
[role="xpack"]
[[xpack-uptime]]
= Uptime

[partintro]
--
Use the Uptime UI to monitor the status of network endpoints via HTTP/S, TCP,
and ICMP. You can explore status over time, drill into specific monitors,
and view a high-level snapshot of your environment at a selected point in time.

[float]
== Add monitors
To get started with Uptime monitoring, you'll need to define some monitors and run Heartbeat.
These monitors provide the data you'll visualize in the Uptime UI.
See {heartbeat-ref}/heartbeat-configuration.html[Configure Heartbeat] for instructions
on configuring monitors to begin storing Uptime information in your cluster.

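As a minimal sketch, an HTTP monitor in `heartbeat.yml` might look like this (the URL and schedule are illustrative):

["source","yaml"]
---------------------------------------------------------------
heartbeat.monitors:
- type: http
  urls: ["http://localhost:9200"]
  schedule: "@every 10s"
---------------------------------------------------------------
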
[float]
== Uptime, Heartbeat, and Kibana
For Uptime to work, it is important that you use the same major versions of Heartbeat and Kibana.
For example, version 6.7 of Kibana will expect an index of `heartbeat-6*`,
while Kibana 7.0 requires an index of `heartbeat-7*` (containing documents from Heartbeat 7.0).

--

include::overview.asciidoc[]
include::monitor.asciidoc[]
include::security.asciidoc[]

54 docs/uptime/monitor.asciidoc (new file)
@ -0,0 +1,54 @@
[role="xpack"]
[[uptime-monitor]]
== Monitor

The Monitor page helps you get further insight into the performance
of a specific network endpoint. You'll see a detailed visualization of
the monitor's request duration over time, as well as its `up`/`down`
status over time.

[float]
=== Status bar

[role="screenshot"]
image::uptime/images/status-bar.png[Status bar]

The Status bar displays a quick summary of the latest information
about your monitor. You can view its latest status, click a link to
visit the targeted URL, see its most recent request duration, and determine the
amount of time that has elapsed since the last check.

You can use the Status bar to get a quick summary of current performance,
beyond simply knowing whether the monitor is `up` or `down`.

[float]
=== Monitor charts

[role="screenshot"]
image::uptime/images/monitor-charts.png[Monitor charts]

The Monitor charts visualize information over the time specified in the
date range. These charts can help you gain insight into how quickly requests are being resolved
by the targeted endpoint, and give you a sense of how frequently a host or endpoint
was down in your selected timespan.

The first chart displays request duration information for your monitor.
The area surrounding the line is the range of request times for the corresponding
bucket; the line is the average time.

Next is a graphical representation of the check statuses over time. Hover over
the charts to display crosshairs with more specific numeric data.

[role="screenshot"]
image::uptime/images/crosshair-example.png[Chart crosshair]

[float]
=== Check history

[role="screenshot"]
image::uptime/images/check-history.png[Check history view]

The Check history displays the total count of this monitor's checks for the selected
date range. You can additionally filter the checks by `status` to help find recent problems
on a per-check basis. This table can help you gain insight into the more granular details
of the individual data points Heartbeat is logging about your host or endpoint.

61 docs/uptime/overview.asciidoc (new file)
@ -0,0 +1,61 @@
[role="xpack"]
[[uptime-overview]]

== Overview

The Uptime overview is intended to help you quickly identify and diagnose outages and
other connectivity issues within your network or environment. A date range
selection is global to the Uptime UI; you can use it to select
an absolute or relative date range, similar to other areas of Kibana.

[float]
=== Filter bar

[role="screenshot"]
image::uptime/images/filter-bar.png[Filter bar]

The filter bar is designed to let you quickly view specific groups of monitors, or even
an individual monitor, if you have defined many.

This control lets you use automated filter options, as well as enter custom filter
text to select specific monitors by field, URL, ID, and other attributes.

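For example, filter text like the following (the monitor ID is illustrative) narrows the view to a single monitor:

---------------------------------------------------------------
monitor.id: "auto-http-0X131221E04CFA130C"
---------------------------------------------------------------
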
[float]
=== Snapshot view

[role="screenshot"]
image::uptime/images/snapshot-view.png[Snapshot view]

This view is intended to quickly give you a sense of the overall
status of the environment you're monitoring, or a subset of those monitors.
Here, you can see the total number of detected monitors within the selected
Uptime date range. In addition to the total, the counts of monitors
in an `up` or `down` state are displayed, based on the last check reported by Heartbeat
for each monitor.

Next to the counts, a histogram displays the change over time throughout the
selected date range.

[float]
=== Monitor list

[role="screenshot"]
image::uptime/images/monitor-list.png[Monitor list]

The Monitor list displays information at the level of individual monitors.
The data shown here fleshes out your individual monitors and provides a quick
way to navigate to a more in-depth visualization for interesting hosts or endpoints.

This table includes information like the most recent status, when the monitor was last checked, its
ID and URL, its IP address, and a dedicated sparkline showing its check status over time.

[float]
=== Error list

[role="screenshot"]
image::uptime/images/error-list.png[Error list]

The Error list displays aggregations of errors that Heartbeat has logged. Errors are
displayed by error type, monitor ID, and message. Clicking a monitor's ID will take you
to the corresponding Monitor view, which provides richer information about the individual
data points that are resulting in the displayed errors.

73 docs/uptime/security.asciidoc (new file)
@ -0,0 +1,73 @@
[role="xpack"]
[[uptime-security]]

== Use with Elasticsearch Security

If you use Elasticsearch security, you'll need to enable certain privileges for users
who want to access the Uptime app. Below is an example of creating
a user and a supporting role to implement those privileges.

[float]
=== Create a role

You'll need a role that lets you access the Heartbeat indices, which by default are `heartbeat-*`.
You can create this role with the following request:

["source","sh",subs="attributes,callouts"]
---------------------------------------------------------------
PUT /_security/role/uptime
{
  "indices" : [
    {
      "names" : [ "heartbeat-*" ],
      "privileges" : [ "read", "view_index_metadata" ],
      "field_security" : {
        "grant" : [ "*" ]
      },
      "allow_restricted_indices" : false
    }
  ],
  "applications" : [
    {
      "application" : "kibana-.kibana",
      "privileges" : [ "all" ],
      "resources" : [ "*" ]
    }
  ],
  "transient_metadata" : {
    "enabled" : true
  }
}
---------------------------------------------------------------
// CONSOLE

[float]
=== Assign the role to a user

Next, you'll need to create a user with both the `kibana_user` and `uptime` roles.
You can do this with the following request:

["source","sh",subs="attributes,callouts"]
---------------------------------------------------------------
PUT /_security/user/jacknich
{
  "password" : "j@rV1s",
  "roles" : [ "uptime", "kibana_user" ],
  "full_name" : "Jack Nicholson",
  "email" : "jacknich@example.com",
  "metadata" : {
    "intelligence" : 7
  }
}
---------------------------------------------------------------
// CONSOLE

3 kibana.d.ts (vendored)
@ -20,7 +20,8 @@
/**
 * All exports from TS source files (where the implementation is actually done in TS).
 */
export * from './target/types/type_exports';
export { Public, Server } from 'src/core';

/**
 * All exports from TS ambient definitions (where types are added for JS source in a .d.ts file).
 */

20 package.json

@ -48,6 +48,7 @@
    "test:ui:runner": "node scripts/functional_test_runner",
    "test:server": "grunt test:server",
    "test:coverage": "grunt test:coverage",
    "typespec": "typings-tester --config x-pack/plugins/canvas/public/lib/aeroelastic/tsconfig.json x-pack/plugins/canvas/public/lib/aeroelastic/__fixtures__/typescript/typespec_tests.ts",
    "checkLicenses": "grunt licenses --dev",
    "build": "node scripts/build --all-platforms",
    "start": "node --trace-warnings --trace-deprecation scripts/kibana --dev ",

@ -66,8 +67,7 @@
    "uiFramework:createComponent": "cd packages/kbn-ui-framework && yarn createComponent",
    "uiFramework:documentComponent": "cd packages/kbn-ui-framework && yarn documentComponent",
    "kbn:watch": "node scripts/kibana --dev --logging.json=false",
    "build:types": "tsc --p tsconfig.types.json",
    "kbn:bootstrap": "yarn build:types && node scripts/register_git_hook"
    "kbn:bootstrap": "node scripts/register_git_hook"
  },
  "repository": {
    "type": "git",

@ -82,7 +82,8 @@
      "packages/*",
      "x-pack",
      "x-pack/plugins/*",
      "test/plugin_functional/plugins/*"
      "test/plugin_functional/plugins/*",
      "test/interpreter_functional/plugins/*"
    ],
    "nohoist": [
      "**/@types/*",

@ -95,7 +96,7 @@
  },
  "dependencies": {
    "@elastic/datemath": "5.0.2",
    "@elastic/eui": "9.0.2",
    "@elastic/eui": "9.5.0",
    "@elastic/filesaver": "1.1.2",
    "@elastic/good": "8.1.1-kibana2",
    "@elastic/numeral": "2.3.2",

@ -111,6 +112,8 @@
    "@kbn/ui-framework": "1.0.0",
    "@types/json-stable-stringify": "^1.0.32",
    "@types/lodash.clonedeep": "^4.5.4",
    "@types/react-grid-layout": "^0.16.7",
    "@types/recompose": "^0.30.5",
    "JSONStream": "1.1.1",
    "abortcontroller-polyfill": "^1.1.9",
    "angular": "1.6.9",

@ -201,12 +204,10 @@
    "react-color": "^2.13.8",
    "react-dom": "^16.8.0",
    "react-grid-layout": "^0.16.2",
    "react-input-range": "^1.3.0",
    "react-markdown": "^3.1.4",
    "react-redux": "^5.0.7",
    "react-router-dom": "^4.3.1",
    "react-sizeme": "^2.3.6",
    "react-toggle": "4.0.2",
    "reactcss": "1.2.3",
    "redux": "4.0.0",
    "redux-actions": "2.2.1",

@ -253,11 +254,11 @@
    "@babel/parser": "^7.3.4",
    "@babel/types": "^7.3.4",
    "@elastic/eslint-config-kibana": "0.15.0",
    "@elastic/eslint-plugin-kibana-custom": "1.1.0",
    "@elastic/makelogs": "^4.4.0",
    "@kbn/es": "1.0.0",
    "@kbn/eslint-import-resolver-kibana": "2.0.0",
    "@kbn/eslint-plugin-license-header": "1.0.0",
    "@kbn/eslint-plugin-eslint": "1.0.0",
    "@kbn/expect": "1.0.0",
    "@kbn/plugin-generator": "1.0.0",
    "@kbn/test": "1.0.0",
    "@octokit/rest": "^15.10.0",

@ -347,7 +348,6 @@
    "eslint-plugin-prefer-object-spread": "^1.2.1",
    "eslint-plugin-prettier": "^2.6.2",
    "eslint-plugin-react": "^7.11.1",
    "expect.js": "0.3.1",
    "faker": "1.1.0",
    "fetch-mock": "7.3.0",
    "geckodriver": "1.12.2",

@ -390,6 +390,7 @@
    "normalize-path": "^3.0.0",
    "pixelmatch": "4.0.2",
    "pkg-up": "^2.0.0",
    "pngjs": "^3.4.0",
    "postcss": "^7.0.5",
    "postcss-url": "^8.0.0",
    "prettier": "^1.14.3",

@ -415,6 +416,7 @@
    "tslint-microsoft-contrib": "^6.0.0",
    "tslint-plugin-prettier": "^2.0.0",
    "typescript": "^3.3.3333",
    "typings-tester": "^0.3.2",
    "vinyl-fs": "^3.0.2",
    "xml2js": "^0.4.19",
    "xmlbuilder": "9.0.4",

@ -20,7 +20,7 @@
import dateMath from '../src/index';
import moment from 'moment';
import sinon from 'sinon';
import expect from 'expect.js';
import expect from '@kbn/expect';

/**
 * Require a new instance of the moment library, bypassing the require cache.

@ -18,7 +18,7 @@ module.exports = {

  settings: {
    react: {
      version: semver.coerce(PKG.dependencies.react),
      version: semver.valid(semver.coerce(PKG.dependencies.react)),
    },
  },

@ -1,14 +0,0 @@
module.exports = {
  rules: {
    'no-default-export': {
      meta: {
        schema: []
      },
      create: context => ({
        ExportDefaultDeclaration: (node) => {
          context.report(node, 'Default exports not allowed.');
        }
      })
    }
  }
};

@ -1,10 +0,0 @@
{
  "name": "@elastic/eslint-plugin-kibana-custom",
  "version": "1.1.0",
  "license": "Apache-2.0",
  "description": "Custom ESLint rules for Kibana",
  "repository": {
    "type": "git",
    "url": "https://github.com/elastic/kibana/tree/master/packages/%40elastic/eslint-plugin-kibana-custom"
  }
}

@ -20,8 +20,8 @@
  },
  "devDependencies": {
    "@kbn/babel-preset": "1.0.0",
    "@kbn/expect": "1.0.0",
    "babel-cli": "^6.26.0",
    "chance": "1.0.6",
    "expect.js": "0.3.1"
    "chance": "1.0.6"
  }
}

@ -22,8 +22,8 @@
    "@babel/preset-typescript": "^7.3.3",
    "@kbn/babel-preset": "1.0.0",
    "@kbn/dev-utils": "1.0.0",
    "@kbn/expect": "1.0.0",
    "del": "^3.0.0",
    "expect.js": "0.3.1",
    "getopts": "^2.2.3",
    "supports-color": "^6.1.0",
    "typescript": "^3.3.3333"

@ -17,7 +17,7 @@
 * under the License.
 */

import expect from 'expect.js';
import expect from '@kbn/expect';
import _ from 'lodash';
import { migrateFilter } from '../migrate_filter';

@ -17,7 +17,7 @@
 * under the License.
 */

import expect from 'expect.js';
import expect from '@kbn/expect';
import { buildEsQuery } from '../build_es_query';
import indexPattern from '../../__fixtures__/index_pattern_response.json';
import { fromKueryExpression, toElasticsearchQuery } from '../../kuery';

@ -17,7 +17,7 @@
 * under the License.
 */

import expect from 'expect.js';
import expect from '@kbn/expect';
import { decorateQuery } from '../decorate_query';

describe('Query decorator', function () {

@ -17,7 +17,7 @@
 * under the License.
 */

import expect from 'expect.js';
import expect from '@kbn/expect';
import { filterMatchesIndex } from '../filter_matches_index';

describe('filterMatchesIndex', function () {

@ -17,7 +17,7 @@
 * under the License.
 */

import expect from 'expect.js';
import expect from '@kbn/expect';
import { buildQueryFromFilters } from '../from_filters';

describe('build query', function () {

@ -19,7 +19,7 @@

import { buildQueryFromKuery } from '../from_kuery';
import indexPattern from '../../__fixtures__/index_pattern_response.json';
import expect from 'expect.js';
import expect from '@kbn/expect';
import { fromKueryExpression, toElasticsearchQuery } from '../../kuery';

describe('build query', function () {

@ -17,7 +17,7 @@
 * under the License.
 */

import expect from 'expect.js';
import expect from '@kbn/expect';
import { buildQueryFromLucene } from '../from_lucene';
import { decorateQuery } from '../decorate_query';
import { luceneStringToDsl } from '../lucene_string_to_dsl';

@ -18,7 +18,7 @@
 */

import { luceneStringToDsl } from '../lucene_string_to_dsl';
import expect from 'expect.js';
import expect from '@kbn/expect';

describe('build query', function () {