Merge branch 'master' of github.com:elastic/kibana into security-rule-type
.github/CODEOWNERS
@@ -315,7 +315,6 @@
/src/plugins/es_ui_shared/ @elastic/kibana-stack-management
/x-pack/plugins/cross_cluster_replication/ @elastic/kibana-stack-management
/x-pack/plugins/index_lifecycle_management/ @elastic/kibana-stack-management
/x-pack/plugins/console_extensions/ @elastic/kibana-stack-management
/x-pack/plugins/grokdebugger/ @elastic/kibana-stack-management
/x-pack/plugins/index_management/ @elastic/kibana-stack-management
/x-pack/plugins/license_api_guard/ @elastic/kibana-stack-management
@@ -330,7 +329,6 @@
/x-pack/plugins/ingest_pipelines/ @elastic/kibana-stack-management
/packages/kbn-ace/ @elastic/kibana-stack-management
/packages/kbn-monaco/ @elastic/kibana-stack-management
#CC# /x-pack/plugins/console_extensions/ @elastic/kibana-stack-management
#CC# /x-pack/plugins/cross_cluster_replication/ @elastic/kibana-stack-management

# Security Solution
.github/workflows/project-assigner.yml
@@ -8,8 +8,17 @@ jobs:
    name: Assign issue or PR to project based on label
    steps:
      - name: Assign to project
        uses: elastic/github-actions/project-assigner@v2.0.0
        uses: elastic/github-actions/project-assigner@v2.1.0
        id: project_assigner
        with:
          issue-mappings: '[{"label": "Feature:Lens", "projectNumber": 32, "columnName": "Long-term goals"}, {"label": "Feature:Canvas", "projectNumber": 38, "columnName": "Inbox"}, {"label": "Feature:Dashboard", "projectNumber": 68, "columnName": "Inbox"}, {"label": "Feature:Drilldowns", "projectNumber": 68, "columnName": "Inbox"}, {"label": "Feature:Input Controls", "projectNumber": 72, "columnName": "Inbox"}]'
          issue-mappings: |
            [
              {"label": "Feature:Lens", "projectNumber": 32, "columnName": "Long-term goals"},
              {"label": "Feature:Discover", "projectNumber": 44, "columnName": "Inbox"},
              {"label": "Feature:Canvas", "projectNumber": 38, "columnName": "Inbox"},
              {"label": "Feature:Dashboard", "projectNumber": 68, "columnName": "Inbox"},
              {"label": "Feature:Drilldowns", "projectNumber": 68, "columnName": "Inbox"},
              {"label": "Feature:Input Controls", "projectNumber": 72, "columnName": "Inbox"},
              {"label": "Team:Security", "projectNumber": 320, "columnName": "Awaiting triage", "projectScope": "org"}
            ]
          ghToken: ${{ secrets.PROJECT_ASSIGNER_TOKEN }}
@@ -16,6 +16,7 @@
"esUi": "src/plugins/es_ui_shared",
"devTools": "src/plugins/dev_tools",
"expressions": "src/plugins/expressions",
"expressionError": "src/plugins/expression_error",
"expressionRevealImage": "src/plugins/expression_reveal_image",
"inputControl": "src/plugins/input_control_vis",
"inspector": "src/plugins/inspector",
@@ -42,6 +42,10 @@
#elasticsearch.username: "kibana_system"
#elasticsearch.password: "pass"

# Kibana can also authenticate to Elasticsearch via "service account tokens".
# It may use this token instead of a username/password.
# elasticsearch.serviceAccountToken: "my_token"

# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
# These settings enable SSL for outgoing requests from the Kibana server to the browser.
#server.ssl.enabled: false
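For background on the new `elasticsearch.serviceAccountToken` setting: the token itself is created in Elasticsearch (7.13+) with the service accounts API. A minimal sketch, assuming the built-in `elastic/kibana` service account and an illustrative token name of `my_token`; the `token.value` field of the response is the string this setting expects:

```
POST /_security/service/elastic/kibana/credential/token/my_token
```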
@@ -43,6 +43,7 @@ Supported configurations are also tagged with the image:./images/dynamic-config.

[horizontal]
Go Agent:: {apm-go-ref}/configuration.html[Configuration reference]
iOS agent:: _Not yet supported_
Java Agent:: {apm-java-ref}/configuration.html[Configuration reference]
.NET Agent:: {apm-dotnet-ref}/configuration.html[Configuration reference]
Node.js Agent:: {apm-node-ref}/configuration.html[Configuration reference]
@@ -1,69 +1,57 @@
[role="xpack"]
[[apm-alerts]]
=== Alerts
=== Alerts and rules

++++
<titleabbrev>Create an alert</titleabbrev>
++++

The APM app allows you to define **rules** to detect complex conditions within your APM data
and trigger built-in **actions** when those conditions are met.

The APM app integrates with Kibana's {kibana-ref}/alerting-getting-started.html[alerting and actions] feature.
It provides a set of built-in **actions** and APM specific threshold **alerts** for you to use
and enables central management of all alerts from <<management,Kibana Management>>.
The following **rules** are supported:

* Latency anomaly rule:
Alert when latency of a service is abnormal
* Transaction error rate threshold rule:
Alert when the service's transaction error rate is above the defined threshold
* Error count threshold rule:
Alert when the number of errors in a service exceeds a defined threshold

[role="screenshot"]
image::apm/images/apm-alert.png[Create an alert in the APM app]

For a walkthrough of the alert flyout panel, including detailed information on each configurable property,
see Kibana's <<create-edit-rules,defining alerts>>.
For a complete walkthrough of the **Create rule** flyout panel, including detailed information on each configurable property,
see Kibana's <<create-edit-rules,create and edit rules>>.

The APM app supports four different types of alerts:

* Transaction duration anomaly:
alerts when the service's transaction duration reaches a certain anomaly score
* Transaction duration threshold:
alerts when the service's transaction duration exceeds a given time limit over a given time frame
* Transaction error rate threshold:
alerts when the service's transaction error rate is above the selected rate over a given time frame
* Error count threshold:
alerts when service exceeds a selected number of errors over a given time frame

Below, we'll walk through the creation of two of these alerts.
Below, we'll walk through the creation of two APM rules.

[float]
[[apm-create-transaction-alert]]
=== Example: create a transaction duration alert
=== Example: create a latency anomaly rule

Transaction duration alerts trigger when the duration of a specific transaction type in a service exceeds a defined threshold.
This guide will create an alert for the `opbeans-java` service based on the following criteria:
Latency anomaly rules trigger when the latency of a service is abnormal.
This guide will create an alert for all services based on the following criteria:

* Environment: Production
* Transaction type: `transaction.type:request`
* Average request is above `1500ms` for the last 5 minutes
* Check every 10 minutes, and repeat the alert every 30 minutes
* Send the alert via Slack
* Environment: production
* Severity level: critical
* Run every five minutes
* Send an alert to a Slack channel only when the rule status changes

From the APM app, navigate to the `opbeans-java` service and select
**Alerts** > **Create threshold alert** > **Transaction duration**.
From any page in the APM app, select **Alerts and rules** > **Latency** > **Create anomaly rule**.
Change the name of the alert, but do not edit the tags.

`Transaction duration | opbeans-java` is automatically set as the name of the alert,
and `apm` and `service.name:opbeans-java` are added as tags.
It's fine to change the name of the alert, but do not edit the tags.
Based on the criteria above, define the following rule details:

Based on the alert criteria, define the following alert details:
* **Check every** - `5 minutes`
* **Notify** - "Only on status change"
* **Environment** - `all`
* **Has anomaly with severity** - `critical`

* **Check every** - `10 minutes`
* **Notify every** - `30 minutes`
* **TYPE** - `request`
* **WHEN** - `avg`
* **IS ABOVE** - `1500ms`
* **FOR THE LAST** - `5 minutes`

Select an action type.
Multiple action types can be selected, but in this example, we want to post to a Slack channel.
Next, add a connector. Multiple connectors can be selected, but in this example we're interested in Slack.
Select **Slack** > **Create a connector**.
Enter a name for the connector,
and paste the webhook URL.
and paste your Slack webhook URL.
See Slack's webhook documentation if you need to create one.

A default message is provided as a starting point for your alert.

@@ -72,35 +60,32 @@ to pass additional alert values at the time a condition is detected to an action
A list of available variables can be accessed by selecting the
**add variable** button image:apm/images/add-variable.png[add variable button].
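As an aside, action messages are Mustache templates, so those variables are interpolated with `{{ }}` syntax. A hypothetical Slack message sketch (the variable names are illustrative; the exact set available depends on the rule type and is not taken from this PR):

```
{{context.serviceName}} latency anomaly detected in {{context.environment}}.
Triggered value: {{context.triggerValue}} (threshold: {{context.threshold}})
```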
Select **Save**. The alert has been created and is now active!
Click **Save**. The rule has been created and is now active!

[float]
[[apm-create-error-alert]]
=== Example: create an error rate alert
=== Example: create an error count threshold alert

Error rate alerts trigger when the number of errors in a service exceeds a defined threshold.
This guide creates an alert for the `opbeans-python` service based on the following criteria:
The error count threshold alert triggers when the number of errors in a service exceeds a defined threshold.
This guide will create an alert for all services based on the following criteria:

* Environment: Production
* All environments
* Error rate is above 25 for the last minute
* Check every 1 minute, and repeat the alert every 10 minutes
* Send the alert via email to the `opbeans-python` team
* Check every 1 minute, and alert every time the rule is active
* Send the alert via email to the site reliability team

From the APM app, navigate to the `opbeans-python` service and select
**Alerts** > **Create threshold alert** > **Error rate**.
From any page in the APM app, select **Alerts and rules** > **Error count** > **Create threshold rule**.
Change the name of the alert, but do not edit the tags.

`Error rate | opbeans-python` is automatically set as the name of the alert,
and `apm` and `service.name:opbeans-python` are added as tags.
It's fine to change the name of the alert, but do not edit the tags.

Based on the alert criteria, define the following alert details:
Based on the criteria above, define the following rule details:

* **Check every** - `1 minute`
* **Notify every** - `10 minutes`
* **IS ABOVE** - `25 errors`
* **FOR THE LAST** - `1 minute`
* **Notify** - "Every time alert is active"
* **Environment** - `all`
* **Is above** - `25 errors`
* **For the last** - `1 minute`

Select the **Email** action type and click **Create a connector**.
Select the **Email** connector and click **Create a connector**.
Fill out the required details: sender, host, port, etc., and click **save**.

A default message is provided as a starting point for your alert.

@@ -109,14 +94,14 @@ to pass additional alert values at the time a condition is detected to an action
A list of available variables can be accessed by selecting the
**add variable** button image:apm/images/add-variable.png[add variable button].

Select **Save**. The alert has been created and is now active!
Click **Save**. The alert has been created and is now active!

[float]
[[apm-alert-manage]]
=== Manage alerts and actions
=== Manage alerts and rules

From the APM app, select **Alerts** > **View active alerts** to be taken to the Kibana alerts and actions management page.
From this page, you can create, edit, disable, mute, and delete alerts, and create, edit, and disable connectors.
From the APM app, select **Alerts and rules** > **Manage rules** to be taken to the Kibana **Rules and Connectors** page.
From this page, you can disable, mute, and delete APM alerts.

[float]
[[apm-alert-more-info]]

@@ -126,4 +111,4 @@ See {kibana-ref}/alerting-getting-started.html[alerting and actions] for more in

NOTE: If you are using an **on-premise** Elastic Stack deployment with security,
communication between Elasticsearch and Kibana must have TLS configured.
More information is in the alerting {kibana-ref}/alerting-setup.html#alerting-prerequisites[prerequisites].
@@ -36,6 +36,7 @@ It's vital to be consistent when naming environments in your agents.
To learn how to configure service environments, see the specific agent documentation:

* *Go:* {apm-go-ref}/configuration.html#config-environment[`ELASTIC_APM_ENVIRONMENT`]
* *iOS agent:* _Not yet supported_
* *Java:* {apm-java-ref}/config-core.html#config-environment[`environment`]
* *.NET:* {apm-dotnet-ref}/config-core.html#config-environment[`Environment`]
* *Node.js:* {apm-node-ref}/configuration.html#environment[`environment`]
@@ -108,6 +108,7 @@ Service maps are supported for the following Agent versions:

[horizontal]
Go agent:: ≥ v1.7.0
iOS agent:: _Not yet supported_
Java agent:: ≥ v1.13.0
.NET agent:: ≥ v1.3.0
Node.js agent:: ≥ v3.6.0
@@ -100,22 +100,22 @@ the selected transaction group.
image::apm/images/apm-transaction-response-dist.png[Example view of response time distribution]

[[transaction-duration-distribution]]
==== Transactions duration distribution
==== Latency distribution

This chart plots all transaction durations for the given time period.
A plot of all transaction durations for the given time period.
The screenshot below shows a typical distribution,
and indicates most of our requests were served quickly -- awesome!
It's the requests on the right, the ones taking longer than average, that we probably want to focus on.
It's the requests on the right, the ones taking longer than average, that we probably need to focus on.

[role="screenshot"]
image::apm/images/apm-transaction-duration-dist.png[Example view of transactions duration distribution graph]
image::apm/images/apm-transaction-duration-dist.png[Example view of latency distribution graph]

Select a transaction duration _bucket_ to display up to ten trace samples.
Select a latency duration _bucket_ to display up to ten trace samples.

[[transaction-trace-sample]]
==== Trace sample

Trace samples are based on the _bucket_ selection in the *Transactions duration distribution* chart;
Trace samples are based on the _bucket_ selection in the *Latency distribution* chart;
update the samples by selecting a new _bucket_.
The number of requests per bucket is displayed when hovering over the graph,
and the selected bucket is highlighted to stand out.
@@ -15,6 +15,7 @@ don't forget to check our other troubleshooting guides or discussion forum:
* {apm-server-ref}/troubleshooting.html[APM Server troubleshooting]
* {apm-dotnet-ref}/troubleshooting.html[.NET agent troubleshooting]
* {apm-go-ref}/troubleshooting.html[Go agent troubleshooting]
* {apm-ios-ref}/troubleshooting.html[iOS agent troubleshooting]
* {apm-java-ref}/trouble-shooting.html[Java agent troubleshooting]
* {apm-node-ref}/troubleshooting.html[Node.js agent troubleshooting]
* {apm-php-ref}/troubleshooting.html[PHP agent troubleshooting]
@@ -72,6 +72,10 @@ This API doesn't support angular, for registering angular dev tools, bootstrap a
|This plugin contains reusable code in the form of self-contained modules (or libraries). Each of these modules exports a set of functionality relevant to the domain of the module.

|{kib-repo}blob/{branch}/src/plugins/expression_error/README.md[expressionError]
|Expression Error plugin adds an error renderer to the expression plugin. The renderer will display the error image.

|{kib-repo}blob/{branch}/src/plugins/expression_reveal_image/README.md[expressionRevealImage]
|Expression Reveal Image plugin adds a revealImage function to the expression plugin and an associated renderer. The renderer will display the given percentage of a given image.

@@ -354,10 +358,6 @@ The plugin exposes the static DefaultEditorController class to consume.
The client-side plugin configures following values:

|{kib-repo}blob/{branch}/x-pack/plugins/console_extensions/README.md[consoleExtensions]
|This plugin provides autocomplete definitions of licensed APIs to the OSS Console plugin.

|{kib-repo}blob/{branch}/x-pack/plugins/cross_cluster_replication/README.md[crossClusterReplication]
|You can run a local cluster and simulate a remote cluster within a single Kibana directory.

@@ -393,7 +393,7 @@ security and spaces filtering as well as performing audit logging.

|{kib-repo}blob/{branch}/x-pack/plugins/enterprise_search/README.md[enterpriseSearch]
|This plugin's goal is to provide a Kibana user interface to the Enterprise Search solution's products (App Search and Workplace Search). In it's current MVP state, the plugin provides the following with the goal of gathering user feedback and raising product awareness:
|This plugin provides beta Kibana user interfaces for managing the Enterprise Search solution and its products, App Search and Workplace Search.

|{kib-repo}blob/{branch}/x-pack/plugins/event_log/README.md[eventLog]
@@ -9,7 +9,7 @@ Configuration options to be used to create a [cluster client](./kibana-plugin-co
<b>Signature:</b>

```typescript
export declare type ElasticsearchClientConfig = Pick<ElasticsearchConfig, 'customHeaders' | 'sniffOnStart' | 'sniffOnConnectionFault' | 'requestHeadersWhitelist' | 'sniffInterval' | 'hosts' | 'username' | 'password'> & {
export declare type ElasticsearchClientConfig = Pick<ElasticsearchConfig, 'customHeaders' | 'sniffOnStart' | 'sniffOnConnectionFault' | 'requestHeadersWhitelist' | 'sniffInterval' | 'hosts' | 'username' | 'password' | 'serviceAccountToken'> & {
    pingTimeout?: ElasticsearchConfig['pingTimeout'] | ClientOptions['pingTimeout'];
    requestTimeout?: ElasticsearchConfig['requestTimeout'] | ClientOptions['requestTimeout'];
    ssl?: Partial<ElasticsearchConfig['ssl']>;
@@ -31,10 +31,11 @@ export declare class ElasticsearchConfig
| [pingTimeout](./kibana-plugin-core-server.elasticsearchconfig.pingtimeout.md) | | <code>Duration</code> | Timeout after which PING HTTP request will be aborted and retried. |
| [requestHeadersWhitelist](./kibana-plugin-core-server.elasticsearchconfig.requestheaderswhitelist.md) | | <code>string[]</code> | List of Kibana client-side headers to send to Elasticsearch when request scoped cluster client is used. If this is an empty array then \*no\* client-side will be sent. |
| [requestTimeout](./kibana-plugin-core-server.elasticsearchconfig.requesttimeout.md) | | <code>Duration</code> | Timeout after which HTTP request will be aborted and retried. |
| [serviceAccountToken](./kibana-plugin-core-server.elasticsearchconfig.serviceaccounttoken.md) | | <code>string</code> | If Elasticsearch security features are enabled, this setting provides the service account token that the Kibana server uses to perform its administrative functions.<!-- -->This is an alternative to specifying a username and password. |
| [shardTimeout](./kibana-plugin-core-server.elasticsearchconfig.shardtimeout.md) | | <code>Duration</code> | Timeout for Elasticsearch to wait for responses from shards. Set to 0 to disable. |
| [sniffInterval](./kibana-plugin-core-server.elasticsearchconfig.sniffinterval.md) | | <code>false | Duration</code> | Interval to perform a sniff operation and make sure the list of nodes is complete. If <code>false</code> then sniffing is disabled. |
| [sniffOnConnectionFault](./kibana-plugin-core-server.elasticsearchconfig.sniffonconnectionfault.md) | | <code>boolean</code> | Specifies whether the client should immediately sniff for a more current list of nodes when a connection dies. |
| [sniffOnStart](./kibana-plugin-core-server.elasticsearchconfig.sniffonstart.md) | | <code>boolean</code> | Specifies whether the client should attempt to detect the rest of the cluster when it is first instantiated. |
| [ssl](./kibana-plugin-core-server.elasticsearchconfig.ssl.md) | | <code>Pick<SslConfigSchema, Exclude<keyof SslConfigSchema, 'certificateAuthorities' | 'keystore' | 'truststore'>> & {</code><br/><code> certificateAuthorities?: string[];</code><br/><code> }</code> | Set of settings configure SSL connection between Kibana and Elasticsearch that are required when <code>xpack.ssl.verification_mode</code> in Elasticsearch is set to either <code>certificate</code> or <code>full</code>. |
| [username](./kibana-plugin-core-server.elasticsearchconfig.username.md) | | <code>string</code> | If Elasticsearch is protected with basic authentication, this setting provides the username that the Kibana server uses to perform its administrative functions. |
| [username](./kibana-plugin-core-server.elasticsearchconfig.username.md) | | <code>string</code> | If Elasticsearch is protected with basic authentication, this setting provides the username that the Kibana server uses to perform its administrative functions. Cannot be used in conjunction with serviceAccountToken. |
@@ -0,0 +1,15 @@
<!-- Do not edit this file. It is automatically generated by API Documenter. -->

[Home](./index.md) > [kibana-plugin-core-server](./kibana-plugin-core-server.md) > [ElasticsearchConfig](./kibana-plugin-core-server.elasticsearchconfig.md) > [serviceAccountToken](./kibana-plugin-core-server.elasticsearchconfig.serviceaccounttoken.md)

## ElasticsearchConfig.serviceAccountToken property

If Elasticsearch security features are enabled, this setting provides the service account token that the Kibana server uses to perform its administrative functions.

This is an alternative to specifying a username and password.

<b>Signature:</b>

```typescript
readonly serviceAccountToken?: string;
```
@@ -4,7 +4,7 @@

## ElasticsearchConfig.username property

If Elasticsearch is protected with basic authentication, this setting provides the username that the Kibana server uses to perform its administrative functions.
If Elasticsearch is protected with basic authentication, this setting provides the username that the Kibana server uses to perform its administrative functions. Cannot be used in conjunction with serviceAccountToken.

<b>Signature:</b>
@@ -11,7 +11,7 @@
<b>Signature:</b>

```typescript
export declare type LegacyElasticsearchClientConfig = Pick<ConfigOptions, 'keepAlive' | 'log' | 'plugins'> & Pick<ElasticsearchConfig, 'apiVersion' | 'customHeaders' | 'requestHeadersWhitelist' | 'sniffOnStart' | 'sniffOnConnectionFault' | 'hosts' | 'username' | 'password'> & {
export declare type LegacyElasticsearchClientConfig = Pick<ConfigOptions, 'keepAlive' | 'log' | 'plugins'> & Pick<ElasticsearchConfig, 'apiVersion' | 'customHeaders' | 'requestHeadersWhitelist' | 'sniffOnStart' | 'sniffOnConnectionFault' | 'hosts' | 'username' | 'password' | 'serviceAccountToken'> & {
    pingTimeout?: ElasticsearchConfig['pingTimeout'] | ConfigOptions['pingTimeout'];
    requestTimeout?: ElasticsearchConfig['requestTimeout'] | ConfigOptions['requestTimeout'];
    sniffInterval?: ElasticsearchConfig['sniffInterval'] | ConfigOptions['sniffInterval'];
@@ -0,0 +1,19 @@
<!-- Do not edit this file. It is automatically generated by API Documenter. -->

[Home](./index.md) > [kibana-plugin-plugins-data-public](./kibana-plugin-plugins-data-public.md) > [IndexPatternType](./kibana-plugin-plugins-data-public.indexpatterntype.md)

## IndexPatternType enum

<b>Signature:</b>

```typescript
export declare enum IndexPatternType
```

## Enumeration Members

| Member | Value | Description |
| --- | --- | --- |
| DEFAULT | <code>"default"</code> | |
| ROLLUP | <code>"rollup"</code> | |
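As a usage illustration (not part of this PR), a self-contained sketch of how consuming code might branch on the new enum; the `indexPattern` literal here is hypothetical:

```typescript
// Enum values mirror the table above.
enum IndexPatternType {
  DEFAULT = 'default',
  ROLLUP = 'rollup',
}

// Hypothetical stand-in; real index patterns come from the data plugin.
const indexPattern = {
  title: 'rollup_logstash,kibana_sample_data_logs',
  type: IndexPatternType.ROLLUP,
};

if (indexPattern.type === IndexPatternType.ROLLUP) {
  // Rollup patterns expose only the aggregations captured by the rollup job.
  console.log('restrict UI to rolled-up fields and aggregations');
}
```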
@@ -15,4 +15,5 @@ export interface TypeMeta
| Property | Type | Description |
| --- | --- | --- |
| [aggs](./kibana-plugin-plugins-data-public.indexpatterntypemeta.aggs.md) | <code>Record<string, AggregationRestrictions></code> | |
| [params](./kibana-plugin-plugins-data-public.indexpatterntypemeta.params.md) | <code>{</code><br/><code> rollup_index: string;</code><br/><code> }</code> | |
@@ -0,0 +1,13 @@
<!-- Do not edit this file. It is automatically generated by API Documenter. -->

[Home](./index.md) > [kibana-plugin-plugins-data-public](./kibana-plugin-plugins-data-public.md) > [IndexPatternTypeMeta](./kibana-plugin-plugins-data-public.indexpatterntypemeta.md) > [params](./kibana-plugin-plugins-data-public.indexpatterntypemeta.params.md)

## IndexPatternTypeMeta.params property

<b>Signature:</b>

```typescript
params?: {
    rollup_index: string;
};
```
@@ -31,6 +31,7 @@
| --- | --- |
| [BUCKET\_TYPES](./kibana-plugin-plugins-data-public.bucket_types.md) | |
| [ES\_FIELD\_TYPES](./kibana-plugin-plugins-data-public.es_field_types.md) | \* |
| [IndexPatternType](./kibana-plugin-plugins-data-public.indexpatterntype.md) | |
| [KBN\_FIELD\_TYPES](./kibana-plugin-plugins-data-public.kbn_field_types.md) | \* |
| [METRIC\_TYPES](./kibana-plugin-plugins-data-public.metric_types.md) | |
| [QuerySuggestionTypes](./kibana-plugin-plugins-data-public.querysuggestiontypes.md) | |
@@ -19,6 +19,7 @@ export interface QuerySuggestionGetFnArgs
| [boolFilter](./kibana-plugin-plugins-data-public.querysuggestiongetfnargs.boolfilter.md) | <code>any</code> | |
| [indexPatterns](./kibana-plugin-plugins-data-public.querysuggestiongetfnargs.indexpatterns.md) | <code>IIndexPattern[]</code> | |
| [language](./kibana-plugin-plugins-data-public.querysuggestiongetfnargs.language.md) | <code>string</code> | |
| [method](./kibana-plugin-plugins-data-public.querysuggestiongetfnargs.method.md) | <code>ValueSuggestionsMethod</code> | |
| [query](./kibana-plugin-plugins-data-public.querysuggestiongetfnargs.query.md) | <code>string</code> | |
| [selectionEnd](./kibana-plugin-plugins-data-public.querysuggestiongetfnargs.selectionend.md) | <code>number</code> | |
| [selectionStart](./kibana-plugin-plugins-data-public.querysuggestiongetfnargs.selectionstart.md) | <code>number</code> | |
@@ -0,0 +1,11 @@
<!-- Do not edit this file. It is automatically generated by API Documenter. -->

[Home](./index.md) > [kibana-plugin-plugins-data-public](./kibana-plugin-plugins-data-public.md) > [QuerySuggestionGetFnArgs](./kibana-plugin-plugins-data-public.querysuggestiongetfnargs.md) > [method](./kibana-plugin-plugins-data-public.querysuggestiongetfnargs.method.md)

## QuerySuggestionGetFnArgs.method property

<b>Signature:</b>

```typescript
method?: ValueSuggestionsMethod;
```
@@ -39,5 +39,5 @@ export declare class Execution<Input = unknown, Output = unknown, InspectorAdapt
| [invokeChain(chainArr, input)](./kibana-plugin-plugins-expressions-public.execution.invokechain.md) | | |
| [invokeFunction(fn, input, args)](./kibana-plugin-plugins-expressions-public.execution.invokefunction.md) | | |
| [resolveArgs(fnDef, input, argAsts)](./kibana-plugin-plugins-expressions-public.execution.resolveargs.md) | | |
| [start(input)](./kibana-plugin-plugins-expressions-public.execution.start.md) | | Call this method to start execution.<!-- -->N.B. <code>input</code> is initialized to <code>null</code> rather than <code>undefined</code> for legacy reasons, because in legacy interpreter it was set to <code>null</code> by default. |
| [start(input, isSubExpression)](./kibana-plugin-plugins-expressions-public.execution.start.md) | | Call this method to start execution.<!-- -->N.B. <code>input</code> is initialized to <code>null</code> rather than <code>undefined</code> for legacy reasons, because in legacy interpreter it was set to <code>null</code> by default. |
@@ -11,7 +11,7 @@ N.B. `input` is initialized to `null` rather than `undefined` for legacy reasons
<b>Signature:</b>

```typescript
start(input?: Input): Observable<ExecutionResult<Output | ExpressionValueError>>;
start(input?: Input, isSubExpression?: boolean): Observable<ExecutionResult<Output | ExpressionValueError>>;
```

## Parameters

@@ -19,6 +19,7 @@ start(input?: Input): Observable<ExecutionResult<Output | ExpressionValueError>>
| Parameter | Type | Description |
| --- | --- | --- |
| input | <code>Input</code> | |
| isSubExpression | <code>boolean</code> | |

<b>Returns:</b>
@@ -39,5 +39,5 @@ export declare class Execution<Input = unknown, Output = unknown, InspectorAdapt
| [invokeChain(chainArr, input)](./kibana-plugin-plugins-expressions-server.execution.invokechain.md) | | |
| [invokeFunction(fn, input, args)](./kibana-plugin-plugins-expressions-server.execution.invokefunction.md) | | |
| [resolveArgs(fnDef, input, argAsts)](./kibana-plugin-plugins-expressions-server.execution.resolveargs.md) | | |
| [start(input)](./kibana-plugin-plugins-expressions-server.execution.start.md) | | Call this method to start execution.<!-- -->N.B. <code>input</code> is initialized to <code>null</code> rather than <code>undefined</code> for legacy reasons, because in legacy interpreter it was set to <code>null</code> by default. |
| [start(input, isSubExpression)](./kibana-plugin-plugins-expressions-server.execution.start.md) | | Call this method to start execution.<!-- -->N.B. <code>input</code> is initialized to <code>null</code> rather than <code>undefined</code> for legacy reasons, because in legacy interpreter it was set to <code>null</code> by default. |

@@ -11,7 +11,7 @@ N.B. `input` is initialized to `null` rather than `undefined` for legacy reasons
<b>Signature:</b>

```typescript
start(input?: Input): Observable<ExecutionResult<Output | ExpressionValueError>>;
start(input?: Input, isSubExpression?: boolean): Observable<ExecutionResult<Output | ExpressionValueError>>;
```

## Parameters

@@ -19,6 +19,7 @@ start(input?: Input): Observable<ExecutionResult<Output | ExpressionValueError>>
| Parameter | Type | Description |
| --- | --- | --- |
| input | <code>Input</code> | |
| isSubExpression | <code>boolean</code> | |

<b>Returns:</b>
(Binary changes: several getting-started screenshots updated; new file docs/getting-started/images/tutorial-visualization-treemap.png added.)
@@ -7,7 +7,7 @@ When you've finished, you'll know how to:

* <<explore-the-data,Explore the data with *Discover*.>>

* <<view-and-analyze-the-data,Gain insight into the data with *Dashboard*.>>
* <<view-and-analyze-the-data,Analyze the data with *Dashboard*.>>

[float]
=== Required privileges
@@ -24,125 +24,125 @@ include::{docs-root}/shared/cloud/ess-getting-started.asciidoc[]
[[gs-get-data-into-kibana]]
== Add the sample data

Sample data sets come with sample visualizations, dashboards, and more to help you explore {kib} without adding your own data.
Sample data sets come with sample visualizations, dashboards, and more to help you explore {kib} before you ingest or add your own data.

. From the home page, click *Try our sample data*.
. On the home page, click *Try our sample data*.

. On the *Sample eCommerce orders* card, click *Add data*.
+
[role="screenshot"]
image::getting-started/images/add-sample-data.png[Add data UI]
image::images/add-sample-data.png[Add data UI for the sample data sets]

[float]
[[explore-the-data]]
== Explore the data

*Discover* displays an interactive histogram that shows the distribution of data, or documents, over time, and a table that lists the fields for each document that matches the index. By default, all fields are shown for each matching document.
*Discover* displays the data in an interactive histogram that shows the distribution of data, or documents, over time, and a table that lists the fields for each document that matches the index pattern. To view a subset of the documents, you can apply filters to the data, and customize the table to display only the fields you want to explore.

. Open the main menu, then click *Discover*.

. Change the <<set-time-filter, time filter>> to *Last 7 days*.
+
[role="screenshot"]
image::images/tutorial-discover-2.png[]
image::images/tutorial-discover-2.png[Time filter menu with Last 7 days filter configured]

. To focus in on the documents you want to view, use the <<kuery-query,{kib} Query Language>>. In the *KQL* search field, enter:
. To view the sales orders for women's clothing that are $60 or more, use the <<kuery-query,*KQL*>> search field:
+
[source,text]
products.taxless_price >= 60 AND category : Women's Clothing
+
The query returns the women's clothing orders for $60 and more.
products.taxless_price >= 60 and category : Women's Clothing
+
[role="screenshot"]
image::images/tutorial-discover-4.png[]
image::images/tutorial-discover-4.png[Discover table that displays only the orders for women's clothing that are $60 or more]

. Hover over the list of *Available fields*, then click *+* next to the fields you want to view in the table.
+
For example, when you add the *category* field, the table displays the product categories for the orders.
. To view only the product categories that contain sales orders, hover over the *category* field, then click *+*.
+
[role="screenshot"]
image::images/tutorial-discover-3.png[]
+
For more information, refer to <<discover, *Discover*>>.
image::images/tutorial-discover-3.png[Discover table that displays only the product categories that contain orders]

[float]
[[view-and-analyze-the-data]]
== View and analyze the data

A dashboard is a collection of panels that you can use to view and analyze the data. Panels contain visualizations, interactive controls, Markdown, and more.
A dashboard is a collection of panels that you can use to view and analyze the data. Panels contain visualizations, interactive controls, text, and more.

. Open the main menu, then click *Dashboard*.

. Click *[eCommerce] Revenue Dashboard*.
+
[role="screenshot"]
image::getting-started/images/tutorial-sample-dashboard.png[]

[float]
[[filter-and-query-the-data]]
=== Filter the data

To focus in on the data you want to view on the dashboard, use filters.

. From the *[eCommerce] Controls* panel, make a selection from the *Manufacturer* and *Category* dropdowns, then click *Apply changes*.
+
For example, the following dashboard shows the data for women's clothing from Gnomehouse.
+
[role="screenshot"]
image::getting-started/images/tutorial-sample-filter.png[]

. To manually add a filter, click *Add filter*, then specify the options.
+
For example, to view the orders for Wednesday, select *day_of_week* from the *Field* dropdown, select *is* from the *Operator* dropdown, then select *Wednesday* from the *Value* dropdown.
+
[role="screenshot"]
image::getting-started/images/tutorial-sample-filter2.png[]

. When you are done, remove the filters.
+
For more information, refer to <<dashboard,*Dashboard*>>.
image::getting-started/images/tutorial-sample-dashboard.png[The [eCommerce] Revenue Dashboard that comes with the Sample eCommerce order data set]

[float]
[[create-a-visualization]]
=== Create a visualization panel

Create a treemap panel that shows the top regions and manufacturers, then add the panel to the dashboard.
Create a treemap panel that shows the top sales regions and manufacturers, then add the panel to the dashboard.

. From the toolbar, click *Edit*, then click *Create new*.
. From the toolbar, click *Edit*, then click *Create visualization*.

. On the *New Visualization* window, click *Lens*.
. Open the *Chart type* menu, then select *Treemap*.
+
[role="screenshot"]
image::getting-started/images/tutorial-visualization-dropdown.png[Chart type menu with Treemap selected]

. From the *Available fields* list, drag and drop the following fields to the visualization builder:
. From the *Available fields* list, drag and drop the following fields onto the workspace:

* *geoip.city_name*

* *manufacturer.keyword*
+
. From the visualization dropdown, select *Treemap*.
+
[role="screenshot"]
image::getting-started/images/tutorial-visualization-dropdown.png[Visualization dropdown with Treemap selected]
image::getting-started/images/tutorial-visualization-treemap.png[Treemap that displays Top values of geoip.city_name and Top values of manufacturer.keyword fields]

. Click *Save*.

. On the *Save Lens visualization*, enter a title and make sure *Add to Dashboard after saving* is selected, then click *Save and return*.
. Click *Save and return*.
+
The treemap appears as the last visualization panel on the dashboard.
+
[role="screenshot"]
image::getting-started/images/tutorial-final-dashboard.gif[Final dashboard with new treemap visualization]

[float]
[[interact-with-the-data]]
=== Interact with the data

You can interact with the dashboard data using controls that allow you to apply dashboard-level filters. Interact with the *[eCommerce] Controls* panel to view the women's clothing data from the Gnomehouse manufacturer.

. From the *Manufacturer* dropdown, select *Gnomehouse*.

. From the *Category* dropdown, select *Women's Clothing*.

. Click *Apply changes*.
+
For more information, refer to <<dashboard,Dashboard>>.
[role="screenshot"]
image::getting-started/images/tutorial-sample-filter.png[The [eCommerce] Revenue Dashboard that shows only the women's clothing data from the Gnomehouse manufacturer]

[float]
[[filter-and-query-the-data]]
=== Filter the data

To view a subset of the data, you can apply filters to the dashboard panels. Apply a filter to view the women's clothing data generated on Wednesday from the Gnomehouse manufacturer.

. Click *Add filter*.

. From the *Field* dropdown, select *day_of_week*.

. From the *Operator* dropdown, select *is*.

. From the *Value* dropdown, select *Wednesday*.

. Click *Save*.
+
[role="screenshot"]
image::getting-started/images/tutorial-sample-filter2.png[The [eCommerce] Revenue Dashboard that shows only the women's clothing data generated on Wednesday from the Gnomehouse manufacturer]

[float]
[[quick-start-whats-next]]
== What's next?

If you are ready to add your own data, refer to <<connect-to-elasticsearch,Add data to {kib}>>.
*Add your own data.* Ready to add your own data? Go to {fleet-guide}/fleet-quick-start.html[Quick start: Get logs and metrics into the Elastic Stack] to learn how to ingest your data, or go to <<connect-to-elasticsearch,Add data to {kib}>> and learn about all the other ways you can add data.

If you want to ingest your data, refer to {fleet-guide}/fleet-quick-start.html[Quick start: Get logs and metrics into the Elastic Stack].
*Explore your own data in Discover.* Ready to learn more about exploring your data in *Discover*? Go to <<discover, Discover>>.

If you want to secure access to your data, refer to our guide on <<tutorial-secure-access-to-kibana, securing {kib}>>.
*Create a dashboard with your own data.* Ready to learn more about analyzing your data in *Dashboard*? Go to <<dashboard, Dashboard>>.

If you want to try out {ml-features} with the sample data sets, refer to
{ml-docs}/ml-getting-started.html[Getting started with {ml}].
*Try out the {ml-features}.* Ready to analyze the sample data sets and generate models for its patterns of behavior? Go to {ml-docs}/ml-getting-started.html[Getting started with {ml}].
@@ -64,13 +64,16 @@ You can read more at {ref}/rollup-job-config.html[rollup job configuration].
=== Try it: Create and visualize rolled up data

This example creates a rollup job to capture log data from sample web logs.
To follow along, add the sample web logs data set.
Before you start, <<add-sample-data, add the web logs sample data set>>.

In this example, you want data that is older than 7 days in the target index pattern `kibana_sample_data_logs`
to roll up once a day into the index `rollup_logstash`. You’ll bucket the
to roll up into the `rollup_logstash` index. You’ll bucket the
rolled up data on an hourly basis, using 60m for the time bucket configuration.
This allows for more granular queries, such as 2h and 12h.

For this example, the job will perform the rollup every minute. However, you'd
typically roll up less frequently in production.

[float]
==== Create the rollup job
@@ -80,7 +83,7 @@ As you walk through the *Create rollup job* UI, enter the data:
|*Field* |*Value*

|Name
|logs_job
|`logs_job`

|Index pattern
|`kibana_sample_data_logs`
@@ -89,12 +92,13 @@ As you walk through the *Create rollup job* UI, enter the data:
|`rollup_logstash`

|Frequency
|Every day at midnight
|Every minute

|Page size
|1000

|Delay (latency buffer)|7d
|Latency buffer
|7d

|Date field
|@timestamp
@@ -118,6 +122,8 @@ As you walk through the *Create rollup job* UI, enter the data:
|bytes (average)
|===

On the **Review and save** page, click **Start job now** and **Save**.

The terms, histogram, and metrics fields reflect
the key information to retain in the rolled up data: where visitors are from (geo.src),
what operating system they are using (machine.os.keyword),
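For reference, the UI values in this walkthrough correspond roughly to the following create rollup job API request. This is a sketch, not taken from the docs: the cron expression assumes the every-minute frequency above, and the group and metric fields follow the choices the tutorial mentions (geo.src, machine.os.keyword, average of bytes):

```
PUT _rollup/job/logs_job
{
  "index_pattern": "kibana_sample_data_logs",
  "rollup_index": "rollup_logstash",
  "cron": "0 * * * * ?",
  "page_size": 1000,
  "groups": {
    "date_histogram": {
      "field": "@timestamp",
      "fixed_interval": "60m",
      "delay": "7d"
    },
    "terms": { "fields": ["geo.src", "machine.os.keyword"] }
  },
  "metrics": [
    { "field": "bytes", "metrics": ["avg"] }
  ]
}
```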
@@ -133,7 +139,6 @@ rollup index, or you can remove or archive it using <<creating-index-lifecycle-p
Your next step is to visualize your rolled up data in a vertical bar chart.
Most visualizations support rolled up data, with the exception of Timelion and Vega visualizations.

. Open the main menu, then click *Stack Management > Index Patterns*.

. Click *Create index pattern*, and select *Rollup index pattern* from the dropdown.

@@ -149,7 +154,11 @@ is `rollup_logstash,kibana_sample_data_logs`. In this index pattern, `rollup_log
matches the rolled up index pattern and `kibana_sample_data_logs` matches the index
pattern for raw data.

. Open the main menu, click *Dashboard*, then create and add a vertical bar chart.
. Open the main menu, click *Dashboard*, then *Create dashboard*.

. Set the <<set-time-filter,time filter>> to *Last 90 days*.

. On the dashboard, click *Create visualization*.

. Choose `rollup_logstash,kibana_sample_data_logs`
as your source to see both the raw and rolled up data.
@@ -157,13 +166,15 @@ as your source to see both the raw and rolled up data.
[role="screenshot"]
image::images/management-create-rollup-bar-chart.png[Create visualization of rolled up data]

. Look at the data in your visualization.
+
[role="screenshot"]
image::images/management_rollup_job_vis.png[Visualization of rolled up data]
. Select *Bar vertical stacked* in the chart type dropdown.

. Optionally, create a dashboard that contains visualizations of the rolled up
data, raw data, or both.
. Add the `@timestamp` field to the *Horizontal axis*.

. Add the `bytes` field to the *Vertical axis*, defaulting to an `Average of
bytes`.
+
{kib} creates a vertical bar chart of your data. Select a section of the chart
to zoom in.
+
[role="screenshot"]
image::images/management_rollup_job_dashboard.png[Dashboard with rolled up data]
(Binary changes: updated screenshots under docs/management/snapshot-restore/images/ — create-policy-example.png, create-policy.png, register_repo.png, repository_list.png, snapshot-restore.png, snapshot-retention.png, snapshot_details.png, snapshot_list.png; several other images removed.)
|
@ -2,8 +2,8 @@
|
|||
[[snapshot-repositories]]
|
||||
== Snapshot and Restore
|
||||
|
||||
*Snapshot and Restore* enables you to backup your {es}
|
||||
indices and clusters using data and state snapshots.
|
||||
*Snapshot and Restore* lets you back up a running {es}
|
||||
cluster using data and state snapshots.
|
||||
Snapshots are important because they provide a copy of your data in case
|
||||
something goes wrong. If you need to roll back to an older version of your data,
|
||||
you can restore a snapshot from the repository.
|
||||
|
@@ -34,17 +34,12 @@ The minimum required permissions to access *Snapshot and Restore* include:

To add privileges, open the main menu, then click *Stack Management > Roles*.

[role="screenshot"]
image:management/snapshot-restore/images/snapshot_permissions.png["Edit Role"]

[float]
[[kib-snapshot-register-repository]]
=== Register a repository
A repository is where your snapshots live. You must register a snapshot
repository before you can perform snapshot and restore operations.

If you don't have a repository, Kibana walks you through the process of
registering one.
{kib} supports three repository types
out of the box: shared file system, read-only URL, and source-only.
For more information on these repositories and their settings,
@@ -52,11 +47,9 @@ see {ref}/snapshots-register-repository.html[Repositories].
To use other repositories, such as S3, see
{ref}/snapshots-register-repository.html#snapshots-repository-plugins[Repository plugins].

Once you create a repository, it is listed in the *Repositories*
view.
Click a repository name to view its type, number of snapshots, and settings,
and to verify status.
The *Repositories* view displays a list of registered repositories. Click a
repository name to view information about the repository, verify its status, or
clean it up.

[role="screenshot"]
image:management/snapshot-restore/images/repository_list.png["Repository list"]
@@ -73,15 +66,8 @@ into each snapshot for further investigation.
[role="screenshot"]
image:management/snapshot-restore/images/snapshot_details.png["Snapshot details"]

If you don’t have any snapshots, you can create them from the {kib} <<console-kibana, Console>>. The
{ref}/snapshots-take-snapshot.html[snapshot API]
takes the current state and data in your index or cluster, and then saves it to a
shared repository.

The snapshot process is "smart." Your first snapshot is a complete copy of
the data in your index or cluster.
All subsequent snapshots save the changes between the existing snapshots and
the new data.
If you don’t have any snapshots, you can create them using the
{ref}/create-snapshot-api.html[create snapshot API].

[float]
[[kib-restore-snapshot]]
@@ -93,14 +79,14 @@ restore a snapshot made from one cluster to another cluster. You might
use the restore operation to:

* Recover data lost due to a failure
* Migrate a current Elasticsearch cluster to a new version
* Migrate an {es} cluster to a new version
* Move data from one cluster to another cluster

To get started, go to the *Snapshots* view, find the
snapshot, and click the restore icon in the *Actions* column.
The Restore wizard presents
options for the restore operation, including which
indices to restore and whether to modify the index settings.
data streams and indices to restore and whether to change index settings.
You can restore an existing index only if it’s closed and has the same
number of shards as the index in the snapshot.
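The wizard drives the same operation as the restore snapshot API; a minimal sketch, with an assumed repository and snapshot name:

```
POST _snapshot/my_backup/my_snapshot/_restore
{
  "indices": "kibana_sample_data_logs"
}
```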
@@ -119,7 +105,7 @@ Use a {ref}/snapshot-lifecycle-management-api.html[snapshot lifecycle policy]
to automate the creation and deletion
of cluster snapshots. Taking automatic snapshots:

* Ensures your {es} indices and clusters are backed up on a regular basis
* Ensures your {es} data is backed up on a regular basis
* Ensures a recent and relevant snapshot is available if a situation
arises where a cluster needs to be recovered
* Allows you to manage your snapshots in {kib}, instead of using a
@@ -138,8 +124,8 @@ You can drill down into each policy to examine its settings and last successful

You can perform the following actions on a snapshot policy:

* *Run* a policy immediately without waiting for the scheduled time.
This action is useful before an upgrade or before performing maintenance on indices.
* *Run* a policy immediately without waiting for the scheduled time. This action
is useful before an upgrade or before performing maintenance.
* *Edit* a policy and immediately apply changes to the schedule.
* *Delete* a policy to prevent any future snapshots from being taken.
This action does not cancel any currently ongoing snapshots or remove any previously taken snapshots.
@@ -160,7 +146,7 @@ and then click *Delete snapshots*.

[role="xpack"]
[[snapshot-restore-tutorial]]
=== Tutorial: Snapshot and Restore

Ready to try *Snapshot and Restore*? In this tutorial, you'll learn to:
@@ -174,15 +160,12 @@ Ready to try *Snapshot and Restore*? In this tutorial, you'll learn to:

This example shows you how to register a shared file system repository
and store snapshots.
Before you begin, you must register the location of the repository in the
{ref}/snapshots-register-repository.html#snapshots-filesystem-repository[path.repo] setting on
your master and data nodes. You can do this in one of two ways:

* Edit your `elasticsearch.yml` to include the `path.repo` setting.

* Pass the `path.repo` setting when you start Elasticsearch.
+
`bin/elasticsearch -E path.repo=/tmp/es-backups`
Before you begin, you must first mount the file system to the same location on
all master and data nodes. Then add the file system’s path or parent directory
to the
{ref}/snapshots-register-repository.html#snapshots-filesystem-repository[`path.repo`]
setting in `elasticsearch.yml` for each master and data node.
[float]
|
||||
[[register-repo-example]]
|
||||
|
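
Registration itself can also be done with the create repository API. A minimal sketch with the {es} JavaScript client, not part of this change; it assumes `/tmp/es-backups` is covered by `path.repo`:

[source,ts]
----
import { Client } from '@elastic/elasticsearch';

const client = new Client({ node: 'http://localhost:9200' }); // placeholder node

// Register a shared file system ('fs') repository named my_backup.
await client.snapshot.createRepository({
  repository: 'my_backup',
  body: {
    type: 'fs',
    settings: { location: '/tmp/es-backups' },
  },
});
----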
@ -216,13 +199,10 @@ Use the {ref}/snapshots-take-snapshot.html[snapshot API] to create a snapshot.
. Create the snapshot:
+
[source,js]
PUT /_snapshot/my_backup/2019-04-25_snapshot?wait_for_completion=true
PUT /_snapshot/my_backup/2099-04-25_snapshot?wait_for_completion=true
+
In this example, the snapshot name is `2019-04-25_snapshot`. You can also
In this example, the snapshot name is `2099-04-25_snapshot`. You can also
use a {ref}/date-math-index-names.html[date math expression] for the snapshot name.
+
[role="screenshot"]
image:management/snapshot-restore/images/create_snapshot.png["Create snapshot"]

. Return to *Snapshot and Restore*.
+
@ -251,16 +231,17 @@ image:management/snapshot-restore/images/create-policy-example.png["Create polic
|Snapshot name
|`<daily-snap-{now/d}>`

|Schedule
|Every day at 1:30 a.m.

|Repository
|`my_backup`

|Schedule
|Every day at 1:30 a.m.

|*Snapshot settings* |

|Indices
|Select the indices to back up. By default, all indices, including system indices, are backed up.
|Data streams and indices
|Select the data streams and indices to back up. By default, all data streams
and indices, including system indices, are backed up.

|All other settings
|Use the defaults.
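
The same policy can be defined with the SLM API. A minimal sketch with the {es} JavaScript client, not part of this change; the `daily-snapshots` policy id is a placeholder:

[source,ts]
----
import { Client } from '@elastic/elasticsearch';

const client = new Client({ node: 'http://localhost:9200' }); // placeholder node

await client.slm.putLifecycle({
  policy_id: 'daily-snapshots',
  body: {
    name: '<daily-snap-{now/d}>', // date-math snapshot names, as in the wizard
    schedule: '0 30 1 * * ?', // every day at 1:30 a.m. (cron, UTC)
    repository: 'my_backup',
  },
});
----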
@ -280,20 +261,22 @@ Your new policy is listed in the *Policies* view, and you see a summary of its d

[[restore-snapshot-example]]
==== Restore a snapshot
Finally, you'll restore indices from an existing snapshot.
Finally, you'll restore data streams and indices from an existing snapshot.

. In the *Snapshots* view, find the snapshot you want to restore, for example `2019-04-25_snapshot`.
. In the *Snapshots* view, find the snapshot you want to restore, for example `2099-04-25_snapshot`.
. Click the restore icon in the *Actions* column.
. As you walk through the wizard, enter the following values:
+
|===
|*Logistics* |

|Indices
|Toggle to choose specific indices to restore, or leave in place to restore all indices.
|Data streams and indices
|Toggle to choose specific data streams and indices to restore. Use the default
to restore all data streams and indices in the snapshot.

|Rename indices
|Toggle to give your restored indices new names, or leave in place to restore under original index names.
|Rename data streams and indices
|Toggle to give your restored data streams and indices new names. Use the
default to restore the original data stream and index names.

|All other fields
|Use the defaults.
@ -313,4 +296,4 @@ or leave in place to keep existing settings.
+
The operation loads for a few seconds,
and then you’re navigated to *Restore Status*,
where you can monitor the status of your restored indices.
where you can monitor the status of your restored data streams and indices.
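
The wizard's rename options map to the restore API's `rename_pattern` and `rename_replacement`. A minimal sketch with the {es} JavaScript client, not part of this change; the index pattern and names are placeholders:

[source,ts]
----
import { Client } from '@elastic/elasticsearch';

const client = new Client({ node: 'http://localhost:9200' }); // placeholder node

await client.snapshot.restore({
  repository: 'my_backup',
  snapshot: '2099-04-25_snapshot',
  body: {
    indices: 'my-index-*', // placeholder: restore only matching indices
    rename_pattern: '(.+)', // capture each original name...
    rename_replacement: 'restored_$1', // ...and restore it under a new name
  },
});
----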
@ -20,10 +20,10 @@ The index must contain at least one field mapped as {ref}/geo-point.html[geo_poi
Results are limited to the `index.max_result_window` index setting, which defaults to 10000.
Select the appropriate *Scaling* option for your use case.
+
* *Limit results to 10000.* The layer displays features from the first `index.max_result_window` documents.
* *Limit results to 10,000.* The layer displays features from the first `index.max_result_window` documents.
Results exceeding `index.max_result_window` are not displayed.

* *Show clusters when results exceed 10000.* When results exceed `index.max_result_window`, the layer uses {ref}/search-aggregations-bucket-geotilegrid-aggregation.html[GeoTile grid aggregation] to group your documents into clusters and displays metrics for each cluster. When results are less then `index.max_result_window`, the layer displays features from individual documents.
* *Show clusters when results exceed 10,000.* When results exceed `index.max_result_window`, the layer uses {ref}/search-aggregations-bucket-geotilegrid-aggregation.html[GeoTile grid aggregation] to group your documents into clusters and displays metrics for each cluster. When results are less than `index.max_result_window`, the layer displays features from individual documents.

* *Use vector tiles.* Vector tiles partition your map into 6 to 8 tiles.
Each tile request is limited to the `index.max_result_window` index setting.
@ -18,7 +18,7 @@ It is enabled by default.
// Any changes made in this file will be seen there as well.
// tag::apm-indices-settings[]

Index defaults can be changed in Kibana. Open the main menu, then click *APM > Settings > Indices*.
Index defaults can be changed in the APM app. Select **Settings** > **Indices**.
Index settings in the APM app take precedence over those set in `kibana.yml`.

[role="screenshot"]
@ -284,6 +284,11 @@ the username and password that the {kib} server uses to perform maintenance
on the {kib} index at startup. {kib} users still need to authenticate with
{es}, which is proxied through the {kib} server.

|[[elasticsearch-service-account-token]] `elasticsearch.serviceAccountToken:`
| beta[]. If your {es} is protected with basic authentication, this token provides the credentials
that the {kib} server uses to perform maintenance on the {kib} index at startup. This setting
is an alternative to `elasticsearch.username` and `elasticsearch.password`.

| `enterpriseSearch.host`
| The URL of your Enterprise Search instance

@ -148,6 +148,27 @@ The *Markdown* visualization supports Markdown with Handlebar (mustache) syntax

For answers to frequently asked *TSVB* questions, review the following.

[float]
===== How do I create dashboard drilldowns for Top N and Table visualizations?

You can create dashboard drilldowns that include the specified time range for *Top N* and *Table* visualizations.

. Open the dashboard that you want to link to, then copy the URL.

. Open the dashboard with the *Top N* and *Table* visualization panel, then click *Edit* in the toolbar.

. Open the *Top N* or *Table* panel menu, then select *Edit visualization*.

. Click *Panel options*.

. In the *Item URL* field, enter the URL.
+
For example, `dashboards#/view/f193ca90-c9f4-11eb-b038-dd3270053a27`.

. Click *Save and return*.

. In the toolbar, click *Save as*, then make sure *Store time with dashboard* is deselected.

[float]
===== Why is my TSVB visualization missing data?
@ -82,9 +82,10 @@ connectors>> for triggering actions.
| Monitor the generation of reports—PDF, PNG, and CSV—and download reports that you previously generated.
A report can contain a dashboard, visualization, saved search, or Canvas workpad.

| {ml-docs}/ml-jobs.html[Machine Learning Jobs]
| View your {anomaly-jobs} and {dfanalytics-jobs}. Open the Single Metric
Viewer or Anomaly Explorer to see your {ml} results.
| Machine Learning Jobs
| View your <<xpack-ml-anomalies,{anomaly-detect}>> and
<<xpack-ml-dfanalytics,{dfanalytics}>> jobs. Open the Single Metric
Viewer or Anomaly Explorer to see your {anomaly-detect} results.

| <<watcher-ui, Watcher>>
| Detect changes in your data by creating, managing, and monitoring alerts.
@ -48,8 +48,9 @@ pane:
image::user/ml/images/ml-job-management.png[Job Management]

You can use the *Settings* pane to create and edit
{ml-docs}/ml-calendars.html[calendars] and the filters that are used in
{ml-docs}/ml-rules.html[custom rules]:
{ml-docs}/ml-ad-finding-anomalies.html#ml-ad-calendars[calendars] and the
filters that are used in
{ml-docs}/ml-ad-finding-anomalies.html#ml-ad-rules[custom rules]:

[role="screenshot"]
image::user/ml/images/ml-settings.png[Calendar Management]
@ -57,8 +57,12 @@ xpack.task_manager.monitored_task_execution_thresholds:

The health API is best consumed via the `/api/task_manager/_health` endpoint.
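
For example, a minimal sketch (not from this change) of polling the endpoint from TypeScript; the Kibana URL and credentials are placeholders:

[source,ts]
----
import fetch from 'node-fetch';

async function fetchTaskManagerHealth() {
  const auth = Buffer.from('elastic:changeme').toString('base64'); // placeholder credentials
  const response = await fetch('http://localhost:5601/api/task_manager/_health', {
    headers: { Authorization: `Basic ${auth}` },
  });
  // The response includes an overall status plus per-area stats.
  const health = await response.json();
  console.log(health.status);
}
----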

Additionally, the metrics are logged in the {kib} `DEBUG` logger at a regular cadence.
To enable Task Manager DEBUG logging in your {kib} instance, add the following to your `kibana.yml`:
Additionally, there are two ways to consume these metrics:

*Debug logging*

The metrics are logged in the {kib} `DEBUG` logger at a regular cadence.
To enable Task Manager debug logging in your {kib} instance, add the following to your `kibana.yml`:

[source,yml]
----
@ -69,7 +73,22 @@ logging:
    level: debug
----

These stats are logged based the number of milliseconds set in your <<task-manager-settings,`xpack.task_manager.poll_interval`>> setting, which means it could add substantial noise to your logs. Only enable this level of logging temporarily.
These stats are logged based on the number of milliseconds set in your <<task-manager-settings,`xpack.task_manager.poll_interval`>> setting, which could add substantial noise to your logs. Only enable this level of logging temporarily.

*Automatic logging*

By default, the health API runs at a regular cadence, and each time it runs, it attempts to self-evaluate its performance. If this self-evaluation yields a potential problem,
a message is logged to the {kib} server log. In addition, the health API will look at how long tasks have waited to start (from when they were scheduled to start). If this number exceeds a configurable threshold (<<task-manager-settings,`xpack.task_manager.monitored_stats_health_verbose_log.warn_delayed_task_start_in_seconds`>>), the same message is logged to the {kib} server log.

This message looks like:

[source,log]
----
Detected potential performance issue with Task Manager. Set 'xpack.task_manager.monitored_stats_health_verbose_log.enabled: true' in your Kibana.yml to enable debug logging`
----

If this message appears, set <<task-manager-settings,`xpack.task_manager.monitored_stats_health_verbose_log.enabled`>> to `true` in your `kibana.yml`. This will start logging the health metrics at either a `warn` or `error` log level, depending on the detected severity of the potential problem.

[float]
[[making-sense-of-task-manager-health-stats]]
@ -6,24 +6,8 @@
 * Side Public License, v 1.
 */

const preset = require('@kbn/test/jest-preset');

module.exports = {
  preset: '@kbn/test',
  preset: '@kbn/test/jest_integration',
  rootDir: '.',
  roots: ['<rootDir>/src', '<rootDir>/packages'],
  testMatch: ['**/integration_tests**/*.test.{js,mjs,ts,tsx}'],
  testPathIgnorePatterns: preset.testPathIgnorePatterns.filter(
    (pattern) => !pattern.includes('integration_tests')
  ),
  setupFilesAfterEnv: [
    '<rootDir>/node_modules/@kbn/test/target_node/jest/setup/after_env.integration.js',
  ],
  reporters: [
    'default',
    ['@kbn/test/target_node/jest/junit_reporter', { reportName: 'Jest Integration Tests' }],
  ],
  coverageReporters: !!process.env.CI
    ? [['json', { file: 'jest-integration.json' }]]
    : ['html', 'text'],
};
@ -83,10 +83,8 @@
    "**/minimist": "^1.2.5",
    "**/node-jose/node-forge": "^0.10.0",
    "**/pdfkit/crypto-js": "4.0.0",
    "**/prismjs": "1.24.0",
    "**/react-syntax-highlighter": "^15.3.1",
    "**/react-syntax-highlighter/**/highlight.js": "^10.4.1",
    "**/refractor": "^3.3.1",
    "**/request": "^2.88.2",
    "**/trim": "1.0.1",
    "**/typescript": "4.1.3",

@ -99,11 +97,11 @@
  "dependencies": {
    "@elastic/apm-rum": "^5.8.0",
    "@elastic/apm-rum-react": "^1.2.11",
    "@elastic/charts": "31.1.0",
    "@elastic/charts": "32.0.0",
    "@elastic/datemath": "link:bazel-bin/packages/elastic-datemath",
    "@elastic/elasticsearch": "npm:@elastic/elasticsearch-canary@^8.0.0-canary.13",
    "@elastic/ems-client": "7.14.0",
    "@elastic/eui": "34.5.2",
    "@elastic/eui": "35.0.0",
    "@elastic/filesaver": "1.1.2",
    "@elastic/good": "^9.0.1-kibana3",
    "@elastic/maki": "6.3.0",

@ -155,6 +153,7 @@
    "@kbn/securitysolution-utils": "link:bazel-bin/packages/kbn-securitysolution-utils",
    "@kbn/server-http-tools": "link:bazel-bin/packages/kbn-server-http-tools",
    "@kbn/server-route-repository": "link:bazel-bin/packages/kbn-server-route-repository",
    "@kbn/typed-react-router-config": "link:bazel-bin/packages/kbn-typed-react-router-config",
    "@kbn/std": "link:bazel-bin/packages/kbn-std",
    "@kbn/tinymath": "link:bazel-bin/packages/kbn-tinymath",
    "@kbn/ui-framework": "link:bazel-bin/packages/kbn-ui-framework",

@ -179,6 +178,7 @@
    "@turf/distance": "6.0.1",
    "@turf/helpers": "6.0.1",
    "@turf/length": "^6.0.2",
    "@types/react-router-config": "^5.0.2",
    "@types/redux-logger": "^3.0.8",
    "JSONStream": "1.3.5",
    "abort-controller": "^3.0.0",

@ -358,6 +358,7 @@
    "react-resize-detector": "^4.2.0",
    "react-reverse-portal": "^1.0.4",
    "react-router": "^5.2.0",
    "react-router-config": "^5.1.1",
    "react-router-dom": "^5.2.0",
    "react-router-redux": "^4.0.8",
    "react-shortcuts": "^2.0.0",
@ -56,6 +56,7 @@ filegroup(
    "//packages/kbn-test:build",
    "//packages/kbn-test-subj-selector:build",
    "//packages/kbn-tinymath:build",
    "//packages/kbn-typed-react-router-config:build",
    "//packages/kbn-ui-framework:build",
    "//packages/kbn-ui-shared-deps:build",
    "//packages/kbn-utility-types:build",
73
packages/kbn-io-ts-utils/src/deep_exact_rt/index.test.ts
Normal file

@ -0,0 +1,73 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import * as t from 'io-ts';
import { deepExactRt } from '.';
import { mergeRt } from '../merge_rt';

describe('deepExactRt', () => {
  it('recursively wraps partial/interface types in t.exact', () => {
    const a = t.type({
      path: t.type({
        serviceName: t.string,
      }),
      query: t.type({
        foo: t.string,
      }),
    });

    const b = t.type({
      path: t.type({
        transactionType: t.string,
      }),
    });

    const merged = mergeRt(a, b);

    expect(
      deepExactRt(a).decode({
        path: {
          serviceName: '',
          transactionType: '',
        },
        query: {
          foo: '',
          bar: '',
        },
        // @ts-ignore
      }).right
    ).toEqual({ path: { serviceName: '' }, query: { foo: '' } });

    expect(
      deepExactRt(b).decode({
        path: {
          serviceName: '',
          transactionType: '',
        },
        query: {
          foo: '',
          bar: '',
        },
        // @ts-ignore
      }).right
    ).toEqual({ path: { transactionType: '' } });

    expect(
      deepExactRt(merged).decode({
        path: {
          serviceName: '',
          transactionType: '',
        },
        query: {
          foo: '',
          bar: '',
        },
        // @ts-ignore
      }).right
    ).toEqual({ path: { serviceName: '', transactionType: '' }, query: { foo: '' } });
  });
});
45
packages/kbn-io-ts-utils/src/deep_exact_rt/index.ts
Normal file

@ -0,0 +1,45 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import * as t from 'io-ts';
import { mapValues } from 'lodash';
import { mergeRt } from '../merge_rt';
import { isParsableType, ParseableType } from '../parseable_types';

// Recursively wraps every interface/partial codec in t.exact, so that decoding
// strips properties not declared in the codec (see index.test.ts).
export function deepExactRt<T extends t.Type<any> | ParseableType>(type: T): T;

export function deepExactRt(type: t.Type<any> | ParseableType) {
  if (!isParsableType(type)) {
    // not one of the tags we know how to traverse; leave the codec untouched
    return type;
  }

  switch (type._tag) {
    case 'ArrayType':
      return t.array(deepExactRt(type.type));

    case 'DictionaryType':
      return t.dictionary(type.domain, deepExactRt(type.codomain));

    case 'InterfaceType':
      return t.exact(t.interface(mapValues(type.props, deepExactRt)));

    case 'PartialType':
      return t.exact(t.partial(mapValues(type.props, deepExactRt)));

    case 'IntersectionType':
      return t.intersection(type.types.map(deepExactRt) as any);

    case 'UnionType':
      return t.union(type.types.map(deepExactRt) as any);

    case 'MergeType':
      return mergeRt(deepExactRt(type.types[0]), deepExactRt(type.types[1]));

    default:
      return type;
  }
}
39
packages/kbn-io-ts-utils/src/parseable_types/index.ts
Normal file

@ -0,0 +1,39 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import * as t from 'io-ts';
import { MergeType } from '../merge_rt';

export type ParseableType =
  | t.StringType
  | t.NumberType
  | t.BooleanType
  | t.ArrayType<t.Mixed>
  | t.RecordC<t.Mixed, t.Mixed>
  | t.DictionaryType<t.Mixed, t.Mixed>
  | t.InterfaceType<t.Props>
  | t.PartialType<t.Props>
  | t.UnionType<t.Mixed[]>
  | t.IntersectionType<t.Mixed[]>
  | MergeType<t.Mixed, t.Mixed>;

const parseableTags = [
  'StringType',
  'NumberType',
  'BooleanType',
  'ArrayType',
  'DictionaryType',
  'InterfaceType',
  'PartialType',
  'UnionType',
  'IntersectionType',
  'MergeType',
];

export const isParsableType = (type: t.Type<any> | ParseableType): type is ParseableType => {
  return '_tag' in type && parseableTags.includes(type._tag);
};
@ -7,35 +7,7 @@
 */
import * as t from 'io-ts';
import { mapValues } from 'lodash';

type JSONSchemableValueType =
  | t.StringType
  | t.NumberType
  | t.BooleanType
  | t.ArrayType<t.Mixed>
  | t.RecordC<t.Mixed, t.Mixed>
  | t.DictionaryType<t.Mixed, t.Mixed>
  | t.InterfaceType<t.Props>
  | t.PartialType<t.Props>
  | t.UnionType<t.Mixed[]>
  | t.IntersectionType<t.Mixed[]>;

const tags = [
  'StringType',
  'NumberType',
  'BooleanType',
  'ArrayType',
  'DictionaryType',
  'InterfaceType',
  'PartialType',
  'UnionType',
  'IntersectionType',
];

const isSchemableValueType = (type: t.Mixed): type is JSONSchemableValueType => {
  // @ts-ignore
  return tags.includes(type._tag);
};
import { isParsableType } from '../parseable_types';

interface JSONSchemaObject {
  type: 'object';

@ -74,7 +46,7 @@ type JSONSchema =
  | JSONSchemaAnyOf;

export const toJsonSchema = (type: t.Mixed): JSONSchema => {
  if (isSchemableValueType(type)) {
  if (isParsableType(type)) {
    switch (type._tag) {
      case 'ArrayType':
        return { type: 'array', items: toJsonSchema(type.type) };
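
A usage sketch of the converter (mine, not part of the diff), assuming `toJsonSchema` is exported from this module; the output shape shown is approximate:

[source,ts]
----
import * as t from 'io-ts';
import { toJsonSchema } from './to_json_schema'; // assumed module path

// An io-ts codec whose _tag values ('InterfaceType', 'ArrayType', 'StringType')
// are all in the parseable set, so the converter can walk it recursively.
const codec = t.type({
  name: t.string,
  tags: t.array(t.string),
});

const schema = toJsonSchema(codec);
// Roughly: { type: 'object', properties: { name: { type: 'string' },
//            tags: { type: 'array', items: { type: 'string' } } } }
----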
@ -112,4 +112,5 @@ pageLoadAssetSize:
  visTypePie: 35583
  expressionRevealImage: 25675
  cases: 144442
  expressionError: 22127
  userSetup: 18532
@ -0,0 +1,9 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

// stub
@ -1,7 +1,7 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP

exports[`parseDirPath() parses / 1`] = `
Object {
ParsedPath {
  "dirs": Array [],
  "filename": undefined,
  "query": undefined,

@ -10,7 +10,7 @@ Object {
`;

exports[`parseDirPath() parses /foo 1`] = `
Object {
ParsedPath {
  "dirs": Array [
    "foo",
  ],

@ -21,7 +21,7 @@ Object {
`;

exports[`parseDirPath() parses /foo/bar/baz 1`] = `
Object {
ParsedPath {
  "dirs": Array [
    "foo",
    "bar",

@ -34,7 +34,7 @@ Object {
`;

exports[`parseDirPath() parses /foo/bar/baz/ 1`] = `
Object {
ParsedPath {
  "dirs": Array [
    "foo",
    "bar",

@ -47,7 +47,7 @@ Object {
`;

exports[`parseDirPath() parses c:\\ 1`] = `
Object {
ParsedPath {
  "dirs": Array [],
  "filename": undefined,
  "query": undefined,

@ -56,7 +56,7 @@ Object {
`;

exports[`parseDirPath() parses c:\\foo 1`] = `
Object {
ParsedPath {
  "dirs": Array [
    "foo",
  ],

@ -67,7 +67,7 @@ Object {
`;

exports[`parseDirPath() parses c:\\foo\\bar\\baz 1`] = `
Object {
ParsedPath {
  "dirs": Array [
    "foo",
    "bar",

@ -80,7 +80,7 @@ Object {
`;

exports[`parseDirPath() parses c:\\foo\\bar\\baz\\ 1`] = `
Object {
ParsedPath {
  "dirs": Array [
    "foo",
    "bar",

@ -93,7 +93,7 @@ Object {
`;

exports[`parseFilePath() parses /foo 1`] = `
Object {
ParsedPath {
  "dirs": Array [],
  "filename": "foo",
  "query": undefined,

@ -102,7 +102,7 @@ Object {
`;

exports[`parseFilePath() parses /foo/bar/baz 1`] = `
Object {
ParsedPath {
  "dirs": Array [
    "foo",
    "bar",

@ -114,7 +114,7 @@ Object {
`;

exports[`parseFilePath() parses /foo/bar/baz.json 1`] = `
Object {
ParsedPath {
  "dirs": Array [
    "foo",
    "bar",

@ -126,7 +126,7 @@ Object {
`;

exports[`parseFilePath() parses /foo/bar/baz.json?light 1`] = `
Object {
ParsedPath {
  "dirs": Array [
    "foo",
    "bar",

@ -140,7 +140,7 @@ Object {
`;

exports[`parseFilePath() parses /foo/bar/baz.json?light=true&dark=false 1`] = `
Object {
ParsedPath {
  "dirs": Array [
    "foo",
    "bar",

@ -155,7 +155,7 @@ Object {
`;

exports[`parseFilePath() parses c:/foo/bar/baz.json 1`] = `
Object {
ParsedPath {
  "dirs": Array [
    "foo",
    "bar",

@ -167,7 +167,7 @@ Object {
`;

exports[`parseFilePath() parses c:\\foo 1`] = `
Object {
ParsedPath {
  "dirs": Array [],
  "filename": "foo",
  "query": undefined,

@ -176,7 +176,7 @@ Object {
`;

exports[`parseFilePath() parses c:\\foo\\bar\\baz 1`] = `
Object {
ParsedPath {
  "dirs": Array [
    "foo",
    "bar",

@ -188,7 +188,7 @@ Object {
`;

exports[`parseFilePath() parses c:\\foo\\bar\\baz.json 1`] = `
Object {
ParsedPath {
  "dirs": Array [
    "foo",
    "bar",

@ -200,7 +200,7 @@ Object {
`;

exports[`parseFilePath() parses c:\\foo\\bar\\baz.json?dark 1`] = `
Object {
ParsedPath {
  "dirs": Array [
    "foo",
    "bar",

@ -214,7 +214,7 @@ Object {
`;

exports[`parseFilePath() parses c:\\foo\\bar\\baz.json?dark=true&light=false 1`] = `
Object {
ParsedPath {
  "dirs": Array [
    "foo",
    "bar",
@ -9,17 +9,61 @@
import normalizePath from 'normalize-path';
import Qs from 'querystring';

class ParsedPath {
  constructor(
    public readonly root: string,
    public readonly dirs: string[],
    public readonly query?: Record<string, unknown>,
    public readonly filename?: string
  ) {}

  private indexOfDir(match: string | RegExp, fromIndex: number = 0) {
    for (let i = fromIndex; i < this.dirs.length; i++) {
      if (this.matchDir(i, match)) {
        return i;
      }
    }

    return -1;
  }

  private matchDir(i: number, match: string | RegExp) {
    return typeof match === 'string' ? this.dirs[i] === match : match.test(this.dirs[i]);
  }

  matchDirs(...segments: Array<string | RegExp>) {
    const [first, ...rest] = segments;
    let fromIndex = 0;
    while (true) {
      // do the dirs include the first segment to match?
      const startIndex = this.indexOfDir(first, fromIndex);
      if (startIndex === -1) {
        return;
      }

      // are all of the ...rest segments also matched at this point?
      if (!rest.length || rest.every((seg, i) => this.matchDir(startIndex + 1 + i, seg))) {
        return { startIndex, endIndex: startIndex + rest.length };
      }

      // no match, search again, this time looking at instances after the matched instance
      fromIndex = startIndex + 1;
    }
  }
}

/**
 * Parse an absolute path, supporting normalized paths from webpack,
 * into a list of directories and root
 */
export function parseDirPath(path: string) {
  const filePath = parseFilePath(path);
  return {
    ...filePath,
    dirs: [...filePath.dirs, ...(filePath.filename ? [filePath.filename] : [])],
    filename: undefined,
  };
  return new ParsedPath(
    filePath.root,
    [...filePath.dirs, ...(filePath.filename ? [filePath.filename] : [])],
    filePath.query,
    undefined
  );
}

export function parseFilePath(path: string) {

@ -32,10 +76,10 @@ export function parseFilePath(path: string) {
  }

  const [root, ...others] = normalized.split('/');
  return {
    root: root === '' ? '/' : root,
    dirs: others.slice(0, -1),
  return new ParsedPath(
    root === '' ? '/' : root,
    others.slice(0, -1),
    query,
    filename: others[others.length - 1] || undefined,
  };
    others[others.length - 1] || undefined
  );
}
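
A usage sketch of the new class (mine, not from the diff), with the expected values as comments:

[source,ts]
----
import { parseFilePath } from './parse_path'; // assumed module path

const parsed = parseFilePath('/repo/x-pack/plugins/maps/public/index.ts');
// parsed.root === '/', parsed.filename === 'index.ts'
// parsed.dirs === ['repo', 'x-pack', 'plugins', 'maps', 'public']

// matchDirs() scans dirs for a consecutive run of segments (strings or
// regexps) and reports where the run starts and ends, or returns undefined.
const match = parsed.matchDirs('plugins', /^maps$/);
// match === { startIndex: 2, endIndex: 3 }
----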