From f6f249be159ce0c25928a167bc2adc59f762c81d Mon Sep 17 00:00:00 2001
From: Tal Levy
Date: Fri, 4 Oct 2019 13:55:41 -0700
Subject: [PATCH 01/55] Expose ValueException in Grok (#47368)

Previously, Grok's groupMatch could fall into an
IndexOutOfBoundsException, which can be avoided by checking the region
offsets explicitly. The other exception that can come up is a
ValueException. The circumstances in which that exception occurs are
less well understood, but it may make sense to expose it, since it
typically means something did not go well.
---
 .../main/java/org/elasticsearch/grok/Grok.java | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)

diff --git a/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java b/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java
index 07f75fd995b2..1a87d1a4709d 100644
--- a/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java
+++ b/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java
@@ -26,7 +26,6 @@ import org.joni.Option;
 import org.joni.Regex;
 import org.joni.Region;
 import org.joni.Syntax;
-import org.joni.exception.ValueException;

 import java.io.BufferedReader;
 import java.io.IOException;
@@ -150,17 +149,14 @@ public final class Grok {
     }

     public String groupMatch(String name, Region region, String pattern) {
-        try {
-            int number = GROK_PATTERN_REGEX.nameToBackrefNumber(name.getBytes(StandardCharsets.UTF_8), 0,
-                name.getBytes(StandardCharsets.UTF_8).length, region);
-            int begin = region.beg[number];
-            int end = region.end[number];
-            return new String(pattern.getBytes(StandardCharsets.UTF_8), begin, end - begin, StandardCharsets.UTF_8);
-        } catch (StringIndexOutOfBoundsException e) {
-            return null;
-        } catch (ValueException e) {
+        int number = GROK_PATTERN_REGEX.nameToBackrefNumber(name.getBytes(StandardCharsets.UTF_8), 0,
+            name.getBytes(StandardCharsets.UTF_8).length, region);
+        int begin = region.beg[number];
+        int end = region.end[number];
+        if (begin < 0) { // no match found
             return null;
         }
+        return new String(pattern.getBytes(StandardCharsets.UTF_8), begin, end - begin, StandardCharsets.UTF_8);
     }

     /**

From 08e887ae303a631f0fb133c30bd375b25c56fef2 Mon Sep 17 00:00:00 2001
From: Lee Hinman
Date: Fri, 4 Oct 2019 14:57:23 -0600
Subject: [PATCH 02/55] Set default SLM retention invocation time (#47604)

This adds a default for the `slm.retention_schedule` setting, setting
it to `0 30 1 * * ?`, which is 1:30am every day. Having retention unset
meant that it would never be invoked and would never clean up
snapshots. We determined it would be better to have a default than for
retention never to run. When coming to a decision, we weighed the
option of an absolute time (such as 1:30am) versus a periodic
invocation (like every 12 hours). In the end we decided on the absolute
time because it has better predictability and consistency than a
periodic invocation, which would depend on when the master node was
elected or restarted.
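For context on the Grok change in the first patch above: joni reports a
named group that did not participate in a match by setting its Region
offsets to -1, which is what the new `begin < 0` check relies on. A
minimal standalone sketch of the resulting logic follows; the
GroupMatchSketch class name and the explicit Regex parameter are
illustrative only, not part of the real Grok class.

    import java.nio.charset.StandardCharsets;

    import org.joni.Regex;
    import org.joni.Region;

    public final class GroupMatchSketch {
        /**
         * Returns the text captured by the named group, or null when the
         * group did not participate in the match.
         */
        static String groupMatch(Regex regex, String name, Region region, String pattern) {
            byte[] nameBytes = name.getBytes(StandardCharsets.UTF_8);
            // May still throw org.joni.exception.ValueException for an
            // unknown group name -- the patch intentionally lets that
            // propagate to the caller instead of swallowing it.
            int number = regex.nameToBackrefNumber(nameBytes, 0, nameBytes.length, region);
            int begin = region.beg[number];
            int end = region.end[number];
            if (begin < 0) { // joni marks a non-participating group with -1
                return null;
            }
            return new String(pattern.getBytes(StandardCharsets.UTF_8), begin, end - begin, StandardCharsets.UTF_8);
        }
    }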
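For this patch, the new `simpleString` overload validates the default
value eagerly, so a malformed default cron string fails fast rather
than surfacing later. A minimal sketch of its use follows; note the
generics (`Setting<String>` with a `Setting.Validator<String>` lambda),
which the flattened diff below elides. The RetentionScheduleSketch
holder class and the simplified field-count check are assumptions — the
real declaration lives in LifecycleSettings and validates full cron
syntax, as the diff shows.

    import org.elasticsearch.common.settings.Setting;

    public class RetentionScheduleSketch {
        public static final Setting<String> SLM_RETENTION_SCHEDULE_SETTING =
            Setting.simpleString(
                "slm.retention_schedule",
                // Default: second 0, minute 30, hour 1, any day -> 1:30am daily.
                "0 30 1 * * ?",
                str -> {
                    // Stand-in for the real cron-syntax validation.
                    if (str.isEmpty() == false && str.trim().split("\\s+").length != 6) {
                        throw new IllegalArgumentException("invalid cron schedule: " + str);
                    }
                },
                Setting.Property.NodeScope);
    }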
Relates to #43663 --- .../main/java/org/elasticsearch/common/settings/Setting.java | 5 +++++ .../org/elasticsearch/xpack/core/ilm/LifecycleSettings.java | 5 ++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index 95791406a1fd..b3e95ee38d84 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -1073,6 +1073,11 @@ public class Setting implements ToXContentObject { return new Setting<>(new SimpleKey(key), null, s -> "", Function.identity(), validator, properties); } + public static Setting simpleString(String key, String defaultValue, Validator validator, Property... properties) { + validator.validate(defaultValue); + return new Setting<>(new SimpleKey(key), null, s -> defaultValue, Function.identity(), validator, properties); + } + public static Setting simpleString(String key, Setting fallback, Property... properties) { return simpleString(key, fallback, Function.identity(), properties); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleSettings.java index 6fea7cf87737..333506f20be3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleSettings.java @@ -38,7 +38,10 @@ public class LifecycleSettings { public static final Setting SLM_HISTORY_INDEX_ENABLED_SETTING = Setting.boolSetting(SLM_HISTORY_INDEX_ENABLED, true, Setting.Property.NodeScope); - public static final Setting SLM_RETENTION_SCHEDULE_SETTING = Setting.simpleString(SLM_RETENTION_SCHEDULE, str -> { + public static final Setting SLM_RETENTION_SCHEDULE_SETTING = Setting.simpleString(SLM_RETENTION_SCHEDULE, + // Default to 1:30am every day + "0 30 1 * * ?", + str -> { try { if (Strings.hasText(str)) { // Test that the setting is a valid cron syntax From 4e4990c6a02f2c5f0d3637d69f38de2e0f52e18b Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Fri, 4 Oct 2019 16:10:26 -0700 Subject: [PATCH 03/55] [DOCS] Cleans up links to security content (#47610) --- .../delete-auto-follow-pattern.asciidoc | 4 +- .../get-auto-follow-pattern.asciidoc | 4 +- .../put-auto-follow-pattern.asciidoc | 5 +- .../ccr/apis/follow/get-follow-info.asciidoc | 3 +- .../ccr/apis/follow/get-follow-stats.asciidoc | 2 +- .../apis/follow/post-forget-follower.asciidoc | 2 +- .../apis/follow/post-pause-follow.asciidoc | 2 +- .../apis/follow/post-resume-follow.asciidoc | 2 +- .../ccr/apis/follow/post-unfollow.asciidoc | 2 +- .../ccr/apis/follow/put-follow.asciidoc | 3 +- .../reference/ccr/apis/get-ccr-stats.asciidoc | 2 +- docs/reference/ccr/getting-started.asciidoc | 2 +- docs/reference/commands/certutil.asciidoc | 2 +- .../commands/setup-passwords.asciidoc | 6 +- .../reference/commands/users-command.asciidoc | 2 +- docs/reference/glossary.asciidoc | 2 +- ...ackup-and-restore-security-config.asciidoc | 4 +- .../backup-cluster-data.asciidoc | 4 +- .../ilm/apis/delete-lifecycle.asciidoc | 2 +- docs/reference/ilm/apis/explain.asciidoc | 2 +- .../reference/ilm/apis/get-lifecycle.asciidoc | 2 +- docs/reference/ilm/apis/get-status.asciidoc | 2 +- docs/reference/ilm/apis/move-to-step.asciidoc | 2 +- .../reference/ilm/apis/put-lifecycle.asciidoc | 2 +- 
.../apis/remove-policy-from-index.asciidoc | 2 +- docs/reference/ilm/apis/retry-policy.asciidoc | 2 +- docs/reference/ilm/apis/slm-api.asciidoc | 2 +- docs/reference/ilm/apis/start.asciidoc | 2 +- docs/reference/ilm/apis/stop.asciidoc | 2 +- .../licensing/delete-license.asciidoc | 4 +- .../licensing/get-basic-status.asciidoc | 3 +- docs/reference/licensing/get-license.asciidoc | 3 +- .../licensing/get-trial-status.asciidoc | 2 +- docs/reference/licensing/start-basic.asciidoc | 2 +- docs/reference/licensing/start-trial.asciidoc | 2 +- .../licensing/update-license.asciidoc | 2 +- .../anomaly-detection/apis/close-job.asciidoc | 2 +- .../apis/datafeedresource.asciidoc | 4 +- .../apis/delete-calendar-event.asciidoc | 2 +- .../apis/delete-calendar-job.asciidoc | 2 +- .../apis/delete-calendar.asciidoc | 2 +- .../apis/delete-datafeed.asciidoc | 2 +- .../apis/delete-expired-data.asciidoc | 2 +- .../apis/delete-filter.asciidoc | 2 +- .../apis/delete-forecast.asciidoc | 2 +- .../apis/delete-job.asciidoc | 3 +- .../apis/delete-snapshot.asciidoc | 3 +- .../apis/eventresource.asciidoc | 2 +- .../apis/find-file-structure.asciidoc | 2 +- .../anomaly-detection/apis/flush-job.asciidoc | 2 +- .../anomaly-detection/apis/forecast.asciidoc | 2 +- .../apis/get-bucket.asciidoc | 4 +- .../apis/get-calendar-event.asciidoc | 2 +- .../apis/get-calendar.asciidoc | 2 +- .../apis/get-category.asciidoc | 4 +- .../apis/get-datafeed-stats.asciidoc | 2 +- .../apis/get-datafeed.asciidoc | 2 +- .../apis/get-filter.asciidoc | 2 +- .../apis/get-influencer.asciidoc | 4 +- .../apis/get-job-stats.asciidoc | 2 +- .../anomaly-detection/apis/get-job.asciidoc | 2 +- .../apis/get-ml-info.asciidoc | 4 +- .../apis/get-overall-buckets.asciidoc | 3 +- .../apis/get-record.asciidoc | 3 +- .../apis/get-snapshot.asciidoc | 2 +- .../apis/jobresource.asciidoc | 14 ++--- .../anomaly-detection/apis/open-job.asciidoc | 2 +- .../apis/post-calendar-event.asciidoc | 2 +- .../anomaly-detection/apis/post-data.asciidoc | 2 +- .../apis/preview-datafeed.asciidoc | 2 +- .../apis/put-calendar-job.asciidoc | 2 +- .../apis/put-calendar.asciidoc | 2 +- .../apis/put-datafeed.asciidoc | 2 +- .../apis/put-filter.asciidoc | 2 +- .../anomaly-detection/apis/put-job.asciidoc | 2 +- .../apis/resultsresource.asciidoc | 2 +- .../apis/revert-snapshot.asciidoc | 2 +- .../apis/set-upgrade-mode.asciidoc | 2 +- .../apis/start-datafeed.asciidoc | 2 +- .../apis/stop-datafeed.asciidoc | 2 +- .../apis/update-datafeed.asciidoc | 2 +- .../apis/update-filter.asciidoc | 2 +- .../apis/update-job.asciidoc | 2 +- .../apis/update-snapshot.asciidoc | 2 +- .../apis/validate-detector.asciidoc | 2 +- .../apis/validate-job.asciidoc | 2 +- .../apis/delete-dfanalytics.asciidoc | 3 +- ...estimate-memory-usage-dfanalytics.asciidoc | 3 +- .../apis/evaluate-dfanalytics.asciidoc | 3 +- .../apis/get-dfanalytics-stats.asciidoc | 3 +- .../apis/get-dfanalytics.asciidoc | 3 +- .../apis/put-dfanalytics.asciidoc | 3 +- .../apis/start-dfanalytics.asciidoc | 3 +- .../apis/stop-dfanalytics.asciidoc | 3 +- .../modules/remote-clusters.asciidoc | 2 +- .../collecting-monitoring-data.asciidoc | 10 ++-- .../monitoring/configuring-filebeat.asciidoc | 14 ++--- .../configuring-metricbeat.asciidoc | 11 ++-- docs/reference/monitoring/production.asciidoc | 8 +-- .../reference/rollup/apis/delete-job.asciidoc | 2 +- docs/reference/rollup/apis/get-job.asciidoc | 2 +- docs/reference/rollup/apis/put-job.asciidoc | 2 +- .../rollup/apis/rollup-caps.asciidoc | 2 +- .../rollup/apis/rollup-index-caps.asciidoc | 2 +- 
docs/reference/rollup/apis/start-job.asciidoc | 2 +- docs/reference/rollup/apis/stop-job.asciidoc | 2 +- .../settings/audit-settings.asciidoc | 5 +- docs/reference/settings/ccr-settings.asciidoc | 2 +- .../settings/monitoring-settings.asciidoc | 3 +- .../settings/security-settings.asciidoc | 59 +++++++++---------- .../setup/bootstrap-checks-xes.asciidoc | 9 ++- docs/reference/setup/install/docker.asciidoc | 2 +- docs/reference/sql/functions/system.asciidoc | 2 +- .../transform/apis/delete-transform.asciidoc | 3 +- .../apis/get-transform-stats.asciidoc | 3 +- .../transform/apis/get-transform.asciidoc | 3 +- .../transform/apis/preview-transform.asciidoc | 5 +- .../transform/apis/put-transform.asciidoc | 3 +- .../transform/apis/start-transform.asciidoc | 5 +- .../transform/apis/stop-transform.asciidoc | 3 +- .../transform/apis/update-transform.asciidoc | 3 +- .../transform/ecommerce-tutorial.asciidoc | 4 +- .../security/change-password.asciidoc | 2 +- .../en/rest-api/security/clear-cache.asciidoc | 2 +- .../security/clear-roles-cache.asciidoc | 2 +- .../security/create-role-mappings.asciidoc | 7 +-- .../rest-api/security/create-roles.asciidoc | 8 +-- .../rest-api/security/create-users.asciidoc | 2 +- .../delegate-pki-authentication.asciidoc | 2 +- .../security/delete-app-privileges.asciidoc | 3 +- .../security/delete-role-mappings.asciidoc | 2 +- .../rest-api/security/delete-roles.asciidoc | 2 +- .../rest-api/security/delete-users.asciidoc | 2 +- .../rest-api/security/disable-users.asciidoc | 2 +- .../rest-api/security/enable-users.asciidoc | 2 +- .../security/get-app-privileges.asciidoc | 3 +- .../security/get-builtin-privileges.asciidoc | 13 ++-- .../security/get-role-mappings.asciidoc | 2 +- .../en/rest-api/security/get-roles.asciidoc | 2 +- .../en/rest-api/security/get-users.asciidoc | 2 +- .../rest-api/security/has-privileges.asciidoc | 4 +- .../security/put-app-privileges.asciidoc | 6 +- .../security/role-mapping-resources.asciidoc | 4 +- x-pack/docs/en/rest-api/security/ssl.asciidoc | 4 +- .../en/rest-api/watcher/ack-watch.asciidoc | 2 +- .../rest-api/watcher/activate-watch.asciidoc | 2 +- .../watcher/deactivate-watch.asciidoc | 2 +- .../en/rest-api/watcher/delete-watch.asciidoc | 2 +- .../rest-api/watcher/execute-watch.asciidoc | 2 +- .../en/rest-api/watcher/get-watch.asciidoc | 2 +- .../en/rest-api/watcher/put-watch.asciidoc | 2 +- .../docs/en/rest-api/watcher/start.asciidoc | 2 +- .../docs/en/rest-api/watcher/stats.asciidoc | 2 +- x-pack/docs/en/rest-api/watcher/stop.asciidoc | 2 +- ...onfiguring-active-directory-realm.asciidoc | 8 +-- .../configuring-file-realm.asciidoc | 3 +- .../configuring-kerberos-realm.asciidoc | 12 ++-- .../configuring-ldap-realm.asciidoc | 12 ++-- .../configuring-pki-realm.asciidoc | 13 ++-- .../configuring-saml-realm.asciidoc | 23 ++++---- .../authentication/oidc-guide.asciidoc | 4 +- .../authentication/saml-guide.asciidoc | 26 ++++---- .../docs/en/security/configuring-es.asciidoc | 14 ++--- .../docs/en/security/reference/files.asciidoc | 6 +- .../configuring-tls-docker.asciidoc | 6 +- .../securing-elasticsearch.asciidoc | 2 +- 166 files changed, 295 insertions(+), 348 deletions(-) diff --git a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc index 2fbd553ad6ba..602910bdda48 100644 --- a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc @@ -41,13 +41,13 @@ DELETE 
/_ccr/auto_follow/ * If the {es} {security-features} are enabled, you must have `manage_ccr` cluster privileges on the cluster that contains the follower index. For more information, -see {stack-ov}/security-privileges.html[Security privileges]. +see <>. [[ccr-delete-auto-follow-pattern-desc]] ==== {api-description-title} This API deletes a configured collection of -{stack-ov}/ccr-auto-follow.html[auto-follow patterns]. +<>. [[ccr-delete-auto-follow-pattern-path-parms]] ==== {api-path-parms-title} diff --git a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc index dfee7f58673a..ac8f9e499413 100644 --- a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc @@ -53,12 +53,12 @@ GET /_ccr/auto_follow/ * If the {es} {security-features} are enabled, you must have `manage_ccr` cluster privileges on the cluster that contains the follower index. For more information, -see {stack-ov}/security-privileges.html[Security privileges]. +see <>. [[ccr-get-auto-follow-pattern-desc]] ==== {api-description-title} -This API gets configured {stack-ov}/ccr-auto-follow.html[auto-follow patterns]. +This API gets configured <>. This API will return the specified auto-follow pattern collection. [[ccr-get-auto-follow-pattern-path-parms]] diff --git a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc index 04fa137cad76..0ccc8c13cf20 100644 --- a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc @@ -45,14 +45,13 @@ DELETE /_ccr/auto_follow/auto_follow_pattern_name * If the {es} {security-features} are enabled, you must have `read` and `monitor` index privileges for the leader index patterns. You must also have `manage_ccr` cluster privileges on the cluster that contains the follower index. For more -information, see -{stack-ov}/security-privileges.html[Security privileges]. +information, see <>. [[ccr-put-auto-follow-pattern-desc]] ==== {api-description-title} This API creates a new named collection of -{stack-ov}/ccr-auto-follow.html[auto-follow patterns] against the remote cluster +<> against the remote cluster specified in the request body. Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower indices. diff --git a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc index 5df2efa863bf..8361f7911fe0 100644 --- a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc @@ -42,8 +42,7 @@ GET //_ccr/info ==== {api-prereq-title} * If the {es} {security-features} are enabled, you must have `monitor` cluster -privileges. For more information, see -{stack-ov}/security-privileges.html[Security privileges]. +privileges. For more information, see <>. 
[[ccr-get-follow-info-desc]] ==== {api-description-title} diff --git a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc index a83019d31539..49dc776a6d30 100644 --- a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc @@ -43,7 +43,7 @@ GET //_ccr/stats * If the {es} {security-features} are enabled, you must have `monitor` cluster privileges on the cluster that contains the follower index. For more information, -see {stack-ov}/security-privileges.html[Security privileges]. +see <>. [[ccr-get-follow-stats-desc]] ==== {api-description-title} diff --git a/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc b/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc index adf4508c5cdc..3b2e588f9e62 100644 --- a/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc +++ b/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc @@ -70,7 +70,7 @@ POST //_ccr/forget_follower * If the {es} {security-features} are enabled, you must have `manage_leader_index` index privileges for the leader index. For more information, see -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ccr-post-forget-follower-desc]] ==== {api-description-title} diff --git a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc index e5881fe5a6cc..196fd8dc9f63 100644 --- a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc @@ -37,7 +37,7 @@ POST //_ccr/pause_follow * If the {es} {security-features} are enabled, you must have `manage_ccr` cluster privileges on the cluster that contains the follower index. For more information, -see {stack-ov}/security-privileges.html[Security privileges]. +see <>. [[ccr-post-pause-follow-desc]] ==== {api-description-title} diff --git a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc index b6bd63f0192e..32ef91f8356a 100644 --- a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc @@ -51,7 +51,7 @@ POST //_ccr/resume_follow index privileges for the follower index. You must have `read` and `monitor` index privileges for the leader index. You must also have `manage_ccr` cluster privileges on the cluster that contains the follower index. For more information, -see {stack-ov}/security-privileges.html[Security privileges]. +see <>. [[ccr-post-resume-follow-desc]] ==== {api-description-title} diff --git a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc index 703f926cf9ed..d74f38aa221c 100644 --- a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc @@ -41,7 +41,7 @@ POST //_ccr/unfollow * If the {es} {security-features} are enabled, you must have `manage_follow_index` index privileges for the follower index. For more information, see -{stack-ov}/security-privileges.html[Security privileges]. +<>. 
[[ccr-post-unfollow-desc]] ==== {api-description-title} diff --git a/docs/reference/ccr/apis/follow/put-follow.asciidoc b/docs/reference/ccr/apis/follow/put-follow.asciidoc index 9f31f47a665e..5ca5b09fc75c 100644 --- a/docs/reference/ccr/apis/follow/put-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/put-follow.asciidoc @@ -41,8 +41,7 @@ PUT //_ccr/follow?wait_for_active_shards=1 and `manage_follow_index` index privileges for the follower index. You must have `read` and `monitor` index privileges for the leader index. You must also have `manage_ccr` cluster privileges on the cluster that contains the follower index. -For more information, see -{stack-ov}/security-privileges.html[Security privileges]. +For more information, see <>. [[ccr-put-follow-desc]] ==== {api-description-title} diff --git a/docs/reference/ccr/apis/get-ccr-stats.asciidoc b/docs/reference/ccr/apis/get-ccr-stats.asciidoc index c09e7c5426ae..564840eb179d 100644 --- a/docs/reference/ccr/apis/get-ccr-stats.asciidoc +++ b/docs/reference/ccr/apis/get-ccr-stats.asciidoc @@ -42,7 +42,7 @@ GET /_ccr/stats * If the {es} {security-features} are enabled, you must have `monitor` cluster privileges on the cluster that contains the follower index. For more information, -see {stack-ov}/security-privileges.html[Security privileges]. +see <>. [[ccr-get-stats-desc]] ==== {api-description-title} diff --git a/docs/reference/ccr/getting-started.asciidoc b/docs/reference/ccr/getting-started.asciidoc index aa096047bb99..41d013f62f3a 100644 --- a/docs/reference/ccr/getting-started.asciidoc +++ b/docs/reference/ccr/getting-started.asciidoc @@ -34,7 +34,7 @@ to control which users have authority to manage {ccr}. By default, you can perform all of the steps in this tutorial by using the built-in `elastic` user. However, a password must be set for this user before the user can do anything. For information about how to set that password, -see {stack-ov}/security-getting-started.html[Tutorial: Getting started with security]. +see <>. If you are performing these steps in a production environment, take extra care because the `elastic` user has the `superuser` role and you could inadvertently diff --git a/docs/reference/commands/certutil.asciidoc b/docs/reference/commands/certutil.asciidoc index 07a3f96738da..0c1a9f25684a 100644 --- a/docs/reference/commands/certutil.asciidoc +++ b/docs/reference/commands/certutil.asciidoc @@ -224,7 +224,7 @@ Alternatively, you can specify the `--ca-pass`, `--out`, and `--pass` parameters By default, this command generates a file called `elastic-certificates.p12`, which you can copy to the relevant configuration directory for each Elastic product that you want to configure. For more information, see -{xpack-ref}/ssl-tls.html[Setting Up TLS on a Cluster]. +<>. [float] [[certutil-silent]] diff --git a/docs/reference/commands/setup-passwords.asciidoc b/docs/reference/commands/setup-passwords.asciidoc index 3dcc9001534f..1c17c5544e7b 100644 --- a/docs/reference/commands/setup-passwords.asciidoc +++ b/docs/reference/commands/setup-passwords.asciidoc @@ -4,7 +4,7 @@ == elasticsearch-setup-passwords The `elasticsearch-setup-passwords` command sets the passwords for the -{stack-ov}/built-in-users.html[built-in users]. +<>. [float] === Synopsis @@ -21,7 +21,7 @@ bin/elasticsearch-setup-passwords auto|interactive This command is intended for use only during the initial configuration of the {es} {security-features}. 
It uses the -{stack-ov}/built-in-users.html#bootstrap-elastic-passwords[`elastic` bootstrap password] +<> to run user management API requests. After you set a password for the `elastic` user, the bootstrap password is no longer active and you cannot use this command. Instead, you can change passwords by using the *Management > Users* UI in {kib} @@ -36,7 +36,7 @@ location, ensure that the *ES_PATH_CONF* environment variable returns the correct path before you run the `elasticsearch-setup-passwords` command. You can override settings in your `elasticsearch.yml` file by using the `-E` command option. For more information about debugging connection failures, see -{stack-ov}/trb-security-setup.html[`elasticsearch-setup-passwords` command fails due to connection failure]. +<>. [float] === Parameters diff --git a/docs/reference/commands/users-command.asciidoc b/docs/reference/commands/users-command.asciidoc index cf678f2138df..d359d3b9b4db 100644 --- a/docs/reference/commands/users-command.asciidoc +++ b/docs/reference/commands/users-command.asciidoc @@ -33,7 +33,7 @@ Leading or trailing whitespace is not allowed. Passwords must be at least 6 characters long. -For more information, see {xpack-ref}/file-realm.html[File-based User Authentication]. +For more information, see <>. TIP: To ensure that {es} can read the user and role information at startup, run `elasticsearch-users useradd` as the same user you use to run {es}. Running the diff --git a/docs/reference/glossary.asciidoc b/docs/reference/glossary.asciidoc index 44acba941b72..09b743b07e2e 100644 --- a/docs/reference/glossary.asciidoc +++ b/docs/reference/glossary.asciidoc @@ -36,7 +36,7 @@ current master node fails. // tag::ccr-def[] The {ccr} feature enables you to replicate indices in remote clusters to your local cluster. For more information, see -{stack-ov}/xpack-ccr.html[{ccr-cap}]. +{ref}/xpack-ccr.html[{ccr-cap}]. // end::ccr-def[] [[glossary-ccs]] {ccs} (CCS):: diff --git a/docs/reference/high-availability/backup-and-restore-security-config.asciidoc b/docs/reference/high-availability/backup-and-restore-security-config.asciidoc index 6b9ad8cd07e3..8cc2bc174e53 100644 --- a/docs/reference/high-availability/backup-and-restore-security-config.asciidoc +++ b/docs/reference/high-availability/backup-and-restore-security-config.asciidoc @@ -95,7 +95,7 @@ prevent non-administrators exfiltrating data. + -- The following example creates a new user `snapshot_user` in the -{stack-ov}/native-realm.html[native realm], but it is not important which +<>, but it is not important which realm the user is a member of: [source,console] @@ -202,7 +202,7 @@ Then log into one of the node hosts, navigate to {es} installation directory, and follow these steps: . Add a new user with the `superuser` built-in role to the -{stack-ov}/file-realm.html[file realm]. +<>. + -- For example, create a user named `restore_user`: diff --git a/docs/reference/high-availability/backup-cluster-data.asciidoc b/docs/reference/high-availability/backup-cluster-data.asciidoc index ed0c732cdb4b..485e047acd25 100644 --- a/docs/reference/high-availability/backup-cluster-data.asciidoc +++ b/docs/reference/high-availability/backup-cluster-data.asciidoc @@ -22,6 +22,6 @@ It does *not* grant privileges to create repositories, restore snapshots, or search within indices. Hence, the user can view and snapshot all indices, but cannot access or modify any data. 
-For more information, see {stack-ov}/security-privileges.html[Security privileges] -and {stack-ov}/built-in-roles.html[Built-in roles]. +For more information, see <> +and <>. ==== diff --git a/docs/reference/ilm/apis/delete-lifecycle.asciidoc b/docs/reference/ilm/apis/delete-lifecycle.asciidoc index 13d60661ebaf..653861fbacf5 100644 --- a/docs/reference/ilm/apis/delete-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/delete-lifecycle.asciidoc @@ -30,7 +30,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] ==== Authorization You must have the `manage_ilm` cluster privilege to use this API. -For more information, see {stack-ov}/security-privileges.html[Security Privileges]. +For more information, see <>. ==== Examples diff --git a/docs/reference/ilm/apis/explain.asciidoc b/docs/reference/ilm/apis/explain.asciidoc index 78bb76e46cf0..dfbfec0b840a 100644 --- a/docs/reference/ilm/apis/explain.asciidoc +++ b/docs/reference/ilm/apis/explain.asciidoc @@ -41,7 +41,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] You must have the `view_index_metadata` or `manage_ilm` or both privileges on the indices being managed to use this API. -For more information, see {stack-ov}/security-privileges.html[Security Privileges]. +For more information, see <>. ==== Examples diff --git a/docs/reference/ilm/apis/get-lifecycle.asciidoc b/docs/reference/ilm/apis/get-lifecycle.asciidoc index 2fbdd03f43ec..2cac8f0415b0 100644 --- a/docs/reference/ilm/apis/get-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/get-lifecycle.asciidoc @@ -30,7 +30,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] ==== Authorization You must have the `manage_ilm` or `read_ilm` or both cluster privileges to use this API. -For more information, see {stack-ov}/security-privileges.html[Security Privileges]. +For more information, see <>. ==== Examples diff --git a/docs/reference/ilm/apis/get-status.asciidoc b/docs/reference/ilm/apis/get-status.asciidoc index ce983e8d0ee3..8f4b4fb27712 100644 --- a/docs/reference/ilm/apis/get-status.asciidoc +++ b/docs/reference/ilm/apis/get-status.asciidoc @@ -28,7 +28,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] ==== Authorization You must have the `manage_ilm` or `read_ilm` or both cluster privileges to use this API. -For more information, see {stack-ov}/security-privileges.html[Security Privileges]. +For more information, see <>. ==== Examples diff --git a/docs/reference/ilm/apis/move-to-step.asciidoc b/docs/reference/ilm/apis/move-to-step.asciidoc index 1f49d501462b..27b16c44a0bc 100644 --- a/docs/reference/ilm/apis/move-to-step.asciidoc +++ b/docs/reference/ilm/apis/move-to-step.asciidoc @@ -39,7 +39,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] ==== Authorization You must have the `manage_ilm` privileges on the indices being managed to use this API. -For more information, see {stack-ov}/security-privileges.html[Security Privileges]. +For more information, see <>. ==== Examples diff --git a/docs/reference/ilm/apis/put-lifecycle.asciidoc b/docs/reference/ilm/apis/put-lifecycle.asciidoc index d940a2a28b04..4e1a345d6398 100644 --- a/docs/reference/ilm/apis/put-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/put-lifecycle.asciidoc @@ -36,7 +36,7 @@ You must have the `manage_ilm` cluster privilege to use this API. You must also have the `manage` index privilege on all indices being managed by `policy`. 
All operations executed by {ilm} for a policy are executed as the user that put the latest version of a policy. -For more information, see {stack-ov}/security-privileges.html[Security Privileges]. +For more information, see <>. ==== Examples diff --git a/docs/reference/ilm/apis/remove-policy-from-index.asciidoc b/docs/reference/ilm/apis/remove-policy-from-index.asciidoc index 09ae762d04f6..d5a94adc1c71 100644 --- a/docs/reference/ilm/apis/remove-policy-from-index.asciidoc +++ b/docs/reference/ilm/apis/remove-policy-from-index.asciidoc @@ -30,7 +30,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] ==== Authorization You must have the `manage_ilm` privileges on the indices being managed to use this API. -For more information, see {stack-ov}/security-privileges.html[Security Privileges]. +For more information, see <>. ==== Examples diff --git a/docs/reference/ilm/apis/retry-policy.asciidoc b/docs/reference/ilm/apis/retry-policy.asciidoc index d3d5bcdfad9f..1bf202c657fe 100644 --- a/docs/reference/ilm/apis/retry-policy.asciidoc +++ b/docs/reference/ilm/apis/retry-policy.asciidoc @@ -30,7 +30,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] ==== Authorization You must have the `manage_ilm` privileges on the indices being managed to use this API. -For more information, see {stack-ov}/security-privileges.html[Security Privileges]. +For more information, see <>. ==== Examples diff --git a/docs/reference/ilm/apis/slm-api.asciidoc b/docs/reference/ilm/apis/slm-api.asciidoc index c9db692b2e8a..4ac7a0b45331 100644 --- a/docs/reference/ilm/apis/slm-api.asciidoc +++ b/docs/reference/ilm/apis/slm-api.asciidoc @@ -43,7 +43,7 @@ You must have the `manage_slm` cluster privilege to use this API. You must also have the `manage` index privilege on all indices being managed by `policy`. All operations executed by {slm} for a policy are executed as the user that put the latest version of a policy. For more information, see -{stack-ov}/security-privileges.html[Security Privileges]. +<>. ==== Example diff --git a/docs/reference/ilm/apis/start.asciidoc b/docs/reference/ilm/apis/start.asciidoc index f80bd244956c..e75f3dbbfea1 100644 --- a/docs/reference/ilm/apis/start.asciidoc +++ b/docs/reference/ilm/apis/start.asciidoc @@ -27,7 +27,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] ==== Authorization You must have the `manage_ilm` cluster privilege to use this API. -For more information, see {stack-ov}/security-privileges.html[Security privileges]. +For more information, see <>. ==== Examples diff --git a/docs/reference/ilm/apis/stop.asciidoc b/docs/reference/ilm/apis/stop.asciidoc index 0d16140054f1..dfad9710f8ef 100644 --- a/docs/reference/ilm/apis/stop.asciidoc +++ b/docs/reference/ilm/apis/stop.asciidoc @@ -32,7 +32,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] ==== Authorization You must have the `manage_ilm` cluster privilege to use this API. -For more information, see {stack-ov}/security-privileges.html[Security Privileges]. +For more information, see <>. ==== Examples diff --git a/docs/reference/licensing/delete-license.asciidoc b/docs/reference/licensing/delete-license.asciidoc index b086fdac009a..6b70cfde7c94 100644 --- a/docs/reference/licensing/delete-license.asciidoc +++ b/docs/reference/licensing/delete-license.asciidoc @@ -17,14 +17,14 @@ This API enables you to delete licensing information. ==== Description When your license expires, {xpack} operates in a degraded mode. 
For more -information, see {xpack-ref}/license-expiration.html[License Expiration]. +information, see {stack-ov}/license-expiration.html[License Expiration]. [float] ==== Authorization You must have `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +<>. [float] ==== Examples diff --git a/docs/reference/licensing/get-basic-status.asciidoc b/docs/reference/licensing/get-basic-status.asciidoc index 59b91d1c6c99..7ad601023ba7 100644 --- a/docs/reference/licensing/get-basic-status.asciidoc +++ b/docs/reference/licensing/get-basic-status.asciidoc @@ -25,8 +25,7 @@ https://www.elastic.co/subscriptions. ==== Authorization You must have `monitor` cluster privileges to use this API. -For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +For more information, see <>. [float] ==== Examples diff --git a/docs/reference/licensing/get-license.asciidoc b/docs/reference/licensing/get-license.asciidoc index f38e8c0fffdb..807a40729f9f 100644 --- a/docs/reference/licensing/get-license.asciidoc +++ b/docs/reference/licensing/get-license.asciidoc @@ -35,8 +35,7 @@ https://www.elastic.co/subscriptions. ==== Authorization You must have `monitor` cluster privileges to use this API. -For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +For more information, see <>. [float] diff --git a/docs/reference/licensing/get-trial-status.asciidoc b/docs/reference/licensing/get-trial-status.asciidoc index cb1a3772ca06..b411f17b9e3c 100644 --- a/docs/reference/licensing/get-trial-status.asciidoc +++ b/docs/reference/licensing/get-trial-status.asciidoc @@ -32,7 +32,7 @@ https://www.elastic.co/subscriptions. You must have `monitor` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +<>. [float] ==== Examples diff --git a/docs/reference/licensing/start-basic.asciidoc b/docs/reference/licensing/start-basic.asciidoc index dd421bfda150..8dbc1425b0b2 100644 --- a/docs/reference/licensing/start-basic.asciidoc +++ b/docs/reference/licensing/start-basic.asciidoc @@ -32,7 +32,7 @@ https://www.elastic.co/subscriptions. You must have `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +<>. [float] ==== Examples diff --git a/docs/reference/licensing/start-trial.asciidoc b/docs/reference/licensing/start-trial.asciidoc index df5fd1dfae66..62123d2ab425 100644 --- a/docs/reference/licensing/start-trial.asciidoc +++ b/docs/reference/licensing/start-trial.asciidoc @@ -35,7 +35,7 @@ https://www.elastic.co/subscriptions. You must have `manage` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +<>. [float] ==== Examples diff --git a/docs/reference/licensing/update-license.asciidoc b/docs/reference/licensing/update-license.asciidoc index c4c8815b3c7f..268e0b768b57 100644 --- a/docs/reference/licensing/update-license.asciidoc +++ b/docs/reference/licensing/update-license.asciidoc @@ -160,4 +160,4 @@ curl -XPUT -u elastic 'http://:/_license?acknowledge=true' -H "Conte // NOTCONSOLE For more information about the features that are disabled when you downgrade -your license, see {xpack-ref}/license-expiration.html[License Expiration]. +your license, see {stack-ov}/license-expiration.html[License Expiration]. 
diff --git a/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc index 48feb40f416e..82375be06546 100644 --- a/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc @@ -26,7 +26,7 @@ operations, but you can still explore and navigate results. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-close-job-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/datafeedresource.asciidoc b/docs/reference/ml/anomaly-detection/apis/datafeedresource.asciidoc index 32e1237415d9..2cdc695ed91e 100644 --- a/docs/reference/ml/anomaly-detection/apis/datafeedresource.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/datafeedresource.asciidoc @@ -9,7 +9,7 @@ A {dfeed} resource has the following properties: (object) If set, the {dfeed} performs aggregation searches. Support for aggregations is limited and should only be used with low cardinality data. For more information, see - {xpack-ref}/ml-configuring-aggregation.html[Aggregating Data for Faster Performance]. + {stack-ov}/ml-configuring-aggregation.html[Aggregating Data for Faster Performance]. `chunking_config`:: (object) Specifies how data searches are split into time chunks. @@ -53,7 +53,7 @@ A {dfeed} resource has the following properties: The <> in a job can contain functions that use these script fields. For more information, see - {xpack-ref}/ml-configuring-transform.html[Transforming Data With Script Fields]. + {stack-ov}/ml-configuring-transform.html[Transforming Data With Script Fields]. `scroll_size`:: (unsigned integer) The `size` parameter that is used in {es} searches. diff --git a/docs/reference/ml/anomaly-detection/apis/delete-calendar-event.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-calendar-event.asciidoc index 7f7978043a91..e0be1e135afc 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-calendar-event.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-calendar-event.asciidoc @@ -18,7 +18,7 @@ Deletes scheduled events from a calendar. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-delete-calendar-event-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/delete-calendar-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-calendar-job.asciidoc index 19e6c11fd483..dfe56c9388d8 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-calendar-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-calendar-job.asciidoc @@ -18,7 +18,7 @@ Deletes {anomaly-jobs} from a calendar. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. 
[[ml-delete-calendar-job-path-parms]] ==== {api-path-parms-title} diff --git a/docs/reference/ml/anomaly-detection/apis/delete-calendar.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-calendar.asciidoc index 48aac40b4320..c9dad6bbba19 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-calendar.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-calendar.asciidoc @@ -18,7 +18,7 @@ Deletes a calendar. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-delete-calendar-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/delete-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-datafeed.asciidoc index 1509b98f6365..21b4eb75bef0 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-datafeed.asciidoc @@ -22,7 +22,7 @@ Deletes an existing {dfeed}. can delete it. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-delete-datafeed-path-parms]] ==== {api-path-parms-title} diff --git a/docs/reference/ml/anomaly-detection/apis/delete-expired-data.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-expired-data.asciidoc index c3375f968f4f..c1450ec43f8e 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-expired-data.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-expired-data.asciidoc @@ -18,7 +18,7 @@ Deletes expired and unused machine learning data. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-delete-expired-data-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/delete-filter.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-filter.asciidoc index 0b9de2b698e1..d6c563fbe0a8 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-filter.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-filter.asciidoc @@ -18,7 +18,7 @@ Deletes a filter. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-delete-filter-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/delete-forecast.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-forecast.asciidoc index dd6577f2262b..d723b3fba487 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-forecast.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-forecast.asciidoc @@ -22,7 +22,7 @@ Deletes forecasts from a {ml} job. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. 
[[ml-delete-forecast-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc index 202f7cfcd333..04832663bc35 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc @@ -17,8 +17,7 @@ Deletes an existing {anomaly-job}. ==== {api-prereq-title} * If {es} {security-features} are enabled, you must have `manage_ml` or `manage` -cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +cluster privileges to use this API. See <>. [[ml-delete-job-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/delete-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-snapshot.asciidoc index 1fa01e789fac..b06c6a6a9c1a 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-snapshot.asciidoc @@ -17,8 +17,7 @@ Deletes an existing model snapshot. ==== {api-prereq-title} * If the {es} {security-features} are enabled, you must have `manage_ml` or -`manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +`manage` cluster privileges to use this API. See <>. [[ml-delete-snapshot-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/eventresource.asciidoc b/docs/reference/ml/anomaly-detection/apis/eventresource.asciidoc index 6c552b68f368..4fb179be3cbc 100644 --- a/docs/reference/ml/anomaly-detection/apis/eventresource.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/eventresource.asciidoc @@ -24,4 +24,4 @@ An events resource has the following properties: in milliseconds since the epoch or ISO 8601 format. For more information, see -{xpack-ref}/ml-calendars.html[Calendars and Scheduled Events]. +{stack-ov}/ml-calendars.html[Calendars and Scheduled Events]. diff --git a/docs/reference/ml/anomaly-detection/apis/find-file-structure.asciidoc b/docs/reference/ml/anomaly-detection/apis/find-file-structure.asciidoc index 5fa56eacb820..52afbab9c704 100644 --- a/docs/reference/ml/anomaly-detection/apis/find-file-structure.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/find-file-structure.asciidoc @@ -21,7 +21,7 @@ suitable to be ingested into {es}. * If the {es} {security-features} are enabled, you must have `monitor_ml` or `monitor` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-find-file-structure-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/flush-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/flush-job.asciidoc index a306817e3c04..7afef6eabde4 100644 --- a/docs/reference/ml/anomaly-detection/apis/flush-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/flush-job.asciidoc @@ -18,7 +18,7 @@ Forces any buffered data to be processed by the job. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. 
[[ml-flush-job-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/forecast.asciidoc b/docs/reference/ml/anomaly-detection/apis/forecast.asciidoc index ee53f3876d6a..3bfc8b51b453 100644 --- a/docs/reference/ml/anomaly-detection/apis/forecast.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/forecast.asciidoc @@ -18,7 +18,7 @@ Predicts the future behavior of a time series by using its historical behavior. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-forecast-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-bucket.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-bucket.asciidoc index d9db7484a6de..91c473ebec91 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-bucket.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-bucket.asciidoc @@ -23,8 +23,8 @@ Retrieves {anomaly-job} results for one or more buckets. need `read` index privilege on the index that stores the results. The `machine_learning_admin` and `machine_learning_user` roles provide these privileges. For more information, see -{stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +<> and +<>. [[ml-get-bucket-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc index 6d6dbbcdadd6..adf50483e25a 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc @@ -20,7 +20,7 @@ Retrieves information about the scheduled events in calendars. * If the {es} {security-features} are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-get-calendar-event-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc index d30852bc8730..9552ba687033 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc @@ -20,7 +20,7 @@ Retrieves configuration information for calendars. * If the {es} {security-features} are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-get-calendar-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-category.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-category.asciidoc index 6ce22a6f34c9..1f7955873451 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-category.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-category.asciidoc @@ -22,8 +22,8 @@ Retrieves {anomaly-job} results for one or more categories. `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also need `read` index privilege on the index that stores the results. The `machine_learning_admin` and `machine_learning_user` roles provide these -privileges. See {stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +privileges. 
See <> and +<>. [[ml-get-category-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc index ae142720e273..bd126a651e26 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc @@ -26,7 +26,7 @@ Retrieves usage information for {dfeeds}. * If the {es} {security-features} are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-get-datafeed-stats-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc index 1903be47e661..3330ae7b821d 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc @@ -26,7 +26,7 @@ Retrieves configuration information for {dfeeds}. * If the {es} {security-features} are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-get-datafeed-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-filter.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-filter.asciidoc index 7afe33910e8e..6ed8ee4fa0c4 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-filter.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-filter.asciidoc @@ -20,7 +20,7 @@ Retrieves filters. * If the {es} {security-features} are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-get-filter-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc index da8d5df00c19..a2da47720c9e 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc @@ -20,8 +20,8 @@ Retrieves {anomaly-job} results for one or more influencers. `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also need `read` index privilege on the index that stores the results. The `machine_learning_admin` and `machine_learning_user` roles provide these -privileges. See {stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +privileges. See <> and +<>. [[ml-get-influencer-path-parms]] ==== {api-path-parms-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc index 0944c0451710..9c7bcc6e7b39 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc @@ -24,7 +24,7 @@ Retrieves usage information for {anomaly-jobs}. * If the {es} {security-features} are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. 
[[ml-get-job-stats-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc index c1d6be4eb1c1..e32513d9fdca 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc @@ -24,7 +24,7 @@ Retrieves configuration information for {anomaly-jobs}. * If the {es} {security-features} are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-get-job-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-ml-info.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-ml-info.asciidoc index a90ed90ebc67..aa7a6357eaf5 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-ml-info.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-ml-info.asciidoc @@ -21,8 +21,8 @@ Returns defaults and limits used by machine learning. * If the {es} {security-features} are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. The `machine_learning_admin` and `machine_learning_user` roles provide these -privileges. See {stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +privileges. See <> and +<>. [[get-ml-info-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-overall-buckets.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-overall-buckets.asciidoc index db33ef64394d..43a7de51d989 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-overall-buckets.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-overall-buckets.asciidoc @@ -25,8 +25,7 @@ Retrieves overall bucket results that summarize the bucket results of multiple `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also need `read` index privilege on the index that stores the results. The `machine_learning_admin` and `machine_learning_user` roles provide these -privileges. See {stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +privileges. See <> and <>. [[ml-get-overall-buckets-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc index 8bbf53f28bc5..a850524872c0 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc @@ -20,8 +20,7 @@ Retrieves anomaly records for an {anomaly-job}. `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. You also need `read` index privilege on the index that stores the results. The `machine_learning_admin` and `machine_learning_user` roles provide these -privileges. See {stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +privileges. See <> and <>. 
[[ml-get-record-path-parms]] ==== {api-path-parms-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc index 733de6bece5b..04d09b50d331 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc @@ -20,7 +20,7 @@ Retrieves information about model snapshots. * If the {es} {security-features} are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-get-snapshot-path-parms]] ==== {api-path-parms-title} diff --git a/docs/reference/ml/anomaly-detection/apis/jobresource.asciidoc b/docs/reference/ml/anomaly-detection/apis/jobresource.asciidoc index 623e5a74de2f..f650e81623a4 100644 --- a/docs/reference/ml/anomaly-detection/apis/jobresource.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/jobresource.asciidoc @@ -33,7 +33,7 @@ so do not set the `background_persist_interval` value too low. `custom_settings`:: (object) Advanced configuration option. Contains custom meta data about the job. For example, it can contain custom URL information as shown in - {xpack-ref}/ml-configuring-url.html[Adding Custom URLs to Machine Learning Results]. + {stack-ov}/ml-configuring-url.html[Adding Custom URLs to Machine Learning Results]. `data_description`:: (object) Describes the data format and how APIs parse timestamp fields. @@ -110,7 +110,7 @@ An analysis configuration object has the following properties: be categorized. The resulting categories must be used in a detector by setting `by_field_name`, `over_field_name`, or `partition_field_name` to the keyword `mlcategory`. For more information, see - {xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages]. + {stack-ov}/ml-configuring-categories.html[Categorizing Log Messages]. `categorization_filters`:: (array of strings) If `categorization_field_name` is specified, @@ -120,7 +120,7 @@ An analysis configuration object has the following properties: tune the categorization by excluding sequences from consideration when categories are defined. For example, you can exclude SQL statements that appear in your log files. For more information, see - {xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages]. + {stack-ov}/ml-configuring-categories.html[Categorizing Log Messages]. This property cannot be used at the same time as `categorization_analyzer`. If you only want to define simple regular expression filters that are applied prior to tokenization, setting this property is the easiest method. @@ -243,14 +243,14 @@ NOTE: The `field_name` cannot contain double quotes or backslashes. `function`:: (string) The analysis function that is used. For example, `count`, `rare`, `mean`, `min`, `max`, and `sum`. For more - information, see {xpack-ref}/ml-functions.html[Function Reference]. + information, see {stack-ov}/ml-functions.html[Function Reference]. `over_field_name`:: (string) The field used to split the data. In particular, this property is used for analyzing the splits with respect to the history of all splits. It is used for finding unusual values in the population of all splits. For more information, see - {xpack-ref}/ml-configuring-pop.html[Performing Population Analysis]. + {stack-ov}/ml-configuring-pop.html[Performing population analysis]. `partition_field_name`:: (string) The field used to segment the analysis. 
@@ -406,7 +406,7 @@ the categorization analyzer produces then you find the original document that the categorization field value came from. For more information, see -{xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages]. +{stack-ov}/ml-configuring-categories.html[Categorizing log messages]. [float] [[ml-detector-custom-rule]] @@ -489,7 +489,7 @@ The `analysis_limits` object has the following properties: -- NOTE: The `categorization_examples_limit` only applies to analysis that uses categorization. For more information, see -{xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages]. +{stack-ov}/ml-configuring-categories.html[Categorizing log messages]. -- diff --git a/docs/reference/ml/anomaly-detection/apis/open-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/open-job.asciidoc index d1425d258339..5914ec502f10 100644 --- a/docs/reference/ml/anomaly-detection/apis/open-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/open-job.asciidoc @@ -18,7 +18,7 @@ Opens one or more {anomaly-jobs}. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-open-job-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/post-calendar-event.asciidoc b/docs/reference/ml/anomaly-detection/apis/post-calendar-event.asciidoc index b4502bcb72a7..d7d3feedfdf3 100644 --- a/docs/reference/ml/anomaly-detection/apis/post-calendar-event.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/post-calendar-event.asciidoc @@ -18,7 +18,7 @@ Posts scheduled events in a calendar. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-post-calendar-event-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/post-data.asciidoc b/docs/reference/ml/anomaly-detection/apis/post-data.asciidoc index 0f092ebeddc9..a1e2120728ac 100644 --- a/docs/reference/ml/anomaly-detection/apis/post-data.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/post-data.asciidoc @@ -18,7 +18,7 @@ Sends data to an anomaly detection job for analysis. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-post-data-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/preview-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/preview-datafeed.asciidoc index e2aa8699b323..c3afca8b03c6 100644 --- a/docs/reference/ml/anomaly-detection/apis/preview-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/preview-datafeed.asciidoc @@ -20,7 +20,7 @@ Previews a {dfeed}. * If {es} {security-features} are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. 
[[ml-preview-datafeed-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/put-calendar-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-calendar-job.asciidoc index f6eb36db5fba..7ba652b60a19 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-calendar-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-calendar-job.asciidoc @@ -18,7 +18,7 @@ Adds an {anomaly-job} to a calendar. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-put-calendar-job-path-parms]] ==== {api-path-parms-title} diff --git a/docs/reference/ml/anomaly-detection/apis/put-calendar.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-calendar.asciidoc index 764e3e61060f..09a65f4300de 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-calendar.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-calendar.asciidoc @@ -18,7 +18,7 @@ Instantiates a calendar. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-put-calendar-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc index e6c64c58e60e..899f8cfe5cd9 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc @@ -21,7 +21,7 @@ Instantiates a {dfeed}. * You must create an {anomaly-job} before you create a {dfeed}. * If {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-put-datafeed-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/put-filter.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-filter.asciidoc index 41b276010265..12fadac25d6d 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-filter.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-filter.asciidoc @@ -18,7 +18,7 @@ Instantiates a filter. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-put-filter-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc index 6b331bbf5569..930b12aa8313 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc @@ -18,7 +18,7 @@ Instantiates an {anomaly-job}. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. 
[[ml-put-job-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/resultsresource.asciidoc b/docs/reference/ml/anomaly-detection/apis/resultsresource.asciidoc index f2533bbd0734..ce6a8a90015a 100644 --- a/docs/reference/ml/anomaly-detection/apis/resultsresource.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/resultsresource.asciidoc @@ -38,7 +38,7 @@ Categorization results contain the definitions of _categories_ that have been identified. These are only applicable for jobs that are configured to analyze unstructured log data using categorization. These results do not contain a timestamp or any calculated scores. For more information, see -{xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages]. +{stack-ov}/ml-configuring-categories.html[Categorizing log messages]. * <> * <> diff --git a/docs/reference/ml/anomaly-detection/apis/revert-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/revert-snapshot.asciidoc index a839683b0de9..f04db39e25eb 100644 --- a/docs/reference/ml/anomaly-detection/apis/revert-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/revert-snapshot.asciidoc @@ -19,7 +19,7 @@ Reverts to a specific snapshot. * Before you revert to a saved snapshot, you must close the job. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-revert-snapshot-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/set-upgrade-mode.asciidoc b/docs/reference/ml/anomaly-detection/apis/set-upgrade-mode.asciidoc index 7636f3bd6d31..34d8d4c1cd0d 100644 --- a/docs/reference/ml/anomaly-detection/apis/set-upgrade-mode.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/set-upgrade-mode.asciidoc @@ -29,7 +29,7 @@ POST /_ml/set_upgrade_mode?enabled=false&timeout=10m * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-set-upgrade-mode-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/start-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/start-datafeed.asciidoc index 0d5404fda4dc..7faba863774d 100644 --- a/docs/reference/ml/anomaly-detection/apis/start-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/start-datafeed.asciidoc @@ -22,7 +22,7 @@ Starts one or more {dfeeds}. error occurs. * If {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-start-datafeed-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc index 8094a8a8cb59..cde9f16c384a 100644 --- a/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc @@ -24,7 +24,7 @@ Stops one or more {dfeeds}. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. 
[[ml-stop-datafeed-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc index a3685047521a..910bb727e976 100644 --- a/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc @@ -22,7 +22,7 @@ Updates certain properties of a {dfeed}. * If {es} {security-features} are enabled, you must have `manage_ml`, or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-update-datafeed-desc]] diff --git a/docs/reference/ml/anomaly-detection/apis/update-filter.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-filter.asciidoc index be939230971a..a4aa5c3cab15 100644 --- a/docs/reference/ml/anomaly-detection/apis/update-filter.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-filter.asciidoc @@ -18,7 +18,7 @@ Updates the description of a filter, adds items, or removes items. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-update-filter-path-parms]] ==== {api-path-parms-title} diff --git a/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc index 53bec1a1941d..9676f7fd34f9 100644 --- a/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc @@ -18,7 +18,7 @@ Updates certain properties of an {anomaly-job}. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-update-job-path-parms]] diff --git a/docs/reference/ml/anomaly-detection/apis/update-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-snapshot.asciidoc index beda52bf1408..1eb3e78e69ef 100644 --- a/docs/reference/ml/anomaly-detection/apis/update-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-snapshot.asciidoc @@ -18,7 +18,7 @@ Updates certain properties of a snapshot. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-update-snapshot-path-parms]] diff --git a/docs/reference/ml/anomaly-detection/apis/validate-detector.asciidoc b/docs/reference/ml/anomaly-detection/apis/validate-detector.asciidoc index daf8d7d4c691..74f7e717a063 100644 --- a/docs/reference/ml/anomaly-detection/apis/validate-detector.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/validate-detector.asciidoc @@ -18,7 +18,7 @@ Validates detector configuration information. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. 
[[ml-valid-detector-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/anomaly-detection/apis/validate-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/validate-job.asciidoc index 8b753b5d58ce..8b094d36b274 100644 --- a/docs/reference/ml/anomaly-detection/apis/validate-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/validate-job.asciidoc @@ -18,7 +18,7 @@ Validates {anomaly-job} configuration information. * If the {es} {security-features} are enabled, you must have `manage_ml` or `manage` cluster privileges to use this API. See -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[ml-valid-job-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/df-analytics/apis/delete-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/delete-dfanalytics.asciidoc index 5f20b0341521..7816931161bb 100644 --- a/docs/reference/ml/df-analytics/apis/delete-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/delete-dfanalytics.asciidoc @@ -20,8 +20,7 @@ experimental[] ==== {api-prereq-title} * You must have `machine_learning_admin` built-in role to use this API. For more -information, see {stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +information, see <> and <>. [[ml-delete-dfanalytics-path-params]] ==== {api-path-parms-title} diff --git a/docs/reference/ml/df-analytics/apis/estimate-memory-usage-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/estimate-memory-usage-dfanalytics.asciidoc index d91fdcaffbc6..64db472dfd1e 100644 --- a/docs/reference/ml/df-analytics/apis/estimate-memory-usage-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/estimate-memory-usage-dfanalytics.asciidoc @@ -21,8 +21,7 @@ experimental[] ==== {api-prereq-title} * You must have `monitor_ml` privilege to use this API. For more -information, see {stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +information, see <> and <>. [[ml-estimate-memory-usage-dfanalytics-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc index 16b7fe5a3f59..fec0d78c5aef 100644 --- a/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc @@ -21,8 +21,7 @@ experimental[] ==== {api-prereq-title} * You must have `monitor_ml` privilege to use this API. For more -information, see {stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +information, see <> and <>. [[ml-evaluate-dfanalytics-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc b/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc index fd23d6be6bad..ab065e2622da 100644 --- a/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc +++ b/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc @@ -29,8 +29,7 @@ experimental[] ==== {api-prereq-title} * You must have `monitor_ml` privilege to use this API. For more -information, see {stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +information, see <> and <>. 
[[ml-get-dfanalytics-stats-path-params]] diff --git a/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc index 88b4526efca1..fda6039f88cf 100644 --- a/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc @@ -26,8 +26,7 @@ experimental[] ==== {api-prereq-title} * You must have `monitor_ml` privilege to use this API. For more -information, see {stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +information, see <> and <>. [[ml-get-dfanalytics-desc]] ==== {api-description-title} diff --git a/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc index 714c739f71ce..2386d1e7f74b 100644 --- a/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc @@ -23,8 +23,7 @@ experimental[] * You must have `machine_learning_admin` built-in role to use this API. You must also have `read` and `view_index_metadata` privileges on the source index and `read`, `create_index`, and `index` privileges on the destination index. For -more information, see {stack-ov}/security-privileges.html[Security privileges] -and {stack-ov}/built-in-roles.html[Built-in roles]. +more information, see <> and <>. [[ml-put-dfanalytics-desc]] diff --git a/docs/reference/ml/df-analytics/apis/start-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/start-dfanalytics.asciidoc index 7dcd01f49f07..9ffbfc3d9c23 100644 --- a/docs/reference/ml/df-analytics/apis/start-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/start-dfanalytics.asciidoc @@ -23,8 +23,7 @@ experimental[] * You must have `machine_learning_admin` built-in role to use this API. You must also have `read` and `view_index_metadata` privileges on the source index and `read`, `create_index`, and `index` privileges on the destination index. For -more information, see {stack-ov}/security-privileges.html[Security privileges] -and {stack-ov}/built-in-roles.html[Built-in roles]. +more information, see <> and <>. [[ml-start-dfanalytics-path-params]] ==== {api-path-parms-title} diff --git a/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc index 0fc0a35b98f5..8c9f705062cb 100644 --- a/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc @@ -25,8 +25,7 @@ experimental[] ==== {api-prereq-title} * You must have `machine_learning_admin` built-in role to use this API. For more -information, see {stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +information, see <> and <>. [[ml-stop-dfanalytics-desc]] ==== {api-description-title} diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index fc60623c7394..1ab58ac2bf54 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -9,7 +9,7 @@ endif::[] ifdef::include-xpack[] The _remote clusters_ module enables you to establish uni-directional connections to a remote cluster. This functionality is used in -{stack-ov}/xpack-ccr.html[{ccr}] and +<> and <>. 
endif::[] diff --git a/docs/reference/monitoring/collecting-monitoring-data.asciidoc b/docs/reference/monitoring/collecting-monitoring-data.asciidoc index bd47d94ba195..612911a3ca17 100644 --- a/docs/reference/monitoring/collecting-monitoring-data.asciidoc +++ b/docs/reference/monitoring/collecting-monitoring-data.asciidoc @@ -18,8 +18,7 @@ Advanced monitoring settings enable you to control how frequently data is collected, configure timeouts, and set the retention period for locally-stored monitoring indices. You can also adjust how monitoring data is displayed. -To learn about monitoring in general, see -{stack-ov}/xpack-monitoring.html[Monitoring the {stack}]. +To learn about monitoring in general, see <>. . Configure your cluster to collect monitoring data: @@ -131,9 +130,9 @@ xpack.monitoring.exporters: must provide appropriate credentials when data is shipped to the monitoring cluster: ... Create a user on the monitoring cluster that has the -{stack-ov}/built-in-roles.html[`remote_monitoring_agent` built-in role]. +<>. Alternatively, use the -{stack-ov}/built-in-users.html[`remote_monitoring_user` built-in user]. +<>. ... Add the user ID and password settings to the HTTP exporter settings in the `elasticsearch.yml` file on each node. + @@ -197,8 +196,7 @@ xpack.monitoring.exporters: . Configure your cluster to route monitoring data from sources such as {kib}, Beats, and {ls} to the monitoring cluster. For information about configuring -each product to collect and send monitoring data, see -{stack-ov}/xpack-monitoring.html[Monitoring the {stack}]. +each product to collect and send monitoring data, see <>. . If you updated settings in the `elasticsearch.yml` files on your production cluster, restart {es}. See <> and <>. diff --git a/docs/reference/monitoring/configuring-filebeat.asciidoc b/docs/reference/monitoring/configuring-filebeat.asciidoc index 88b9859c87f7..d51c65bbac8d 100644 --- a/docs/reference/monitoring/configuring-filebeat.asciidoc +++ b/docs/reference/monitoring/configuring-filebeat.asciidoc @@ -39,12 +39,12 @@ For more information, see <> and < + -- The {filebeat} {es} module can handle -{stack-ov}/audit-log-output.html[audit logs], -{ref}/logging.html#deprecation-logging[deprecation logs], -{ref}/gc-logging.html[gc logs], {ref}/logging.html[server logs], and -{ref}/index-modules-slowlog.html[slow logs]. +<>, +<>, +<>, <>, and +<>. For more information about the location of your {es} logs, see the -{ref}/path-settings.html[path.logs] setting. +<> setting. IMPORTANT: If there are both structured (`*.json`) and unstructured (plain text) versions of the logs, you must use the structured logs. Otherwise, they might @@ -117,7 +117,7 @@ If {security-features} are enabled, you must provide a valid user ID and password so that {filebeat} can connect to {kib}: .. Create a user on the monitoring cluster that has the -{stack-ov}/built-in-roles.html[`kibana_user` built-in role] or equivalent +<> or equivalent privileges. .. Add the `username` and `password` settings to the {es} output information in @@ -175,7 +175,7 @@ to file ownership or permissions when you try to run {filebeat} modules. See . Check whether the appropriate indices exist on the monitoring cluster. + -- -For example, use the {ref}/cat-indices.html[cat indices] command to verify +For example, use the <> command to verify that there are new `filebeat-*` indices. 
TIP: If you want to use the *Monitoring* UI in {kib}, there must also be diff --git a/docs/reference/monitoring/configuring-metricbeat.asciidoc b/docs/reference/monitoring/configuring-metricbeat.asciidoc index ea3aecfac2a0..7a1c1fddfb7e 100644 --- a/docs/reference/monitoring/configuring-metricbeat.asciidoc +++ b/docs/reference/monitoring/configuring-metricbeat.asciidoc @@ -14,9 +14,6 @@ as described in <>. image::monitoring/images/metricbeat.png[Example monitoring architecture] -To learn about monitoring in general, see -{stack-ov}/xpack-monitoring.html[Monitoring the {stack}]. - //NOTE: The tagged regions are re-used in the Stack Overview. . Enable the collection of monitoring data. + @@ -106,9 +103,9 @@ If Elastic {security-features} are enabled, you must also provide a user ID and password so that {metricbeat} can collect metrics successfully: .. Create a user on the production cluster that has the -{stack-ov}/built-in-roles.html[`remote_monitoring_collector` built-in role]. +<>. Alternatively, use the -{stack-ov}/built-in-users.html[`remote_monitoring_user` built-in user]. +<>. .. Add the `username` and `password` settings to the {es} module configuration file. @@ -171,9 +168,9 @@ provide a valid user ID and password so that {metricbeat} can send metrics successfully: .. Create a user on the monitoring cluster that has the -{stack-ov}/built-in-roles.html[`remote_monitoring_agent` built-in role]. +<>. Alternatively, use the -{stack-ov}/built-in-users.html[`remote_monitoring_user` built-in user]. +<>. .. Add the `username` and `password` settings to the {es} output information in the {metricbeat} configuration file. diff --git a/docs/reference/monitoring/production.asciidoc b/docs/reference/monitoring/production.asciidoc index 555c2b0cf123..ed6401a9f06b 100644 --- a/docs/reference/monitoring/production.asciidoc +++ b/docs/reference/monitoring/production.asciidoc @@ -60,12 +60,12 @@ credentials must be valid on both the {kib} server and the monitoring cluster. *** If you plan to use {metricbeat} to collect data about {es} or {kib}, create a user that has the `remote_monitoring_collector` built-in role and a user that has the `remote_monitoring_agent` -{stack-ov}/built-in-roles.html#built-in-roles-remote-monitoring-agent[built-in role]. Alternatively, use the -`remote_monitoring_user` {stack-ov}/built-in-users.html[built-in user]. +<>. Alternatively, use the +`remote_monitoring_user` <>. *** If you plan to use HTTP exporters to route data through your production cluster, create a user that has the `remote_monitoring_agent` -{stack-ov}/built-in-roles.html#built-in-roles-remote-monitoring-agent[built-in role]. +<>. + -- For example, the @@ -83,7 +83,7 @@ POST /_security/user/remote_monitor --------------------------------------------------------------- // TEST[skip:needs-gold+-license] -Alternatively, use the `remote_monitoring_user` {stack-ov}/built-in-users.html[built-in user]. +Alternatively, use the `remote_monitoring_user` <>. -- . Configure your production cluster to collect data and send it to the diff --git a/docs/reference/rollup/apis/delete-job.asciidoc b/docs/reference/rollup/apis/delete-job.asciidoc index e5babce43b2c..12ad52eceda3 100644 --- a/docs/reference/rollup/apis/delete-job.asciidoc +++ b/docs/reference/rollup/apis/delete-job.asciidoc @@ -21,7 +21,7 @@ experimental[] * If the {es} {security-features} are enabled, you must have `manage` or `manage_rollup` cluster privileges to use this API. 
For more information, see -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[rollup-delete-job-desc]] ==== {api-description-title} diff --git a/docs/reference/rollup/apis/get-job.asciidoc b/docs/reference/rollup/apis/get-job.asciidoc index 291ef6aabaa1..a38fae35ac66 100644 --- a/docs/reference/rollup/apis/get-job.asciidoc +++ b/docs/reference/rollup/apis/get-job.asciidoc @@ -20,7 +20,7 @@ experimental[] * You must have `monitor`, `monitor_rollup`, `manage` or `manage_rollup` cluster privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[rollup-get-job-desc]] ==== {api-description-title} diff --git a/docs/reference/rollup/apis/put-job.asciidoc b/docs/reference/rollup/apis/put-job.asciidoc index ed91d40bde50..8916567e1fec 100644 --- a/docs/reference/rollup/apis/put-job.asciidoc +++ b/docs/reference/rollup/apis/put-job.asciidoc @@ -21,7 +21,7 @@ experimental[] * If the {es} {security-features} are enabled, you must have `manage` or `manage_rollup` cluster privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[rollup-put-job-api-desc]] ==== {api-description-title} diff --git a/docs/reference/rollup/apis/rollup-caps.asciidoc b/docs/reference/rollup/apis/rollup-caps.asciidoc index 58100513201b..8358ccfed26a 100644 --- a/docs/reference/rollup/apis/rollup-caps.asciidoc +++ b/docs/reference/rollup/apis/rollup-caps.asciidoc @@ -43,7 +43,7 @@ There is no request body for the Get Rollup Caps API. You must have `monitor`, `monitor_rollup`, `manage` or `manage_rollup` cluster privileges to use this API. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +<>. ==== Examples diff --git a/docs/reference/rollup/apis/rollup-index-caps.asciidoc b/docs/reference/rollup/apis/rollup-index-caps.asciidoc index 8b475c8aa5e3..291d8fb4f19c 100644 --- a/docs/reference/rollup/apis/rollup-index-caps.asciidoc +++ b/docs/reference/rollup/apis/rollup-index-caps.asciidoc @@ -35,7 +35,7 @@ There is no request body for the Get Jobs API. You must have the `read` index privilege on the index that stores the rollup results. For more information, see -{xpack-ref}/security-privileges.html[Security Privileges]. +<>. ==== Examples diff --git a/docs/reference/rollup/apis/start-job.asciidoc b/docs/reference/rollup/apis/start-job.asciidoc index da31181a0162..d5b0a2a40761 100644 --- a/docs/reference/rollup/apis/start-job.asciidoc +++ b/docs/reference/rollup/apis/start-job.asciidoc @@ -21,7 +21,7 @@ experimental[] * You must have `manage` or `manage_rollup` cluster privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[rollup-start-job-desc]] ==== {api-description-title} diff --git a/docs/reference/rollup/apis/stop-job.asciidoc b/docs/reference/rollup/apis/stop-job.asciidoc index a669a6b56c94..254935bf421b 100644 --- a/docs/reference/rollup/apis/stop-job.asciidoc +++ b/docs/reference/rollup/apis/stop-job.asciidoc @@ -21,7 +21,7 @@ experimental[] * You must have `manage` or `manage_rollup` cluster privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security privileges]. +<>. 
[[rollup-stop-job-desc]] ===== {api-description-title} diff --git a/docs/reference/settings/audit-settings.asciidoc b/docs/reference/settings/audit-settings.asciidoc index 78f76c302c26..463d6ac5927d 100644 --- a/docs/reference/settings/audit-settings.asciidoc +++ b/docs/reference/settings/audit-settings.asciidoc @@ -6,8 +6,7 @@ ++++ All of these settings can be added to the `elasticsearch.yml` configuration -file. For more information, see -{stack-ov}/auditing.html[Auditing Security Events]. +file. For more information, see <>. [[general-audit-settings]] ==== General Auditing Settings @@ -69,7 +68,7 @@ The default value is `true`. [[audit-event-ignore-policies]] ==== Audit Logfile Event Ignore Policies -These settings affect the {stack-ov}/audit-log-output.html#audit-log-ignore-policy[ignore policies] +These settings affect the <> that enable fine-grained control over which audit events are printed to the log file. All of the settings with the same policy name combine to form a single policy. If an event matches all of the conditions for a specific policy, it is ignored diff --git a/docs/reference/settings/ccr-settings.asciidoc b/docs/reference/settings/ccr-settings.asciidoc index 286bb421662f..54d9c390d9aa 100644 --- a/docs/reference/settings/ccr-settings.asciidoc +++ b/docs/reference/settings/ccr-settings.asciidoc @@ -10,7 +10,7 @@ These {ccr} settings can be dynamically updated on a live cluster with the ==== Remote recovery settings The following setting can be used to rate-limit the data transmitted during -{stack-ov}/remote-recovery.html[remote recoveries]: +<>: `ccr.indices.recovery.max_bytes_per_sec` (<>):: Limits the total inbound and outbound remote recovery traffic on each node. diff --git a/docs/reference/settings/monitoring-settings.asciidoc b/docs/reference/settings/monitoring-settings.asciidoc index 58d38182f6eb..5c4663a98cd6 100644 --- a/docs/reference/settings/monitoring-settings.asciidoc +++ b/docs/reference/settings/monitoring-settings.asciidoc @@ -22,8 +22,7 @@ Logstash, configure {logstash-ref}/monitoring-internal-collection.html#monitoring-settings[`xpack.monitoring` settings] in `logstash.yml`. -For more information, see -{xpack-ref}/xpack-monitoring.html[Monitoring the Elastic Stack]. +For more information, see <>. [float] [[general-monitoring-settings]] diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 9ace7928e267..a33f2cc82805 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -64,8 +64,7 @@ See <>. Defaults to `bcrypt`. [[anonymous-access-settings]] ==== Anonymous access settings You can configure the following anonymous access settings in -`elasticsearch.yml`. For more information, see {stack-ov}/anonymous-access.html[ -Enabling anonymous access]. +`elasticsearch.yml`. For more information, see <>. `xpack.security.authc.anonymous.username`:: The username (principal) of the anonymous user. Defaults to `_es_anonymous_user`. @@ -115,8 +114,7 @@ Defaults to `48h` (48 hours). You can set the following document and field level security settings in `elasticsearch.yml`. For more information, see -{stack-ov}/field-and-document-access-control.html[Setting up document and field -level security]. +<>. 
`xpack.security.dls_fls.enabled`::
Set to `false` to prevent document and field level security
@@ -205,7 +203,7 @@ xpack.security.authc.realms:
----------------------------------------
The valid settings vary depending on the realm type. For more
-information, see {stack-ov}/setting-up-authentication.html[Setting up authentication].
+information, see <>.

[float]
[[ref-realm-settings]]
@@ -244,8 +242,8 @@ Defaults to `ssha256`.
`authentication.enabled`:: If set to `false`, disables authentication support in
this realm, so that it only supports user lookups.
-(See the {stack-ov}/run-as-privilege.html[run as] and
-{stack-ov}/realm-chains.html#authorization_realms[authorization realms] features).
+(See the <> and
+<> features).
Defaults to `true`.

[[ref-users-settings]]
@@ -260,7 +258,7 @@ the following settings:
`cache.ttl`::
The time-to-live for cached user entries. A user and a hash of its credentials
are cached for this configured period of time. Defaults to `20m`. Specify values
-using the standard {es} {ref}/common-options.html#time-units[time units].
+using the standard {es} <>.
Defaults to `20m`.

`cache.max_users`::
@@ -273,8 +271,8 @@ user credentials. See <>. Defaults to `ssha256`.
`authentication.enabled`:: If set to `false`, disables authentication support in
this realm, so that it only supports user lookups.
-(See the {stack-ov}/run-as-privilege.html[run as] and
-{stack-ov}/realm-chains.html#authorization_realms[authorization realms] features).
+(See the <> and
+<> features).
Defaults to `true`.

[[ref-ldap-settings]]
@@ -325,14 +323,14 @@ The DN template that replaces the user name with the string `{0}`.
This setting is multivalued; you can specify multiple user contexts.
Required to operate in user template mode. If `user_search.base_dn` is specified,
this setting is not valid. For more information on
-the different modes, see {stack-ov}/ldap-realm.html[LDAP realms].
+the different modes, see <>.

`authorization_realms`::
The names of the realms that should be consulted for delegated authorization.
If this setting is used, then the LDAP realm does not perform role mapping and
instead loads the user from the listed realms. The referenced realms are
consulted in the order that they are defined in this list.
-See {stack-ov}/realm-chains.html#authorization_realms[Delegating authorization to another realm]
+See <>.
+
--
NOTE: If any settings starting with `user_search` are specified, the
@@ -349,7 +347,7 @@ to `memberOf`.
Specifies a container DN to search for users. Required
to operate in user search mode. If `user_dn_templates` is specified, this
setting is not valid. For more information on
-the different modes, see {stack-ov}/ldap-realm.html[LDAP realms].
+the different modes, see <>.

`user_search.scope`::
The scope of the user search. Valid values are `sub_tree`, `one_level` or
@@ -421,13 +419,12 @@ the filter. If not set, the user DN is passed into the filter. Defaults to Empt
`unmapped_groups_as_roles`::
If set to `true`, the names of any unmapped LDAP groups are used as role names
and assigned to the user. A group is considered to be _unmapped_ if it is not
-referenced in a <>. API-based
role mappings are not considered. Defaults to `false`.

`files.role_mapping`::
-The <> for the
+<>. Defaults to
`ES_PATH_CONF/role_mapping.yml`.
`follow_referrals`:: @@ -542,8 +539,8 @@ in-memory cached user credentials. See <>. Defaults to `ssha256 `authentication.enabled`:: If set to `false`, disables authentication support in this realm, so that it only supports user lookups. -(See the {stack-ov}/run-as-privilege.html[run as] and -{stack-ov}/realm-chains.html#authorization_realms[authorization realms] features). +(See the <> and +<> features). Defaults to `true`. [[ref-ad-settings]] @@ -781,7 +778,7 @@ the default values. `cache.ttl`:: Specifies the time-to-live for cached user entries. A user and a hash of its credentials are cached for this configured period of time. Use the -standard Elasticsearch {ref}/common-options.html#time-units[time units]). +standard Elasticsearch <>). Defaults to `20m`. `cache.max_users`:: @@ -794,8 +791,8 @@ the in-memory cached user credentials. See <>. Defaults to `ssh `authentication.enabled`:: If set to `false`, disables authentication support in this realm, so that it only supports user lookups. -(See the {stack-ov}/run-as-privilege.html[run as] and -{stack-ov}/realm-chains.html#authorization_realms[authorization realms] features). +(See the <> and +<> features). Defaults to `true`. `follow_referrals`:: @@ -836,19 +833,19 @@ for SSL. This setting cannot be used with `certificate_authorities`. `files.role_mapping`:: Specifies the <> of the -{stack-ov}/mapping-roles.html[YAML role mapping configuration file]. +<>. Defaults to `ES_PATH_CONF/role_mapping.yml`. `authorization_realms`:: The names of the realms that should be consulted for delegated authorization. If this setting is used, then the PKI realm does not perform role mapping and instead loads the user from the listed realms. -See {stack-ov}/realm-chains.html#authorization_realms[Delegating authorization to another realm] +See <>. `cache.ttl`:: Specifies the time-to-live for cached user entries. A user and a hash of its credentials are cached for this period of time. Use the -standard {es} {ref}/common-options.html#time-units[time units]). +standard {es} <>). Defaults to `20m`. `cache.max_users`:: @@ -978,7 +975,7 @@ provided by the SAML attributes. Defaults to `true`. The names of the realms that should be consulted for delegated authorization. If this setting is used, then the SAML realm does not perform role mapping and instead loads the user from the listed realms. -See {stack-ov}/realm-chains.html#authorization_realms[Delegating authorization to another realm] +See <>. `allowed_clock_skew`:: The maximum amount of skew that can be tolerated between the IdP's clock and the @@ -992,7 +989,7 @@ authenticate the current user. The Authentication Context of the corresponding authentication response should contain at least one of the requested values. + For more information, see -{stack-ov}/saml-guide-authentication.html#req-authn-context[Requesting specific authentication methods]. +<>. [float] [[ref-saml-signing-settings]] @@ -1224,7 +1221,7 @@ cache at any given time. Defaults to 100,000. The names of the realms that should be consulted for delegated authorization. If this setting is used, then the Kerberos realm does not perform role mapping and instead loads the user from the listed realms. -See {stack-ov}/realm-chains.html#authorization_realms[Delegating authorization to another realm] +See <>. [[ref-oidc-settings]] [float] @@ -1503,7 +1500,7 @@ used (e.g. `xpack.security.authc.realms.ldap.corp_ldap.ssl.verification_mode` or `xpack.security.transport.ssl.supported_protocols`). 
For more information, see -{stack-ov}/encrypting-communications.html[Encrypting communications]. +<>. `*.ssl.supported_protocols`:: Supported protocols with versions. Valid protocols: `SSLv2Hello`, @@ -1566,7 +1563,7 @@ SSL enabled server. [[pkcs12-truststore-note]] [NOTE] Storing trusted certificates in a PKCS#12 file, although supported, is -uncommon in practice. The {ref}/certutil.html[`elasticsearch-certutil`] tool, +uncommon in practice. The <>, as well as Java's `keytool`, are designed to generate PKCS#12 files that can be used both as a keystore and as a truststore, but this may not be the case for container files that are created using other tools. Usually, @@ -1613,7 +1610,7 @@ setting, this would be `transport.profiles.$PROFILE.xpack.security.ssl.key`. [float] [[ip-filtering-settings]] ==== IP filtering settings -You can configure the following settings for {stack-ov}/ip-filtering.html[IP filtering]. +You can configure the following settings for <>. `xpack.security.transport.filter.allow`:: List of IP addresses to allow. diff --git a/docs/reference/setup/bootstrap-checks-xes.asciidoc b/docs/reference/setup/bootstrap-checks-xes.asciidoc index 4cf55a007238..a81d9bbe4c89 100644 --- a/docs/reference/setup/bootstrap-checks-xes.asciidoc +++ b/docs/reference/setup/bootstrap-checks-xes.asciidoc @@ -23,8 +23,7 @@ on each node in the cluster. For more information, see <>. If you use {es} {security-features} and a Public Key Infrastructure (PKI) realm, you must configure Transport Layer Security (TLS) on your cluster and enable client authentication on the network layers (either transport or http). For more -information, see {stack-ov}/pki-realm.html[PKI user authentication] and -{stack-ov}/ssl-tls.html[Setting up TLS on a cluster]. +information, see <> and <>. To pass this bootstrap check, if a PKI realm is enabled, you must configure TLS and enable client authentication on at least one network communication layer. @@ -41,7 +40,7 @@ and copy it to each node in the cluster. By default, role mappings are stored in `ES_PATH_CONF/role_mapping.yml`. Alternatively, you can specify a different role mapping file for each type of realm and specify its location in the `elasticsearch.yml` file. For more information, see -{stack-ov}/mapping-roles.html#mapping-roles-file[Using role mapping files]. +<>. To pass this bootstrap check, the role mapping files must exist and must be valid. The Distinguished Names (DNs) that are listed in the role mappings files @@ -57,10 +56,10 @@ must configure SSL/TLS for internode-communication. NOTE: Single-node clusters that use a loopback interface do not have this requirement. For more information, see -{stack-ov}/encrypting-communications.html[Encrypting communications]. +<>. To pass this bootstrap check, you must -{stack-ov}/ssl-tls.html[set up SSL/TLS in your cluster]. +<>. [float] diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index e8dd1ee95957..7a2a751b07da 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -11,7 +11,7 @@ https://github.com/elastic/elasticsearch/blob/{branch}/distribution/docker[Githu These images are free to use under the Elastic license. They contain open source and free commercial features and access to paid commercial features. -{xpack-ref}/license-management.html[Start a 30-day trial] to try out all of the +{stack-ov}/license-management.html[Start a 30-day trial] to try out all of the paid commercial features. 
See the https://www.elastic.co/subscriptions[Subscriptions] page for information about Elastic license levels. diff --git a/docs/reference/sql/functions/system.asciidoc b/docs/reference/sql/functions/system.asciidoc index b2d604728c16..8271d600c502 100644 --- a/docs/reference/sql/functions/system.asciidoc +++ b/docs/reference/sql/functions/system.asciidoc @@ -44,7 +44,7 @@ USER() .Description: Returns the username of the authenticated user executing the query. This function can -return `null` in case {stack-ov}/elasticsearch-security.html[Security] is disabled. +return `null` in case <> is disabled. [source, sql] -------------------------------------------------- diff --git a/docs/reference/transform/apis/delete-transform.asciidoc b/docs/reference/transform/apis/delete-transform.asciidoc index 06ac3c8f5d32..aaf08d2f9abb 100644 --- a/docs/reference/transform/apis/delete-transform.asciidoc +++ b/docs/reference/transform/apis/delete-transform.asciidoc @@ -24,8 +24,7 @@ beta[] * If the {es} {security-features} are enabled, you must have `manage_data_frame_transforms` cluster privileges to use this API. The built-in `data_frame_transforms_admin` role has these privileges. For more information, -see {stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +see <> and <>. [[delete-transform-path-parms]] diff --git a/docs/reference/transform/apis/get-transform-stats.asciidoc b/docs/reference/transform/apis/get-transform-stats.asciidoc index b4b485b52c5e..cfeea51a4ae2 100644 --- a/docs/reference/transform/apis/get-transform-stats.asciidoc +++ b/docs/reference/transform/apis/get-transform-stats.asciidoc @@ -33,8 +33,7 @@ beta[] * If the {es} {security-features} are enabled, you must have `monitor_data_frame_transforms` cluster privileges to use this API. The built-in `data_frame_transforms_user` role has these privileges. For more information, -see {stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +see <> and <>. [[get-transform-stats-desc]] diff --git a/docs/reference/transform/apis/get-transform.asciidoc b/docs/reference/transform/apis/get-transform.asciidoc index 63b272ad7fb1..d48e7f07c7fe 100644 --- a/docs/reference/transform/apis/get-transform.asciidoc +++ b/docs/reference/transform/apis/get-transform.asciidoc @@ -31,8 +31,7 @@ beta[] * If the {es} {security-features} are enabled, you must have `monitor_data_frame_transforms` cluster privileges to use this API. The built-in `data_frame_transforms_user` role has these privileges. For more information, -see {stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +see <> and <>. [[get-transform-desc]] ==== {api-description-title} diff --git a/docs/reference/transform/apis/preview-transform.asciidoc b/docs/reference/transform/apis/preview-transform.asciidoc index e86f6c42bbb9..e0d5227e275b 100644 --- a/docs/reference/transform/apis/preview-transform.asciidoc +++ b/docs/reference/transform/apis/preview-transform.asciidoc @@ -24,9 +24,8 @@ beta[] `manage_data_frame_transforms` cluster privileges to use this API. The built-in `data_frame_transforms_admin` role has these privileges. You must also have `read` and `view_index_metadata` privileges on the source index for the -{transform}. For more information, see -{stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +{transform}. For more information, +see <> and <>. 
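A concrete shape for the preview call whose prerequisites are edited above may be useful. This sketch assumes the {kib} sample eCommerce data is loaded; also note that the `_data_frame/transforms` endpoints were in the middle of being renamed to `_transform` around this release, so the path may need adjusting for your version.

[source,console]
--------------------------------------------------
POST _data_frame/transforms/_preview
{
  "source": {
    "index": "kibana_sample_data_ecommerce"
  },
  "pivot": {
    "group_by": {
      "customer_id": {
        "terms": { "field": "customer_id" }
      }
    },
    "aggregations": {
      "max_price": {
        "max": { "field": "taxful_total_price" }
      }
    }
  }
}
--------------------------------------------------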
[[preview-transform-desc]] ==== {api-description-title} diff --git a/docs/reference/transform/apis/put-transform.asciidoc b/docs/reference/transform/apis/put-transform.asciidoc index d72f2722f4e0..d2d76eed1d19 100644 --- a/docs/reference/transform/apis/put-transform.asciidoc +++ b/docs/reference/transform/apis/put-transform.asciidoc @@ -25,8 +25,7 @@ beta[] `data_frame_transforms_admin` role has these privileges. You must also have `read` and `view_index_metadata` privileges on the source index and `read`, `create_index`, and `index` privileges on the destination index. For more -information, see {stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +information, see <> and <>. [[put-transform-desc]] ==== {api-description-title} diff --git a/docs/reference/transform/apis/start-transform.asciidoc b/docs/reference/transform/apis/start-transform.asciidoc index 3480ddea86fc..41712e3c2809 100644 --- a/docs/reference/transform/apis/start-transform.asciidoc +++ b/docs/reference/transform/apis/start-transform.asciidoc @@ -23,9 +23,8 @@ beta[] * If the {es} {security-features} are enabled, you must have `manage_data_frame_transforms` cluster privileges to use this API. You must also have `view_index_metadata` privileges on the source index for the -{transform}. For more information, see -{stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +{transform}. For more information, +see <> and <>. [[start-transform-desc]] ==== {api-description-title} diff --git a/docs/reference/transform/apis/stop-transform.asciidoc b/docs/reference/transform/apis/stop-transform.asciidoc index c294186b6c37..55894aa50086 100644 --- a/docs/reference/transform/apis/stop-transform.asciidoc +++ b/docs/reference/transform/apis/stop-transform.asciidoc @@ -29,8 +29,7 @@ beta[] * If the {es} {security-features} are enabled, you must have `manage_data_frame_transforms` cluster privileges to use this API. The built-in `data_frame_transforms_admin` role has these privileges. For more information, -see {stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +see <> and <>. [[stop-transform-desc]] diff --git a/docs/reference/transform/apis/update-transform.asciidoc b/docs/reference/transform/apis/update-transform.asciidoc index a71bdbe15b84..fe45472b0651 100644 --- a/docs/reference/transform/apis/update-transform.asciidoc +++ b/docs/reference/transform/apis/update-transform.asciidoc @@ -25,8 +25,7 @@ beta[] `data_frame_transforms_admin` role has these privileges. You must also have `read` and `view_index_metadata` privileges on the source index and `read`, `create_index`, and `index` privileges on the destination index. For more -information, see {stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +information, see <> and <>. [[update-transform-desc]] ==== {api-description-title} diff --git a/docs/reference/transform/ecommerce-tutorial.asciidoc b/docs/reference/transform/ecommerce-tutorial.asciidoc index 026127f97bac..b4dc1ba742fa 100644 --- a/docs/reference/transform/ecommerce-tutorial.asciidoc +++ b/docs/reference/transform/ecommerce-tutorial.asciidoc @@ -23,9 +23,7 @@ You also need `read` and `view_index_metadata` index privileges on the source index and `read`, `create_index`, and `index` privileges on the destination index. 
-For more information, see -{stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. +For more information, see <> and <>. -- . Choose your _source index_. diff --git a/x-pack/docs/en/rest-api/security/change-password.asciidoc b/x-pack/docs/en/rest-api/security/change-password.asciidoc index ed696d668bc9..af8948a290c8 100644 --- a/x-pack/docs/en/rest-api/security/change-password.asciidoc +++ b/x-pack/docs/en/rest-api/security/change-password.asciidoc @@ -28,7 +28,7 @@ You can use the <> to update everything but a user's `username` and `password`. This API changes a user's password. For more information about the native realm, see -{stack-ov}/realms.html[Realms] and <>. +<> and <>. [[security-api-change-password-path-params]] diff --git a/x-pack/docs/en/rest-api/security/clear-cache.asciidoc b/x-pack/docs/en/rest-api/security/clear-cache.asciidoc index a214163ed179..2a1a227163da 100644 --- a/x-pack/docs/en/rest-api/security/clear-cache.asciidoc +++ b/x-pack/docs/en/rest-api/security/clear-cache.asciidoc @@ -23,7 +23,7 @@ User credentials are cached in memory on each node to avoid connecting to a remote authentication service or hitting the disk for every incoming request. There are realm settings that you can use to configure the user cache. For more information, see -{stack-ov}/controlling-user-cache.html[Controlling the user cache]. +<>. To evict roles from the role cache, see the <>. diff --git a/x-pack/docs/en/rest-api/security/clear-roles-cache.asciidoc b/x-pack/docs/en/rest-api/security/clear-roles-cache.asciidoc index 8fc8f0b88390..3938316007cb 100644 --- a/x-pack/docs/en/rest-api/security/clear-roles-cache.asciidoc +++ b/x-pack/docs/en/rest-api/security/clear-roles-cache.asciidoc @@ -22,7 +22,7 @@ privilege. ==== {api-description-title} For more information about the native realm, see -{stack-ov}/realms.html[Realms] and <>. +<> and <>. [[security-api-clear-role-cache-path-params]] ==== {api-path-parms-title} diff --git a/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc b/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc index 2a1e6a26b242..735e1d474e02 100644 --- a/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc +++ b/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc @@ -29,10 +29,9 @@ granted to those users. NOTE: This API does not create roles. Rather, it maps users to existing roles. Roles can be created by using <> or -{stack-ov}/defining-roles.html#roles-management-file[roles files]. +<>. -For more information, see -{stack-ov}/mapping-roles.html[Mapping users and groups to roles]. +For more information, see <>. [[security-api-put-role-mapping-path-params]] @@ -313,7 +312,7 @@ POST /_security/role_mapping/mapping8 A templated role can be used to automatically map every user to their own custom role. The role itself can be defined through the <> or using a -{stack-ov}/custom-roles-authorization.html#implementing-custom-roles-provider[custom roles provider]. +<>. In this example every user who authenticates using the "cloud-saml" realm will be automatically mapped to two roles - the `"saml_user"` role and a diff --git a/x-pack/docs/en/rest-api/security/create-roles.asciidoc b/x-pack/docs/en/rest-api/security/create-roles.asciidoc index 19802234f329..749993955931 100644 --- a/x-pack/docs/en/rest-api/security/create-roles.asciidoc +++ b/x-pack/docs/en/rest-api/security/create-roles.asciidoc @@ -25,7 +25,7 @@ privilege. 
==== {api-description-title} The role management APIs are generally the preferred way to manage roles, rather than using -{stack-ov}/defining-roles.html#roles-management-file[file-based role management]. The create +<>. The create or update roles API cannot update roles that are defined in roles files. [[security-api-put-role-path-params]] @@ -58,7 +58,7 @@ This field is optional. `indices`:: (list) A list of indices permissions entries. `field_security`::: (list) The document fields that the owners of the role have read access to. For more information, see -{stack-ov}/field-and-document-access-control.html[Setting up field and document level security]. +<>. `names` (required)::: (list) A list of indices (or index name patterns) to which the permissions in this entry apply. `privileges`(required)::: (list) The index level privileges that the owners of the role @@ -72,9 +72,9 @@ that begin with `_` are reserved for system usage. `run_as`:: (list) A list of users that the owners of this role can impersonate. For more information, see -{stack-ov}/run-as-privilege.html[Submitting requests on behalf of other users]. +<>. -For more information, see {stack-ov}/defining-roles.html[Defining roles]. +For more information, see <>. [[security-api-put-role-example]] ==== {api-examples-title} diff --git a/x-pack/docs/en/rest-api/security/create-users.asciidoc b/x-pack/docs/en/rest-api/security/create-users.asciidoc index 47df7c01f503..209c9b1c5e6f 100644 --- a/x-pack/docs/en/rest-api/security/create-users.asciidoc +++ b/x-pack/docs/en/rest-api/security/create-users.asciidoc @@ -31,7 +31,7 @@ To change a user's password, use the <>. For more information about the native realm, see -{stack-ov}/realms.html[Realms] and <>. +<> and <>. [[security-api-put-user-path-params]] ==== {api-path-parms-title} diff --git a/x-pack/docs/en/rest-api/security/delegate-pki-authentication.asciidoc b/x-pack/docs/en/rest-api/security/delegate-pki-authentication.asciidoc index d1bec9ae8dcf..c34ecdcb5f33 100644 --- a/x-pack/docs/en/rest-api/security/delegate-pki-authentication.asciidoc +++ b/x-pack/docs/en/rest-api/security/delegate-pki-authentication.asciidoc @@ -18,7 +18,7 @@ token. * To call this API, the (proxy) user must have the `delegate_pki` or the `all` cluster privilege. The `kibana_system` built-in role already grants this -privilege. See {stack-ov}/security-privileges.html[Security privileges]. +privilege. See <>. [[security-api-delegate-pki-authentication-desc]] ==== {api-description-title} diff --git a/x-pack/docs/en/rest-api/security/delete-app-privileges.asciidoc b/x-pack/docs/en/rest-api/security/delete-app-privileges.asciidoc index 6dd4325925ab..0ff3ecc8b4ae 100644 --- a/x-pack/docs/en/rest-api/security/delete-app-privileges.asciidoc +++ b/x-pack/docs/en/rest-api/security/delete-app-privileges.asciidoc @@ -5,8 +5,7 @@ Delete application privileges ++++ -Removes -{stack-ov}/security-privileges.html#application-privileges[application privileges]. +Removes <>. [[security-api-delete-privilege-request]] ==== {api-request-title} diff --git a/x-pack/docs/en/rest-api/security/delete-role-mappings.asciidoc b/x-pack/docs/en/rest-api/security/delete-role-mappings.asciidoc index 9fe2d6013cd3..489aa944805b 100644 --- a/x-pack/docs/en/rest-api/security/delete-role-mappings.asciidoc +++ b/x-pack/docs/en/rest-api/security/delete-role-mappings.asciidoc @@ -21,7 +21,7 @@ Removes role mappings. ==== {api-description-title} Role mappings define which roles are assigned to each user. 
For more information, -see {stack-ov}/mapping-roles.html[Mapping users and groups to roles]. +see <>. [[security-api-delete-role-mapping-path-params]] ==== {api-path-parms-title} diff --git a/x-pack/docs/en/rest-api/security/delete-roles.asciidoc b/x-pack/docs/en/rest-api/security/delete-roles.asciidoc index ce5906ad8e32..427e7c6b1860 100644 --- a/x-pack/docs/en/rest-api/security/delete-roles.asciidoc +++ b/x-pack/docs/en/rest-api/security/delete-roles.asciidoc @@ -23,7 +23,7 @@ Removes roles in the native realm. ==== {api-description-title} The role management APIs are generally the preferred way to manage roles, rather than using -{stack-ov}/defining-roles.html#roles-management-file[file-based role management]. The delete roles API cannot remove roles that are defined in roles files. +<>. The delete roles API cannot remove roles that are defined in roles files. [[security-api-delete-role-path-params]] ==== {api-path-parms-title} diff --git a/x-pack/docs/en/rest-api/security/delete-users.asciidoc b/x-pack/docs/en/rest-api/security/delete-users.asciidoc index db48a754f07b..6f77ecedb98f 100644 --- a/x-pack/docs/en/rest-api/security/delete-users.asciidoc +++ b/x-pack/docs/en/rest-api/security/delete-users.asciidoc @@ -21,7 +21,7 @@ Deletes users from the native realm. ==== {api-description-title} For more information about the native realm, see -{stack-ov}/realms.html[Realms] and <>. +<> and <>. [[security-api-delete-user-path-params]] ==== {api-path-parms-title} diff --git a/x-pack/docs/en/rest-api/security/disable-users.asciidoc b/x-pack/docs/en/rest-api/security/disable-users.asciidoc index 4f005a0673ff..b9c71d85ac51 100644 --- a/x-pack/docs/en/rest-api/security/disable-users.asciidoc +++ b/x-pack/docs/en/rest-api/security/disable-users.asciidoc @@ -27,7 +27,7 @@ revoke a user's access to {es}. To re-enable a user, there is an <>. For more information about the native realm, see -{stack-ov}/realms.html[Realms] and <>. +<> and <>. [[security-api-disable-user-path-params]] ==== {api-path-parms-title} diff --git a/x-pack/docs/en/rest-api/security/enable-users.asciidoc b/x-pack/docs/en/rest-api/security/enable-users.asciidoc index 34b759aa8d8d..b589776e438c 100644 --- a/x-pack/docs/en/rest-api/security/enable-users.asciidoc +++ b/x-pack/docs/en/rest-api/security/enable-users.asciidoc @@ -26,7 +26,7 @@ By default, when you create users, they are enabled. You can use this enable users API and the <> to change that attribute. For more information about the native realm, see -{stack-ov}/realms.html[Realms] and <>. +<> and <>. [[security-api-enable-user-path-params]] ==== {api-path-parms-title} diff --git a/x-pack/docs/en/rest-api/security/get-app-privileges.asciidoc b/x-pack/docs/en/rest-api/security/get-app-privileges.asciidoc index bba361fd62c0..98020b1558d9 100644 --- a/x-pack/docs/en/rest-api/security/get-app-privileges.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-app-privileges.asciidoc @@ -5,8 +5,7 @@ Get application privileges ++++ -Retrieves -{stack-ov}/security-privileges.html#application-privileges[application privileges]. +Retrieves <>. 
[[security-api-get-privileges-request]] ==== {api-request-title} diff --git a/x-pack/docs/en/rest-api/security/get-builtin-privileges.asciidoc b/x-pack/docs/en/rest-api/security/get-builtin-privileges.asciidoc index 7da8bdce87b3..ce84873173fc 100644 --- a/x-pack/docs/en/rest-api/security/get-builtin-privileges.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-builtin-privileges.asciidoc @@ -5,9 +5,8 @@ Get builtin privileges ++++ -Retrieves the list of -{stack-ov}/security-privileges.html#privileges-list-cluster[cluster privileges] and -{stack-ov}/security-privileges.html#privileges-list-indices[index privileges] that are +Retrieves the list of <> and +<> that are available in this version of {es}. [[security-api-get-builtin-privileges-request]] @@ -38,12 +37,12 @@ To check whether a user has particular privileges, use the The response is an object with two fields: `cluster`:: (array of string) The list of - {stack-ov}/security-privileges.html#privileges-list-cluster[cluster privileges] - that are understood by this version of {es} +<> that are understood by this +version of {es}. `index`:: (array of string) The list of - {stack-ov}/security-privileges.html#privileges-list-indices[index privileges] - that are understood by this version of {es} +<> that are understood by this version +of {es}. [[security-api-get-builtin-privileges-example]] diff --git a/x-pack/docs/en/rest-api/security/get-role-mappings.asciidoc b/x-pack/docs/en/rest-api/security/get-role-mappings.asciidoc index c0a77dd59b81..5243d775250e 100644 --- a/x-pack/docs/en/rest-api/security/get-role-mappings.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-role-mappings.asciidoc @@ -23,7 +23,7 @@ Retrieves role mappings. ==== {api-description-title} Role mappings define which roles are assigned to each user. For more information, -see {stack-ov}/mapping-roles.html[Mapping users and groups to roles]. +see <>. [[security-api-get-role-mapping-path-params]] ==== {api-path-parms-title} diff --git a/x-pack/docs/en/rest-api/security/get-roles.asciidoc b/x-pack/docs/en/rest-api/security/get-roles.asciidoc index de7234697d33..46016a1d9d72 100644 --- a/x-pack/docs/en/rest-api/security/get-roles.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-roles.asciidoc @@ -24,7 +24,7 @@ privilege. ==== {api-description-title} The role management APIs are generally the preferred way to manage roles, rather than using -{stack-ov}/defining-roles.html#roles-management-file[file-based role management]. The get roles +<>. The get roles API cannot retrieve roles that are defined in roles files. [[security-api-get-role-path-params]] diff --git a/x-pack/docs/en/rest-api/security/get-users.asciidoc b/x-pack/docs/en/rest-api/security/get-users.asciidoc index 63ae8bc140da..8e7d5c37c9ee 100644 --- a/x-pack/docs/en/rest-api/security/get-users.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-users.asciidoc @@ -25,7 +25,7 @@ Retrieves information about users in the native realm and built-in users. ==== {api-description-title} For more information about the native realm, see -{stack-ov}/realms.html[Realms] and <>. +<> and <>. [[security-api-get-user-path-params]] ==== {api-path-parms-title} diff --git a/x-pack/docs/en/rest-api/security/has-privileges.asciidoc b/x-pack/docs/en/rest-api/security/has-privileges.asciidoc index cce21965d6e3..c5091992e0a9 100644 --- a/x-pack/docs/en/rest-api/security/has-privileges.asciidoc +++ b/x-pack/docs/en/rest-api/security/has-privileges.asciidoc @@ -21,13 +21,13 @@ a specified list of privileges. 
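For reference, a minimal sketch of a request to this API might look like the following, where the index name `my-index` is purely illustrative:

[source,console]
--------------------------------------------------
GET /_security/user/_has_privileges
{
  "cluster": [ "monitor" ],
  "index": [
    {
      "names": [ "my-index" ],
      "privileges": [ "read" ]
    }
  ]
}
--------------------------------------------------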
* All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature. For more information, see -{stack-ov}/run-as-privilege.html[Submitting requests on behalf of other users]. +<>. [[security-api-has-privileges-desc]] ==== {api-description-title} For a list of the privileges that you can specify in this API, -see {stack-ov}/security-privileges.html[Security privileges]. +see <>. A successful call returns a JSON structure that shows whether each specified privilege is assigned to the user. diff --git a/x-pack/docs/en/rest-api/security/put-app-privileges.asciidoc b/x-pack/docs/en/rest-api/security/put-app-privileges.asciidoc index 38383e22d48b..5ae41fbd3b2e 100644 --- a/x-pack/docs/en/rest-api/security/put-app-privileges.asciidoc +++ b/x-pack/docs/en/rest-api/security/put-app-privileges.asciidoc @@ -5,8 +5,7 @@ Create or update application privileges ++++ -Adds or updates -{stack-ov}/security-privileges.html#application-privileges[application privileges]. +Adds or updates <>. [[security-api-put-privileges-request]] ==== {api-request-title} @@ -31,8 +30,7 @@ being referenced in the request This API creates or updates privileges. To remove privileges, use the <>. -For more information, see -{stack-ov}/defining-roles.html#roles-application-priv[Application privileges]. +For more information, see <>. To check a user's application privileges, use the <>. diff --git a/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc b/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc index 13e6457d7ef3..a6a6fd7a90e7 100644 --- a/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc +++ b/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc @@ -49,7 +49,7 @@ The value specified in the field rule can be one of the following types: | Simple String | Exactly matches the provided value. | "esadmin" | Wildcard String | Matches the provided value using a wildcard. | "*,dc=example,dc=com" | Regular Expression | Matches the provided value using a - {ref}/regexp-syntax.html[Lucene regexp]. | "/.\*-admin[0-9]*/" + <>. | "/.\*-admin[0-9]*/" | Number | Matches an equivalent numerical value. | 7 | Null | Matches a null or missing value. | null | Array | Tests each element in the array in @@ -87,4 +87,4 @@ other groups they belong to: // NOTCONSOLE For additional realm-specific details, see -{stack-ov}/mapping-roles.html#ldap-role-mapping[Mapping Users and Groups to Roles]. +<>. diff --git a/x-pack/docs/en/rest-api/security/ssl.asciidoc b/x-pack/docs/en/rest-api/security/ssl.asciidoc index 4263bce00677..7ce2ff65df30 100644 --- a/x-pack/docs/en/rest-api/security/ssl.asciidoc +++ b/x-pack/docs/en/rest-api/security/ssl.asciidoc @@ -19,14 +19,14 @@ certificates that are used to encrypt communications in your {es} cluster. * If the {security-features} are enabled, you must have `monitor` cluster privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security privileges]. +<>. [[security-api-ssl-desc]] ==== {api-description-title} For more information about how certificates are configured in conjunction with Transport Layer Security (TLS), see -{stack-ov}/ssl-tls.html[Setting up SSL/TLS on a cluster]. +<>. 
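For example, assuming the {security-features} are enabled and you hold the `monitor` privilege, retrieving the certificates is a single `GET` with no request body:

[source,console]
--------------------------------------------------
GET /_ssl/certificates
--------------------------------------------------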
The API returns a list that includes certificates from all TLS contexts including: diff --git a/x-pack/docs/en/rest-api/watcher/ack-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/ack-watch.asciidoc index 33d128c166a4..994c0022b8bc 100644 --- a/x-pack/docs/en/rest-api/watcher/ack-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/ack-watch.asciidoc @@ -19,7 +19,7 @@ to manually throttle execution of the watch's actions. ==== {api-prereq-title} * You must have `manage_watcher` cluster privileges to use this API. For more -information, see {stack-ov}/security-privileges.html[Security privileges]. +information, see <>. [[watcher-api-ack-watch-desc]] ==== {api-description-title} diff --git a/x-pack/docs/en/rest-api/watcher/activate-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/activate-watch.asciidoc index 7151e42a1033..d8af79854c83 100644 --- a/x-pack/docs/en/rest-api/watcher/activate-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/activate-watch.asciidoc @@ -17,7 +17,7 @@ API enables you to activate a currently inactive watch. ==== {api-prereq-title} * You must have `manage_watcher` cluster privileges to use this API. For more -information, see {stack-ov}/security-privileges.html[Security privileges]. +information, see <>. //[[watcher-api-activate-watch-desc]] //==== {api-description-title} diff --git a/x-pack/docs/en/rest-api/watcher/deactivate-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/deactivate-watch.asciidoc index 7753adac2b9a..ba4170174343 100644 --- a/x-pack/docs/en/rest-api/watcher/deactivate-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/deactivate-watch.asciidoc @@ -17,7 +17,7 @@ API enables you to deactivate a currently active watch. ==== {api-prereq-title} * You must have `manage_watcher` cluster privileges to use this API. For more -information, see {stack-ov}/security-privileges.html[Security privileges]. +information, see <>. //[[watcher-api-deactivate-watch-desc]] //==== {api-description-title} diff --git a/x-pack/docs/en/rest-api/watcher/delete-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/delete-watch.asciidoc index 30aba42417a7..3ffcb43ed65e 100644 --- a/x-pack/docs/en/rest-api/watcher/delete-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/delete-watch.asciidoc @@ -16,7 +16,7 @@ Removes a watch from {watcher}. ==== {api-prereq-title} * You must have `manage_watcher` cluster privileges to use this API. For more -information, see {stack-ov}/security-privileges.html[Security privileges]. +information, see <>. [[watcher-api-delete-watch-desc]] ==== {api-description-title} diff --git a/x-pack/docs/en/rest-api/watcher/execute-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/execute-watch.asciidoc index d4edff7bf353..3a98a4c547f9 100644 --- a/x-pack/docs/en/rest-api/watcher/execute-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/execute-watch.asciidoc @@ -18,7 +18,7 @@ Forces the execution of a stored watch. ==== {api-prereq-title} * You must have `manage_watcher` cluster privileges to use this API. For more -information, see {stack-ov}/security-privileges.html[Security privileges]. +information, see <>. [[watcher-api-execute-watch-desc]] ==== {api-description-title} diff --git a/x-pack/docs/en/rest-api/watcher/get-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/get-watch.asciidoc index f62fc04d435c..08ae1871f397 100644 --- a/x-pack/docs/en/rest-api/watcher/get-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/get-watch.asciidoc @@ -17,7 +17,7 @@ Retrieves a watch by its ID. 
* You must have `manage_watcher` or `monitor_watcher` cluster privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security privileges]. +<>. //[[watcher-api-get-watch-desc]] //==== {api-description-title} diff --git a/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc index 63c0ad2b9b69..b5aafa867057 100644 --- a/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc @@ -16,7 +16,7 @@ Either registers a new watch in {watcher} or updates an existing one. ==== {api-prereq-title} * You must have `manage_watcher` cluster privileges to use this API. For more -information, see {stack-ov}/security-privileges.html[Security privileges]. +information, see <>. [[watcher-api-put-watch-desc]] ==== {api-description-title} diff --git a/x-pack/docs/en/rest-api/watcher/start.asciidoc b/x-pack/docs/en/rest-api/watcher/start.asciidoc index e4a2441d2230..b153410ed290 100644 --- a/x-pack/docs/en/rest-api/watcher/start.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/start.asciidoc @@ -16,7 +16,7 @@ Starts the {watcher} service if it is not already running. ==== {api-prereq-title} * You must have `manage_watcher` cluster privileges to use this API. For more -information, see {stack-ov}/security-privileges.html[Security privileges]. +information, see <>. //[[watcher-api-start-desc]] //==== {api-description-title} diff --git a/x-pack/docs/en/rest-api/watcher/stats.asciidoc b/x-pack/docs/en/rest-api/watcher/stats.asciidoc index a29b24648f75..afc3191ae9b8 100644 --- a/x-pack/docs/en/rest-api/watcher/stats.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/stats.asciidoc @@ -20,7 +20,7 @@ Retrieves the current {watcher} metrics. * You must have `manage_watcher` or `monitor_watcher` cluster privileges to use this API. For more information, see -{stack-ov}/security-privileges.html[Security privileges]. +<>. //[[watcher-api-stats-desc]] //==== {api-description-title} diff --git a/x-pack/docs/en/rest-api/watcher/stop.asciidoc b/x-pack/docs/en/rest-api/watcher/stop.asciidoc index ae54c2eccf65..f0e733df3979 100644 --- a/x-pack/docs/en/rest-api/watcher/stop.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/stop.asciidoc @@ -16,7 +16,7 @@ Stops the {watcher} service if it is running. ==== {api-prereq-title} * You must have `manage_watcher` cluster privileges to use this API. For more -information, see {stack-ov}/security-privileges.html[Security privileges]. +information, see <>. //[[watcher-api-stop-desc]] //==== {api-description-title} diff --git a/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc index 42378ca10ed8..061ba519a545 100644 --- a/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc @@ -7,7 +7,7 @@ users. To integrate with Active Directory, you configure an `active_directory` realm and map Active Directory users and groups to roles in the role mapping file. For more information about Active Directory realms, see -{stack-ov}/active-directory-realm.html[Active Directory User Authentication]. +<>. . Add a realm configuration of type `active_directory` to `elasticsearch.yml` under the `xpack.security.authc.realms.active_directory` namespace. 
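For example, a minimal sketch of such a realm entry, with placeholder realm, domain, and host names (these are illustrative, not defaults), might look like:

[source, yaml]
------------------------------------------------------------
xpack.security.authc.realms.active_directory.my_ad:
  order: 2
  domain_name: ad.example.com
  url: ldaps://ad.example.com:636
------------------------------------------------------------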
@@ -119,7 +119,7 @@ can be configured and is used to perform all operations other than the LDAP bind request, which is required to authenticate the credentials provided by the user. The use of a bind user enables the -{stack-ov}/run-as-privilege.html[run as feature] to be used with the Active +<> to be used with the Active Directory realm and the ability to maintain a set of pooled connections to Active Directory. These pooled connection reduce the number of resources that must be created and destroyed with every user authentication. @@ -235,7 +235,7 @@ user: <4> The Active Directory distinguished name (DN) of the user `John Doe`. For more information, see -{stack-ov}/mapping-roles.html[Mapping users and groups to roles]. +<>. -- . (Optional) Configure the `metadata` setting in the Active Directory realm to @@ -244,5 +244,5 @@ include extra properties in the user's metadata. -- By default, `ldap_dn` and `ldap_groups` are populated in the user's metadata. For more information, see -{xpack-ref}/active-directory-realm.html#ad-user-metadata[User Metadata in Active Directory Realms]. +<>. -- diff --git a/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc index df92442507f6..a3c034a75061 100644 --- a/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-file-realm.asciidoc @@ -22,8 +22,7 @@ or using a configuration management system such as Puppet or Chef). The `file` realm is added to the realm chain by default. You don't need to explicitly configure a `file` realm. -For more information about file realms, see -{xpack-ref}/file-realm.html[File-based user authentication]. +For more information about file realms, see <>. . (Optional) Add a realm configuration to `elasticsearch.yml` under the `xpack.security.authc.realms.file` namespace. At a minimum, you must set diff --git a/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc index 6a99a4928c71..0d834664b514 100644 --- a/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc @@ -15,7 +15,7 @@ Refer to your Kerberos installation documentation for more information about obtaining TGT. {es} clients must first obtain a TGT then initiate the process of authenticating with {es}. -For a summary of Kerberos terminology, see {stack-ov}/kerberos-realm.html[Kerberos authentication]. +For a summary of Kerberos terminology, see <>. ==== Before you begin @@ -65,7 +65,7 @@ default realm, the Key Distribution Center (KDC), and other configuration detail required for Kerberos authentication. When the JVM needs some configuration properties, it tries to find those values by locating and loading this file. The JVM system property to configure the file path is `java.security.krb5.conf`. To -configure JVM system properties see {ref}/jvm-options.html[configuring jvm options]. +configure JVM system properties see <>. If this system property is not specified, Java tries to locate the file based on the conventions. @@ -133,7 +133,7 @@ set to `true`, the realm part (`@REALM`) is removed. The resulting `username` is used for role mapping. For detailed information of available realm settings, -see {ref}/security-settings.html#ref-kerberos-settings[Kerberos realm settings]. +see <>. 
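As a minimal sketch of such a configuration, a `kerberos` realm entry in `elasticsearch.yml` might look like the following (the realm name `kerb1` and the keytab file name are illustrative):

[source, yaml]
------------------------------------------------------------
xpack.security.authc.realms.kerberos.kerb1:
  order: 3
  keytab.path: es.keytab
  remove_realm_name: false
------------------------------------------------------------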
-- @@ -145,7 +145,7 @@ see {ref}/security-settings.html#ref-kerberos-settings[Kerberos realm settings]. The `kerberos` realm enables you to map Kerberos users to roles. You can configure these role mappings by using the -{ref}/security-api-role-mapping.html[role-mapping API]. You identify +<>. You identify users by their `username` field. The following example uses the role mapping API to map `user@REALM` to the roles @@ -169,10 +169,10 @@ following are the additional user metadata available for role mapping: - `kerberos_realm` will be set to Kerberos realm name. - `kerberos_user_principal_name` will be set to user principal name from the Kerberos ticket. -For more information, see {stack-ov}/mapping-roles.html[Mapping users and groups to roles]. +For more information, see <>. NOTE: The Kerberos realm supports -{stack-ov}/realm-chains.html#authorization_realms[authorization realms] as an +<> as an alternative to role mapping. -- diff --git a/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc index f7288469abd3..5bc3f960633d 100644 --- a/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc @@ -7,7 +7,7 @@ Directory Access Protocol (LDAP) server. To integrate with LDAP, you configure an `ldap` realm and map LDAP groups to user roles. For more information about LDAP realms, see -{stack-ov}/ldap-realm.html[LDAP User Authentication]. +<>. . Determine which mode you want to use. The `ldap` realm supports two modes of operation, a user search mode and a mode with specific templates for user DNs. @@ -134,7 +134,7 @@ See <>. -- The `ldap` realm enables you to map LDAP users to roles via their LDAP groups, or other metadata. This role mapping can be configured via the -{ref}/security-api-put-role-mapping.html[add role mapping API] or by using a file stored +<> or by using a file stored on each node. When a user authenticates with LDAP, the privileges for that user are the union of all privileges defined by the roles to which the user is mapped. @@ -188,12 +188,10 @@ user: <3> The LDAP distinguished name (DN) of the `users` group. For more information, see -{stack-ov}/ldap-realm.html#mapping-roles-ldap[Mapping LDAP Groups to Roles] -and -{stack-ov}/mapping-roles.html[Mapping Users and Groups to Roles]. +<> and <>. NOTE: The LDAP realm supports -{stack-ov}/realm-chains.html#authorization_realms[authorization realms] as an +<> as an alternative to role mapping. -- @@ -204,7 +202,7 @@ fields in the user's metadata. -- By default, `ldap_dn` and `ldap_groups` are populated in the user's metadata. For more information, see -{stack-ov}/ldap-realm.html#ldap-user-metadata[User Metadata in LDAP Realms]. +<>. The example below includes the user's common name (`cn`) as an additional field in their metadata. diff --git a/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc index 9b4a8f187274..da99253217ff 100644 --- a/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc @@ -14,7 +14,7 @@ requires some <>. On authentication and to submit the client certificates to {es} for further validation by a PKI realm. -For more general information, see {stack-ov}/pki-realm.html[PKI user authentication]. +For more general information, see <>. 
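As a rough sketch, a basic `pki` realm entry in `elasticsearch.yml` needs little more than an order (the realm name `pki1` is arbitrary):

[source, yaml]
------------------------------------------------------------
xpack.security.authc.realms.pki.pki1:
  order: 1
------------------------------------------------------------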
[float] [role="xpack"] @@ -209,11 +209,9 @@ certificate as the means of authentication) and inspect the metadata field in the result. The user's distinguished name will be populated under the `pki_dn` key. You can also use the authenticate API to validate your role mapping. -For more information, see -{stack-ov}/mapping-roles.html[Mapping Users and Groups to Roles]. +For more information, see <>. -NOTE: The PKI realm supports -{stack-ov}/realm-chains.html#authorization_realms[authorization realms] as an +NOTE: The PKI realm supports <> as an alternative to role mapping. -- @@ -254,9 +252,8 @@ PKI certificate authentication]. A PKI realm with `delegation.enabled` still works unchanged for clients connecting directly to {es}. Directly authenticated users, and users that are PKI authenticated by delegation to {kib} both follow the same -{stack-ov}/mapping-roles.html[role mapping rules] or -{stack-ov}/realm-chains.html#authorization_realms[authorization realms -configurations]. +<> or +<>. However, if you use the <>, you can distinguish between users that are authenticated by delegation and diff --git a/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc index b0bdd67d2dee..07ad774353c0 100644 --- a/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc @@ -9,8 +9,7 @@ integrate with any identity provider (IdP) that supports at least the SAML 2.0 Web Browser SSO Profile. In SAML terminology, the {stack} is operating as a _service provider_ (SP). For more -information, see {stack-ov}/saml-realm.html[SAML authentication] and -{stack-ov}/saml-guide.html[Configuring SAML SSO on the {stack}]. +information, see <> and <>. [NOTE] -- @@ -55,7 +54,7 @@ download or generate such a document within your IdP administration interface. Most IdPs will provide an appropriate metadata file with all the features that the {stack} requires. For more information, see -{stack-ov}/saml-guide-idp.html[The identity provider]. +<>. -- .. Download the IdP metadata document and store it within the `config` directory @@ -96,7 +95,7 @@ xpack.security.authc.realms: ------------------------------------------------------------ <1> The realm must be within the `xpack.security.authc.realms.saml` namespace. <2> This setting defines a new authentication realm named "saml1". For an -introduction to realms, see {stack-ov}/realms.html[Realms]. +introduction to realms, see <>. <3> You should define a unique order on each realm in your authentication chain. It is recommended that the SAML realm be at the bottom of your authentication chain (that is, it has the _highest_ order). @@ -130,7 +129,7 @@ we recommend that you include at least one additional realm such as the native realm in your authentication chain for use by API clients. For more information, see -{stack-ov}/saml-guide-authentication.html#saml-create-realm[Create a SAML realm]. +<>. -- . Add attribute mappings. @@ -177,7 +176,7 @@ xpack.security.authc.realms.saml.saml1: ------------------------------------------------------------ For more information, see -{stack-ov}/saml-guide-authentication.html#saml-attribute-mapping[Attribute mapping]. +<>. -- . (Optional) Configure logout services. @@ -187,7 +186,7 @@ The SAML protocol supports the concept of Single Logout (SLO). The level of support for SLO varies between identity providers. 
For more information, see -{stack-ov}/saml-guide-authentication.html#saml-logout[SAML logout]. +<>. -- . (Optional) Configure encryption and signing. @@ -199,7 +198,7 @@ and logout), and processing encrypted content. You can configure {es} for signing, encryption, or both, with the same or separate keys. For more information, see -{stack-ov}/saml-guide-authentication.html#saml-enc-sign[Encryption and signing]. +<>. -- . (Optional) Generate service provider metadata. @@ -219,13 +218,13 @@ but this does not automatically grant them access to perform any actions or access any data. Your SAML users cannot do anything until they are assigned roles. This can be done -through either the {stack-ov}/saml-role-mapping.html[role mapping API], or with -{stack-ov}/realm-chains.html#authorization_realms[authorization realms]. +through either the <>, or with +<>. -NOTE: You cannot use {stack-ov}/mapping-roles.html#mapping-roles-file[role mapping files] +NOTE: You cannot use <> to grant roles to users authenticating via SAML. -- -. {stack-ov}/saml-kibana.html[Configure {kib} to use SAML SSO]. +. <>. diff --git a/x-pack/docs/en/security/authentication/oidc-guide.asciidoc b/x-pack/docs/en/security/authentication/oidc-guide.asciidoc index ee318a8c3a13..da95f5fb74f9 100644 --- a/x-pack/docs/en/security/authentication/oidc-guide.asciidoc +++ b/x-pack/docs/en/security/authentication/oidc-guide.asciidoc @@ -414,9 +414,9 @@ access any data. Your OpenID Connect users cannot do anything until they are assigned roles. This can be done through either the {ref}/security-api-put-role-mapping.html[add role mapping API], or with -<>. +<>. -NOTE: You cannot use {stack-ov}/mapping-roles.html#mapping-roles-file[role mapping files] +NOTE: You cannot use <> to grant roles to users authenticating via OpenID Connect. This is an example of a simple role mapping that grants the `kibana_user` role diff --git a/x-pack/docs/en/security/authentication/saml-guide.asciidoc b/x-pack/docs/en/security/authentication/saml-guide.asciidoc index 5249a5a1b3e4..66ea1bc634c6 100644 --- a/x-pack/docs/en/security/authentication/saml-guide.asciidoc +++ b/x-pack/docs/en/security/authentication/saml-guide.asciidoc @@ -90,7 +90,7 @@ configure the HTTP interface to use SSL/TLS before you can enable SAML authentication. For more information, see -{ref}/configuring-tls.html#tls-http[Encrypting HTTP Client Communications]. +<>. [[saml-enable-token]] ==== Enable the token service @@ -382,7 +382,7 @@ successfully authenticated, the Authentication Statement of the SAML Response contains an indication of the restrictions that were satisfied. You can define the Authentication Context Class Reference values by using the `req_authn_context_class_ref` option in the SAML realm configuration. See -{ref}/security-settings.html#ref-saml-settings[SAML realm settings]. +<>. {es} supports only the `exact` comparison method for the Authentication Context. When it receives the Authentication Response from the IdP, {es} examines the @@ -501,7 +501,7 @@ You should consult the documentation for your IdP to determine what formats they support. Since PEM format is the most commonly supported format, the examples below will generate certificates in that format. -Using the {ref}/certutil.html[`elasticsearch-certutil`] tool, you can generate a +Using the <>, you can generate a signing certificate with the following command: [source, sh] @@ -541,7 +541,7 @@ The path to the PEM formatted key file. e.g. 
`saml/saml-sign.key` `signing.secure_key_passphrase`:: The passphrase for the key, if the file is encrypted. This is a -{ref}/secure-settings.html[secure setting] that must be set with the +<> that must be set with the `elasticsearch-keystore` tool. If you wish to use *PKCS#12 formatted* files or a *Java Keystore* for @@ -555,7 +555,7 @@ The alias of the key within the keystore. e.g. `signing-key` `signing.keystore.secure_password`:: The passphrase for the keystore, if the file is encrypted. This is a -{ref}/secure-settings.html[secure setting] that must be set with the +<> that must be set with the `elasticsearch-keystore` tool. If you wish to sign some, but not all outgoing *SAML messages*, then you @@ -592,7 +592,7 @@ The path to the PEM formatted key file. e.g. `saml/saml-crypt.key` `encryption.secure_key_passphrase`:: The passphrase for the key, if the file is encrypted. This is a -{ref}/secure-settings.html[secure setting] that must be set with the +<> that must be set with the `elasticsearch-keystore` tool. If you wish to use *PKCS#12 formatted* files or a *Java Keystore* for SAML @@ -606,7 +606,7 @@ The alias of the key within the keystore. e.g. `encryption-key` `encryption.keystore.secure_password`:: The passphrase for the keystore, if the file is encrypted. This is a -{ref}/secure-settings.html[secure setting] that must be set with the +<> that must be set with the `elasticsearch-keystore` tool. [[saml-sp-metadata]] @@ -619,7 +619,7 @@ between the IdP and the SP. The Elastic Stack supports generating such a metadata file using the `bin/elasticsearch-saml-metadata` command in your {es} directory. -The {ref}/saml-metadata.html[documentation for the elasticsearch-saml-metadata utility] +The <> describes how to run it, and the available command line options. [[saml-role-mapping]] @@ -631,10 +631,10 @@ access any data. Your SAML users cannot do anything until they are assigned roles. This can be done through either the -{ref}/security-api-put-role-mapping.html[add role mapping API], or with -<>. +<> or with +<>. -NOTE: You cannot use {stack-ov}/mapping-roles.html#mapping-roles-file[role mapping files] +NOTE: You cannot use <> to grant roles to users authenticating via SAML. This is an example of a simple role mapping that grants the `kibana_user` role @@ -665,7 +665,7 @@ mapping are derived from the SAML attributes as follows: - `metadata`: See <> For more information, see <> and -{ref}/security-api.html#security-role-mapping-apis[role mapping APIs]. +<>. If your IdP has the ability to provide groups or roles to Service Providers, then you should map this SAML attribute to the `attributes.groups` setting in @@ -853,5 +853,5 @@ Additionally, different security domains have different security requirements th specific configuration to be satisfied. A conscious effort has been made to mask this complexity with sane defaults and the detailed documentation above but in case you encounter issues while configuring a SAML realm, you can -look through our {stack-ov}/trb-security-saml.html[SAML troubleshooting documentation] that has +look through our <> that has suggestions and resolutions for common issues. diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index b3f04eec33e1..a2194df77c76 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -9,7 +9,7 @@ The {es} {security-features} enable you to easily secure a cluster. 
You can password-protect your data as well as implement more advanced security measures such as encrypting communications, role-based access control, IP filtering, and auditing. For more information, see -{stack-ov}/elasticsearch-security.html[Securing the {stack}]. +<>. . Verify that you are using a license that includes the specific {security-features} you want. @@ -21,7 +21,7 @@ For more information, see https://www.elastic.co/subscriptions and . Verify that the `xpack.security.enabled` setting is `true` on each node in your cluster. If you are using basic or trial licenses, the default value is `false`. -For more information, see {ref}/security-settings.html[Security settings in {es}]. +For more information, see <>. . If you plan to run {es} in a Federal Information Processing Standard (FIPS) 140-2 enabled JVM, see <>. @@ -33,7 +33,7 @@ NOTE: This requirement applies to clusters with more than one node and to clusters with a single node that listens on an external interface. Single-node clusters that use a loopback interface do not have this requirement. For more information, see -{stack-ov}/encrypting-communications.html[Encrypting communications]. +<>. -- @@ -43,7 +43,7 @@ information, see + -- The {es} {security-features} provide -{stack-ov}/built-in-users.html[built-in users] to +<> to help you get up and running. The +elasticsearch-setup-passwords+ command is the simplest method to set the built-in users' passwords for the first time. @@ -127,8 +127,7 @@ information, see https://www.elastic.co/subscriptions. xpack.security.audit.enabled: true ---------------------------- + -For more information, see {stack-ov}/auditing.html[Auditing Security Events] -and <>. +For more information, see <> and <>. .. Restart {es}. @@ -136,8 +135,7 @@ Events are logged to a dedicated `_audit.json` file in `ES_HOME/logs`, on each cluster node. -- -To walk through the configuration of {security-features} in {es}, {kib}, {ls}, and {metricbeat}, see -{stack-ov}/security-getting-started.html[Getting started with security]. +To walk through the configuration of {security-features} in {es}, {kib}, {ls}, and {metricbeat}, see <>. include::securing-communications/securing-elasticsearch.asciidoc[] diff --git a/x-pack/docs/en/security/reference/files.asciidoc b/x-pack/docs/en/security/reference/files.asciidoc index 306fdcdddc16..edf9c222dd0b 100644 --- a/x-pack/docs/en/security/reference/files.asciidoc +++ b/x-pack/docs/en/security/reference/files.asciidoc @@ -6,7 +6,7 @@ The {es} {security-features} use the following files: * `ES_PATH_CONF/roles.yml` defines the roles in use on the cluster. See -{stack-ov}/defining-roles.html[Defining roles]. +<>. * `ES_PATH_CONF/elasticsearch-users` defines the users and their hashed passwords for the `file` realm. See <>. @@ -17,10 +17,10 @@ The {es} {security-features} use the following files: * `ES_PATH_CONF/role_mapping.yml` defines the role assignments for a Distinguished Name (DN) to a role. This allows for LDAP and Active Directory groups and users and PKI users to be mapped to roles. See - {stack-ov}/mapping-roles.html[Mapping users and groups to roles]. + <>. * `ES_PATH_CONF/log4j2.properties` contains audit information. See -{stack-ov}/audit-log-output.html[Logfile audit output]. +<>. 
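As an illustration of the `role_mapping.yml` format listed above, each entry maps a role name to the distinguished names that receive it; the group DN below is hypothetical:

[source, yaml]
------------------------------------------------------------
monitoring:
  - "cn=admins,dc=example,dc=com"
------------------------------------------------------------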
[[security-files-location]]

diff --git a/x-pack/docs/en/security/securing-communications/configuring-tls-docker.asciidoc b/x-pack/docs/en/security/securing-communications/configuring-tls-docker.asciidoc
index 2190f17e4588..ec00220ab1d7 100644
--- a/x-pack/docs/en/security/securing-communications/configuring-tls-docker.asciidoc
+++ b/x-pack/docs/en/security/securing-communications/configuring-tls-docker.asciidoc
@@ -9,8 +9,8 @@ This section demonstrates an easy path to get started with SSL/TLS for both
HTTPS and transport using the {es} Docker image. The example uses Docker
Compose to manage the containers.

-For further details, please refer to
-{stack-ov}/encrypting-communications.html[Encrypting communications] and
+For further details, see
+<> and
https://www.elastic.co/subscriptions[available subscriptions].

[float]
@@ -163,7 +163,7 @@ volumes: {"data01", "data02", "certs"}
----
<1> Bootstrap `elastic` with the password defined in `.env`. See
-{stack-ov}/built-in-users.html#bootstrap-elastic-passwords[the Elastic Bootstrap Password].
+<>.
<2> Automatically generate and apply a trial subscription, in order to enable
{security-features}.
<3> Disable verification of authenticity for inter-node communication. Allows
diff --git a/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc b/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc
index 635c8a1450f5..ab8a60da791b 100644
--- a/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc
+++ b/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc
@@ -29,7 +29,7 @@ information, see <>.
<>.

For more information about encrypting communications across the Elastic Stack,
-see {stack-ov}/encrypting-communications.html[Encrypting Communications].
+see <>.

include::node-certificates.asciidoc[]

From bf2a5a052457996b8eee8aafe58584a1e6af9cd9 Mon Sep 17 00:00:00 2001
From: Jason Tedor 
Date: Fri, 4 Oct 2019 19:11:54 -0400
Subject: [PATCH 04/55] Move ES_TMPDIR substitution into jvm options parser
 (#47189)

This commit moves the ES_TMPDIR substitution that we do for JVM options
into the JVM options parser itself. This solves a problem where the fact
that we do not make the substitution before ergonomics parsing can cause
the JVM that we start for computing the ergonomic values to fail to
start. Additionally, moving this substitution here enables us to
simplify the shell scripts, since we no longer need to implement it
there twice, once for Bash and once for Windows.
---
 distribution/src/bin/elasticsearch            |  3 +--
 .../src/bin/elasticsearch-service.bat         |  4 +--
 distribution/src/bin/elasticsearch.bat        |  4 +--
 .../tools/launchers/JvmOptionsParser.java     | 26 ++++++++++++++++---
 .../launchers/JvmOptionsParserTests.java      |  7 +++++
 5 files changed, 35 insertions(+), 9 deletions(-)

diff --git a/distribution/src/bin/elasticsearch b/distribution/src/bin/elasticsearch
index b7ed2b648b76..53329cc6bad4 100755
--- a/distribution/src/bin/elasticsearch
+++ b/distribution/src/bin/elasticsearch
@@ -21,8 +21,7 @@ if [ -z "$ES_TMPDIR" ]; then
 fi

 ES_JVM_OPTIONS="$ES_PATH_CONF"/jvm.options
-JVM_OPTIONS=`"$JAVA" -cp "$ES_CLASSPATH" org.elasticsearch.tools.launchers.JvmOptionsParser "$ES_JVM_OPTIONS"`
-ES_JAVA_OPTS="${JVM_OPTIONS//\$\{ES_TMPDIR\}/$ES_TMPDIR}"
+ES_JAVA_OPTS=`export ES_TMPDIR; "$JAVA" -cp "$ES_CLASSPATH" org.elasticsearch.tools.launchers.JvmOptionsParser "$ES_JVM_OPTIONS"`

 # manual parsing to find out, if process should be detached
 if !
echo $* | grep -E '(^-d |-d$| -d |--daemonize$|--daemonize )' > /dev/null; then diff --git a/distribution/src/bin/elasticsearch-service.bat b/distribution/src/bin/elasticsearch-service.bat index fd4d4b666dba..3b1478110692 100644 --- a/distribution/src/bin/elasticsearch-service.bat +++ b/distribution/src/bin/elasticsearch-service.bat @@ -115,8 +115,8 @@ set ES_JVM_OPTIONS=%ES_PATH_CONF%\jvm.options if not "%ES_JAVA_OPTS%" == "" set ES_JAVA_OPTS=%ES_JAVA_OPTS: =;% @setlocal -for /F "usebackq delims=" %%a in (`"%JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.JvmOptionsParser" "!ES_JVM_OPTIONS!" || echo jvm_options_parser_failed"`) do set JVM_OPTIONS=%%a -@endlocal & set "MAYBE_JVM_OPTIONS_PARSER_FAILED=%JVM_OPTIONS%" & set ES_JAVA_OPTS=%JVM_OPTIONS:${ES_TMPDIR}=!ES_TMPDIR!% +for /F "usebackq delims=" %%a in (`"%JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.JvmOptionsParser" "!ES_JVM_OPTIONS!" || echo jvm_options_parser_failed"`) do set ES_JAVA_OPTS=%%a +@endlocal & set "MAYBE_JVM_OPTIONS_PARSER_FAILED=%ES_JAVA_OPTS%" & set ES_JAVA_OPTS=%ES_JAVA_OPTS% if "%MAYBE_JVM_OPTIONS_PARSER_FAILED%" == "jvm_options_parser_failed" ( exit /b 1 diff --git a/distribution/src/bin/elasticsearch.bat b/distribution/src/bin/elasticsearch.bat index 03dc48728d52..9460554f81f4 100644 --- a/distribution/src/bin/elasticsearch.bat +++ b/distribution/src/bin/elasticsearch.bat @@ -47,8 +47,8 @@ if not defined ES_TMPDIR ( set ES_JVM_OPTIONS=%ES_PATH_CONF%\jvm.options @setlocal -for /F "usebackq delims=" %%a in (`CALL %JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.JvmOptionsParser" "!ES_JVM_OPTIONS!" ^|^| echo jvm_options_parser_failed`) do set JVM_OPTIONS=%%a -@endlocal & set "MAYBE_JVM_OPTIONS_PARSER_FAILED=%JVM_OPTIONS%" & set ES_JAVA_OPTS=%JVM_OPTIONS:${ES_TMPDIR}=!ES_TMPDIR!% +for /F "usebackq delims=" %%a in (`CALL %JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.JvmOptionsParser" "!ES_JVM_OPTIONS!" 
^|^| echo jvm_options_parser_failed`) do set ES_JAVA_OPTS=%%a
-@endlocal & set "MAYBE_JVM_OPTIONS_PARSER_FAILED=%JVM_OPTIONS%" & set ES_JAVA_OPTS=%JVM_OPTIONS:${ES_TMPDIR}=!ES_TMPDIR!%
+@endlocal & set "MAYBE_JVM_OPTIONS_PARSER_FAILED=%ES_JAVA_OPTS%" & set ES_JAVA_OPTS=%ES_JAVA_OPTS%

if "%MAYBE_JVM_OPTIONS_PARSER_FAILED%" == "jvm_options_parser_failed" (
  exit /b 1
diff --git a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmOptionsParser.java b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmOptionsParser.java
index 7894cab72a1e..757a1b3987f2 100644
--- a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmOptionsParser.java
+++ b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/JvmOptionsParser.java
@@ -87,9 +87,11 @@ final class JvmOptionsParser {
                    .filter(Predicate.not(String::isBlank))
                    .collect(Collectors.toUnmodifiableList()));
            }
-            final List<String> ergonomicJvmOptions = JvmErgonomics.choose(jvmOptions);
-            jvmOptions.addAll(ergonomicJvmOptions);
-            final String spaceDelimitedJvmOptions = spaceDelimitJvmOptions(jvmOptions);
+            final List<String> substitutedJvmOptions =
+                    substitutePlaceholders(jvmOptions, Map.of("ES_TMPDIR", System.getenv("ES_TMPDIR")));
+            final List<String> ergonomicJvmOptions = JvmErgonomics.choose(substitutedJvmOptions);
+            substitutedJvmOptions.addAll(ergonomicJvmOptions);
+            final String spaceDelimitedJvmOptions = spaceDelimitJvmOptions(substitutedJvmOptions);
            Launchers.outPrintln(spaceDelimitedJvmOptions);
            Launchers.exit(0);
        } else {
@@ -115,6 +117,24 @@ final class JvmOptionsParser {
        }
    }

+    static List<String> substitutePlaceholders(final List<String> jvmOptions, final Map<String, String> substitutions) {
+        final Map<String, String> placeholderSubstitutions =
+                substitutions.entrySet().stream().collect(Collectors.toMap(e -> "${" + e.getKey() + "}", Map.Entry::getValue));
+        return jvmOptions.stream()
+                .map(
+                        jvmOption -> {
+                            String actualJvmOption = jvmOption;
+                            int start = jvmOption.indexOf("${");
+                            if (start >= 0 && jvmOption.indexOf('}', start) > 0) {
+                                for (final Map.Entry<String, String> placeholderSubstitution : placeholderSubstitutions.entrySet()) {
+                                    actualJvmOption = actualJvmOption.replace(placeholderSubstitution.getKey(), placeholderSubstitution.getValue());
+                                }
+                            }
+                            return actualJvmOption;
+                        })
+                .collect(Collectors.toList());
+    }
+
    /**
     * Callback for valid JVM options.
*/
diff --git a/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmOptionsParserTests.java b/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmOptionsParserTests.java
index 1433d19e8bb6..41a2b71847e2 100644
--- a/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmOptionsParserTests.java
+++ b/distribution/tools/launchers/src/test/java/org/elasticsearch/tools/launchers/JvmOptionsParserTests.java
@@ -30,6 +30,7 @@ import java.util.Locale;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicBoolean;

+import static org.hamcrest.Matchers.contains;
 import static org.hamcrest.Matchers.equalTo;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
@@ -39,6 +40,12 @@ import static org.junit.Assert.fail;

 public class JvmOptionsParserTests extends LaunchersTestCase {

+    public void testSubstitution() {
+        final List<String> jvmOptions =
+                JvmOptionsParser.substitutePlaceholders(List.of("-Djava.io.tmpdir=${ES_TMPDIR}"), Map.of("ES_TMPDIR", "/tmp/elasticsearch"));
+        assertThat(jvmOptions, contains("-Djava.io.tmpdir=/tmp/elasticsearch"));
+    }
+
     public void testUnversionedOptions() throws IOException {
         try (StringReader sr = new StringReader("-Xms1g\n-Xmx1g");
              BufferedReader br = new BufferedReader(sr)) {

From 019265745881bb61e9dc10dc550a424eb7fb9190 Mon Sep 17 00:00:00 2001
From: debadair 
Date: Fri, 4 Oct 2019 16:24:35 -0700
Subject: [PATCH 05/55] [DOCS] Reformats bulk API. (#47479)

* Reformats bulk API.

* Update docs/reference/docs/bulk.asciidoc

Co-Authored-By: James Rodewig 
---
 docs/reference/docs/bulk.asciidoc | 298 ++++++++++++++++++------------
 1 file changed, 180 insertions(+), 118 deletions(-)

diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc
index 2bf023045e38..6e6b61d73574 100644
--- a/docs/reference/docs/bulk.asciidoc
+++ b/docs/reference/docs/bulk.asciidoc
@@ -1,28 +1,37 @@
 [[docs-bulk]]
 === Bulk API
+++++
+Bulk
+++++

-The bulk API makes it possible to perform many index/delete operations
-in a single API call. This can greatly increase the indexing speed.
+Performs multiple indexing or delete operations in a single API call.
+This reduces overhead and can greatly increase indexing speed.

-.Client support for bulk requests
-*********************************************
+[source,console]
+--------------------------------------------------
+POST _bulk
+{ "index" : { "_index" : "test", "_id" : "1" } }
+{ "field1" : "value1" }
+{ "delete" : { "_index" : "test", "_id" : "2" } }
+{ "create" : { "_index" : "test", "_id" : "3" } }
+{ "field1" : "value3" }
+{ "update" : {"_id" : "1", "_index" : "test"} }
+{ "doc" : {"field2" : "value2"} }
+--------------------------------------------------

-Some of the officially supported clients provide helpers to assist with
-bulk requests and reindexing of documents from one index to another:
+[[docs-bulk-api-request]]
+==== {api-request-title}

-Perl::
+`POST /_bulk`

-    See https://metacpan.org/pod/Search::Elasticsearch::Client::5_0::Bulk[Search::Elasticsearch::Client::5_0::Bulk]
-    and https://metacpan.org/pod/Search::Elasticsearch::Client::5_0::Scroll[Search::Elasticsearch::Client::5_0::Scroll]
+`POST /<index>/_bulk`

-Python::
+[[docs-bulk-api-desc]]
+==== {api-description-title}

-    See http://elasticsearch-py.readthedocs.org/en/master/helpers.html[elasticsearch.helpers.*]
+Provides a way to perform multiple `index`, `create`, `delete`, and `update` actions in a single request.
-
-*********************************************

-The REST API endpoint is `/_bulk`, and it expects the following newline delimited JSON
-(NDJSON) structure:
+The actions are specified in the request body using a newline delimited JSON (NDJSON) structure:

[source,js]
--------------------------------------------------
@@ -36,19 +45,70 @@ optional_source\n
--------------------------------------------------
// NOTCONSOLE

-*NOTE*: The final line of data must end with a newline character `\n`. Each newline character
-may be preceded by a carriage return `\r`. When sending requests to this endpoint the
-`Content-Type` header should be set to `application/x-ndjson`.
+The `index` and `create` actions expect a source on the next line,
+and have the same semantics as the `op_type` parameter in the standard index API:
+create fails if a document with the same ID already exists in the index,
+index adds or replaces a document as necessary.

-The possible actions are `index`, `create`, `delete`, and `update`.
-`index` and `create` expect a source on the next
-line, and have the same semantics as the `op_type` parameter to the
-standard index API (i.e. create will fail if a document with the same
-index exists already, whereas index will add or replace a
-document as necessary). `delete` does not expect a source on the
-following line, and has the same semantics as the standard delete API.
-`update` expects that the partial doc, upsert and script and its options
-are specified on the next line.
+`update` expects that the partial doc, upsert,
+and script and its options are specified on the next line.
+
+`delete` does not expect a source on the next line and
+has the same semantics as the standard delete API.
+
+[NOTE]
+====
+The final line of data must end with a newline character `\n`.
+Each newline character may be preceded by a carriage return `\r`.
+When sending requests to the `_bulk` endpoint,
+the `Content-Type` header should be set to `application/x-ndjson`.
+====
+
+Because this format uses literal `\n`'s as delimiters,
+make sure that the JSON actions and sources are not pretty printed.
+
+If you specify an index in the request URI,
+it is used for any actions that don't explicitly specify an index.
+
+A note on the format: The idea here is to make processing of this as
+fast as possible. As some of the actions are redirected to other
+shards on other nodes, only `action_meta_data` is parsed on the
+receiving node side.
+
+Client libraries using this protocol should strive to do
+something similar on the client side, and reduce buffering as much as
+possible.
+
+The response to a bulk action is a large JSON structure with
+the individual results of each action performed,
+in the same order as the actions that appeared in the request.
+The failure of a single action does not affect the remaining actions.
+
+There is no "correct" number of actions to perform in a single bulk request.
+Experiment with different settings to find the optimal size for your particular workload.
+
+When using the HTTP API, make sure that the client does not send HTTP chunks,
+as this will slow things down.
+
+[float]
+[[bulk-clients]]
+===== Client support for bulk requests
+
+Some of the officially supported clients provide helpers to assist with
+bulk requests and reindexing of documents from one index to another:
+
+Perl::
+
+    See https://metacpan.org/pod/Search::Elasticsearch::Client::5_0::Bulk[Search::Elasticsearch::Client::5_0::Bulk]
+    and https://metacpan.org/pod/Search::Elasticsearch::Client::5_0::Scroll[Search::Elasticsearch::Client::5_0::Scroll]
+
+Python::
+
+    See http://elasticsearch-py.readthedocs.org/en/master/helpers.html[elasticsearch.helpers.*]
+
+[float]
+[[bulk-curl]]
+===== Submitting bulk requests with cURL

If you're providing text file input to `curl`, you *must* use the
`--data-binary` flag instead of plain `-d`. The latter doesn't preserve
@@ -65,9 +125,97 @@ $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --
// NOTCONSOLE
// Not converting to console because this shows how curl works

+[float]
+[[bulk-optimistic-concurrency-control]]
+===== Optimistic Concurrency Control
+
+Each `index` and `delete` action within a bulk API call may include the
+`if_seq_no` and `if_primary_term` parameters in their respective action
+and meta data lines. The `if_seq_no` and `if_primary_term` parameters control
+how operations are executed, based on the last modification to existing
+documents. See <> for more details.
+
+
+[float]
+[[bulk-versioning]]
+===== Versioning
+
+Each bulk item can include the version value using the
+`version` field. It automatically follows the behavior of the
+index / delete operation based on the `_version` mapping. It also
+supports the `version_type` (see <>).
+
+[float]
+[[bulk-routing]]
+===== Routing
+
+Each bulk item can include the routing value using the
+`routing` field. It automatically follows the behavior of the
+index / delete operation based on the `_routing` mapping.
+
+[float]
+[[bulk-wait-for-active-shards]]
+===== Wait For Active Shards
+
+When making bulk calls, you can set the `wait_for_active_shards`
+parameter to require a minimum number of shard copies to be active
+before starting to process the bulk request. See
+<> for further details and a usage
+example.
+
+[float]
+[[bulk-refresh]]
+===== Refresh
+
+Control when the changes made by this request are visible to search. See
+<>.
+
+NOTE: Only the shards that receive the bulk request will be affected by
+`refresh`. Imagine a `_bulk?refresh=wait_for` request with three
+documents in it that happen to be routed to different shards in an index
+with five shards. The request will only wait for those three shards to
+refresh. The other two shards that make up the index do not
+participate in the `_bulk` request at all.
+
+[float]
+[[bulk-security]]
+===== Security
+
+See <>.
+
+[float]
+[[bulk-partial-responses]]
+===== Partial responses
+To ensure fast responses, the bulk API will respond with partial results if one or more shards fail.
+See <> for more information.
+
+[[docs-bulk-api-path-params]]
+==== {api-path-parms-title}
+
+`<index>`::
+(Optional, string) Name of the index to perform the bulk actions against.
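For example, when the index is supplied in the request path, individual actions can omit `_index`; the index name and document ID below are illustrative:

[source,console]
--------------------------------------------------
POST /test/_bulk
{ "index" : { "_id" : "1" } }
{ "field1" : "value1" }
--------------------------------------------------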
+ +[[docs-bulk-api-query-params]] +==== {api-query-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=pipeline] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=refresh] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=routing] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=source] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=source_excludes] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=source_includes] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=timeout] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=wait_for_active_shards] + +[[docs-bulk-api-example]] +==== {api-examples-title} [source,console] -------------------------------------------------- @@ -81,7 +229,7 @@ POST _bulk { "doc" : {"field2" : "value2"} } -------------------------------------------------- -The result of this bulk operation is: +The API returns the following result: [source,console-result] -------------------------------------------------- @@ -171,85 +319,9 @@ The result of this bulk operation is: // TESTRESPONSE[s/"_seq_no" : 3/"_seq_no" : $body.items.3.update._seq_no/] // TESTRESPONSE[s/"_primary_term" : 4/"_primary_term" : $body.items.3.update._primary_term/] -The endpoints are `/_bulk` and `/{index}/_bulk`. When the index is provided, it -will be used by default on bulk items that don't provide it explicitly. - -A note on the format. The idea here is to make processing of this as -fast as possible. As some of the actions will be redirected to other -shards on other nodes, only `action_meta_data` is parsed on the -receiving node side. - -Client libraries using this protocol should try and strive to do -something similar on the client side, and reduce buffering as much as -possible. - -The response to a bulk action is a large JSON structure with the individual -results of each action that was performed in the same order as the actions that -appeared in the request. The failure of a single action does not affect the -remaining actions. - -There is no "correct" number of actions to perform in a single bulk -call. You should experiment with different settings to find the optimum -size for your particular workload. - -If using the HTTP API, make sure that the client does not send HTTP -chunks, as this will slow things down. - -[float] -[[bulk-optimistic-concurrency-control]] -==== Optimistic Concurrency Control - -Each `index` and `delete` action within a bulk API call may include the -`if_seq_no` and `if_primary_term` parameters in their respective action -and meta data lines. The `if_seq_no` and `if_primary_term` parameters control -how operations are executed, based on the last modification to existing -documents. See <> for more details. - - -[float] -[[bulk-versioning]] -==== Versioning - -Each bulk item can include the version value using the -`version` field. It automatically follows the behavior of the -index / delete operation based on the `_version` mapping. It also -support the `version_type` (see <>). - -[float] -[[bulk-routing]] -==== Routing - -Each bulk item can include the routing value using the -`routing` field. It automatically follows the behavior of the -index / delete operation based on the `_routing` mapping. - -[float] -[[bulk-wait-for-active-shards]] -==== Wait For Active Shards - -When making bulk calls, you can set the `wait_for_active_shards` -parameter to require a minimum number of shard copies to be active -before starting to process the bulk request. See -<> for further details and a usage -example. 
- -[float] -[[bulk-refresh]] -==== Refresh - -Control when the changes made by this request are visible to search. See -<>. - -NOTE: Only the shards that receive the bulk request will be affected by -`refresh`. Imagine a `_bulk?refresh=wait_for` request with three -documents in it that happen to be routed to different shards in an index -with five shards. The request will only wait for those three shards to -refresh. The other two shards that make up the index do not -participate in the `_bulk` request at all. - [float] [[bulk-update]] -==== Update +===== Bulk update example When using the `update` action, `retry_on_conflict` can be used as a field in the action itself (not in the extra payload line), to specify how many @@ -276,13 +348,3 @@ POST _bulk -------------------------------------------------- // TEST[continued] -[float] -[[bulk-security]] -==== Security - -See <>. - -[float] -[[bulk-partial-responses]] -==== Partial responses -To ensure fast responses, the bulk API will respond with partial results if one or more shards fail. See <> for more information. \ No newline at end of file From d5be0404f582eacfd6907a499ebbf7f03598eb26 Mon Sep 17 00:00:00 2001 From: debadair Date: Fri, 4 Oct 2019 17:55:53 -0700 Subject: [PATCH 06/55] Reformats mget API (#47477) * Reformats mget API * Update docs/reference/docs/get.asciidoc Co-Authored-By: James Rodewig * Incorporated feedback. --- docs/reference/docs/get.asciidoc | 29 ++- docs/reference/docs/multi-get.asciidoc | 195 +++++++++++++----- docs/reference/rest-api/common-parms.asciidoc | 16 +- 3 files changed, 165 insertions(+), 75 deletions(-) diff --git a/docs/reference/docs/get.asciidoc b/docs/reference/docs/get.asciidoc index a3ed4498672f..c73646fe363d 100644 --- a/docs/reference/docs/get.asciidoc +++ b/docs/reference/docs/get.asciidoc @@ -6,6 +6,12 @@ Retrieves the specified JSON document from an index. +[source,console] +-------------------------------------------------- +GET twitter/_doc/0 +-------------------------------------------------- +// TEST[setup:twitter] + [[docs-get-api-request]] ==== {api-request-title} @@ -150,32 +156,21 @@ deleted documents in the background as you continue to index more data. [[docs-get-api-query-params]] ==== {api-query-parms-title} -`preference`:: -(Optional, string) Specify the node or shard the operation should -be performed on (default: random). +include::{docdir}/rest-api/common-parms.asciidoc[tag=preference] -`realtime`:: -(Optional, boolean) Set to `false` to disable real time GET -(default: `true`). See <>. +include::{docdir}/rest-api/common-parms.asciidoc[tag=realtime] include::{docdir}/rest-api/common-parms.asciidoc[tag=refresh] include::{docdir}/rest-api/common-parms.asciidoc[tag=routing] -`stored_fields`:: -(Optional, boolean) Set to `true` to retrieve the document fields stored in the -index rather than the document `_source` (default: `false`). +include::{docdir}/rest-api/common-parms.asciidoc[tag=stored_fields] -`_source`:: -(Optional, list) Set to `false` to disable source retrieval (default: `true`). - You can also specify a comma-separated list of the fields -you want to retrieve. +include::{docdir}/rest-api/common-parms.asciidoc[tag=source] -`_source_excludes`:: -(Optional, list) Specify the source fields you want to exclude. +include::{docdir}/rest-api/common-parms.asciidoc[tag=source_excludes] -`_source_includes`:: -(Optional, list) Specify the source fields you want to retrieve. 
+include::{docdir}/rest-api/common-parms.asciidoc[tag=source_includes] include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-version] diff --git a/docs/reference/docs/multi-get.asciidoc b/docs/reference/docs/multi-get.asciidoc index 834a77458274..5944aa2fc2a2 100644 --- a/docs/reference/docs/multi-get.asciidoc +++ b/docs/reference/docs/multi-get.asciidoc @@ -1,15 +1,10 @@ [[docs-multi-get]] -=== Multi Get API +=== Multi get (mget) API +++++ +Multi get +++++ -The Multi get API returns multiple documents based on an index and id -(and possibly routing). The response includes a `docs` array -with all the fetched documents in order corresponding to the original multi-get -request (if there was a failure for a specific get, an object containing this -error is included in place in the response instead). The structure of a -successful get is similar in structure to a document provided by the -<> API. - -Here is an example: +Retrieves multiple JSON documents by ID. [source,console] -------------------------------------------------- @@ -17,23 +12,121 @@ GET /_mget { "docs" : [ { - "_index" : "test", + "_index" : "twitter", "_id" : "1" }, { - "_index" : "test", + "_index" : "twitter", "_id" : "2" } ] } -------------------------------------------------- +// TEST[setup:twitter] -The `mget` endpoint can also be used against an index (in which case it -is not required in the body): +[[docs-multi-get-api-request]] +==== {api-request-title} + +`GET /_mget` + +`GET //_mget` + +[[docs-multi-get-api-desc]] +==== {api-description-title} + +You use `mget` to retrieve multiple documents from one or more indices. +If you specify an index in the request URI, you only need to specify the document IDs in the request body. + +[[mget-security]] +===== Security + +See <>. + +[[multi-get-partial-responses]] +===== Partial responses + +To ensure fast responses, the multi get API responds with partial results if one or more shards fail. +See <> for more information. + +[[docs-multi-get-api-path-params]] +==== {api-path-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index] + +[[docs-multi-get-api-query-params]] +==== {api-query-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=preference] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=realtime] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=refresh] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=routing] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=stored_fields] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=source] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=source_excludes] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=source_includes] + +[[docs-multi-get-api-request-body]] +==== {api-request-body-title} + +`docs`:: +(Optional, array) The documents you want to retrieve. +Required if no index is specified in the request URI. +You can specify the following attributes for each +document: ++ +-- +`_id`:: +(Required, string) The unique document ID. + +`_index`:: +(Optional, string) +The index that contains the document. +Required if no index is specified in the request URI. + +`_routing`:: +(Optional, string) The key for the primary shard the document resides on. +Required if routing is used during indexing. + +`_source`:: +(Optional, boolean) If `false`, excludes all `_source` fields. Defaults to `true`. +`source_include`::: +(Optional, array) The fields to extract and return from the `_source` field. 
+`source_exclude`:::
+(Optional, array) The fields to exclude from the returned `_source` field.
+
+`stored_fields`::
+(Optional, array) The stored fields you want to retrieve.
+--
+
+`ids`::
+(Optional, array) The IDs of the documents you want to retrieve.
+Allowed when the index is specified in the request URI.
+
+[[multi-get-api-response-body]]
+==== {api-response-body-title}
+
+The response includes a `docs` array that contains the documents in the order specified in the request.
+The structure of the returned documents is similar to that returned by the <> API.
+If there is a failure getting a particular document, the error is included in place of the document.
+
+[[docs-multi-get-api-example]]
+==== {api-examples-title}
+
+[[mget-ids]]
+===== Get documents by ID
+
+If you specify an index in the request URI, only the document IDs are required in the request body:

[source,console]
--------------------------------------------------
-GET /test/_mget
+GET /twitter/_mget
{
  "docs" : [
    {
@@ -45,30 +138,31 @@ GET /test/_mget
  ]
}
--------------------------------------------------
-//CONSOLE

-In which case, the `ids` element can directly be used to simplify the
-request:
+You can use the `ids` element to simplify the request:

[source,console]
--------------------------------------------------
-GET /test/_mget
+GET /twitter/_mget
{
  "ids" : ["1", "2"]
}
--------------------------------------------------
+// TEST[setup:twitter]

-[float]
[[mget-source-filtering]]
-==== Source filtering
+===== Filter source fields

-By default, the `_source` field will be returned for every document (if stored).
-Similar to the <> API, you can retrieve only parts of
-the `_source` (or not at all) by using the `_source` parameter. You can also use
-the url parameters `_source`, `_source_includes`, and `_source_excludes` to specify defaults,
-which will be used when there are no per-document instructions.
+By default, the `_source` field is returned for every document (if stored).
+Use the `_source` and `source_include` or `source_exclude` attributes to
+filter what fields are returned for a particular document.
+You can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the
+request URI to specify the defaults to use when there are no per-document instructions.

-For example:
+For example, the following request sets `_source` to `false` for document 1 to exclude the
+source entirely, retrieves `field3` and `field4` from document 2, and retrieves the `user` field
+from document 3 but filters out the `user.location` field.

[source,console]
--------------------------------------------------
@@ -97,13 +191,16 @@ GET /_mget
  }
}
--------------------------------------------------

-
-[float]
[[mget-fields]]
-==== Fields
+===== Get stored fields

-Specific stored fields can be specified to be retrieved per document to get, similar to the <> parameter of the Get API.
-For example:
+Use the `stored_fields` attribute to specify the set of stored fields you want
+to retrieve. Any requested fields that are not stored are ignored.
+You can include the `stored_fields` query parameter in the request URI to specify the defaults
+to use when there are no per-document instructions.
+
+For example, the following request retrieves `field1` and `field2` from document 1, and
+`field3` and `field4` from document 2:

[source,console]
--------------------------------------------------
@@ -124,8 +221,9 @@ GET /_mget
 }
--------------------------------------------------

-Alternatively, you can specify the `stored_fields` parameter in the query string
-as a default to be applied to all documents.
+The following request retrieves `field1` and `field2` from all documents by default.
+These default fields are returned for document 1, but
+overridden to return `field3` and `field4` for document 2.

[source,console]
--------------------------------------------------
GET /test/_mget?stored_fields=field1,field2
{
  "docs" : [
    {
-      "_id" : "1" <1>
+      "_id" : "1"
    },
    {
      "_id" : "2",
-      "stored_fields" : ["field3", "field4"] <2>
+      "stored_fields" : ["field3", "field4"]
    }
  ]
}
--------------------------------------------------
-<1> Returns `field1` and `field2`
-<2> Returns `field3` and `field4`

-[float]
[[mget-routing]]
-==== Routing
+===== Specify document routing

-You can also specify a routing value as a parameter:
+If routing is used during indexing, you need to specify the routing value to retrieve documents.
+For example, the following request fetches `test/_doc/2` from the shard corresponding to routing key `key1`,
+and fetches `test/_doc/1` from the shard corresponding to routing key `key2`.

[source,console]
--------------------------------------------------
@@ -167,18 +264,4 @@ GET /_mget?routing=key1
  }
  ]
}
---------------------------------------------------
-
-In this example, document `test/_doc/2` will be fetched from the shard corresponding to routing key `key1` but
-document `test/_doc/1` will be fetched from the shard corresponding to routing key `key2`.
-
-[float]
-[[mget-security]]
-==== Security
-
-See <>.
-
-[float]
-[[multi-get-partial-responses]]
-==== Partial responses
-To ensure fast responses, the multi get API will respond with partial results if one or more shards fail. See <> for more information.
\ No newline at end of file
+--------------------------------------------------
\ No newline at end of file
diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc
index b68e6c064958..11b9e8bad591 100644
--- a/docs/reference/rest-api/common-parms.asciidoc
+++ b/docs/reference/rest-api/common-parms.asciidoc
@@ -486,6 +486,12 @@ tag::query[]
<>.
end::query[]

+tag::realtime[]
+`realtime`::
+(Optional, boolean) Set to `false` to disable real time GET
+(default: `true`). See <>.
+end::realtime[]
+
tag::refresh[]
`refresh`::
(Optional, enum) If `true`, {es} refreshes the affected shards to make this
@@ -616,6 +622,12 @@ tag::stats[]
purposes.
end::stats[]

+tag::stored_fields[]
+`stored_fields`::
+(Optional, boolean) If `true`, retrieves the document fields stored in the
+index rather than the document `_source`. Defaults to `false`.
+end::stored_fields[]
+
tag::target-index[]
``::
+
@@ -700,6 +712,6 @@ end::wait_for_active_shards[]

tag::wait_for_completion[]
`wait_for_completion`::
-(Optional, boolean) Should the request block until the operation is
-complete. Defaults to `true`.
+(Optional, boolean) If `true`, the request blocks until the operation is complete.
+Defaults to `true`.
end::wait_for_completion[] From 671027919939b8a84b1fc246d3477abb85e7db97 Mon Sep 17 00:00:00 2001 From: debadair Date: Fri, 4 Oct 2019 19:41:46 -0700 Subject: [PATCH 07/55] Reformats reindex API (#47483) * Reformats reindex API * Incorporated review feedback. --- docs/reference/docs/reindex.asciidoc | 1536 ++++++++++++-------------- 1 file changed, 716 insertions(+), 820 deletions(-) diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index 2f6d1561e67c..009c8deb7785 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -1,16 +1,20 @@ [[docs-reindex]] === Reindex API +++++ +Reindex +++++ -IMPORTANT: Reindex requires <> to be enabled for +Copies documents from one index to another. + +[IMPORTANT] +================================================= +Reindex requires <> to be enabled for all documents in the source index. -IMPORTANT: Reindex does not attempt to set up the destination index. It does -not copy the settings of the source index. You should set up the destination -index prior to running a `_reindex` action, including setting up mappings, shard -counts, replicas, etc. - -The most basic form of `_reindex` just copies documents from one index to another. -This will copy documents from the `twitter` index into the `new_twitter` index: +You must set up the destination index before calling `_reindex`. +Reindex does not copy the settings from the source index. +Mappings, shard counts, replicas, and so on must be configured ahead of time. +================================================= [source,console] -------------------------------------------------- @@ -26,7 +30,7 @@ POST _reindex -------------------------------------------------- // TEST[setup:big_twitter] -That will return something like this: +//// [source,console-result] -------------------------------------------------- @@ -52,145 +56,201 @@ That will return something like this: -------------------------------------------------- // TESTRESPONSE[s/"took" : 147/"took" : "$body.took"/] +//// + +[[docs-reindex-api-request]] +==== {api-request-title} + +`POST /_reindex` + +[[docs-reindex-api-desc]] +==== {api-description-title} + +Extracts the document source from the source index and indexes the documents into the destination index. +You can copy all documents to the destination index, or reindex a subset of the documents. + Just like <>, `_reindex` gets a snapshot of the source index but its target must be a **different** index so version conflicts are unlikely. The `dest` element can be configured like the -index API to control optimistic concurrency control. Just leaving out -`version_type` (as above) or setting it to `internal` will cause Elasticsearch +index API to control optimistic concurrency control. Omitting +`version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the target, overwriting any that happen to have -the same type and id: +the same ID. 
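+
+As a minimal sketch of this default behavior (the index names are illustrative
+and mirror the examples above):
+
+[source,console]
+--------------------------------------------------
+POST _reindex
+{
+  "source": {
+    "index": "twitter"
+  },
+  "dest": {
+    "index": "new_twitter",
+    "version_type": "internal"
+  }
+}
+--------------------------------------------------
+// TEST[setup:twitter]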
-[source,console] --------------------------------------------------- -POST _reindex -{ - "source": { - "index": "twitter" - }, - "dest": { - "index": "new_twitter", - "version_type": "internal" - } -} --------------------------------------------------- -// TEST[setup:twitter] - -Setting `version_type` to `external` will cause Elasticsearch to preserve the +Setting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination index than they do -in the source index: +in the source index. -[source,console] --------------------------------------------------- -POST _reindex -{ - "source": { - "index": "twitter" - }, - "dest": { - "index": "new_twitter", - "version_type": "external" - } -} --------------------------------------------------- -// TEST[setup:twitter] - -Settings `op_type` to `create` will cause `_reindex` to only create missing +Setting `op_type` to `create` causes `_reindex` to only create missing documents in the target index. All existing documents will cause a version -conflict: +conflict. + +By default, version conflicts abort the `_reindex` process. +To continue reindexing if there are conflicts, set the `"conflicts"` request body parameter to `proceed`. +In this case, the response includes a count of the version conflicts that were encountered. +Note that the handling of other error types is unaffected by the `"conflicts"` parameter. + +[[docs-reindex-task-api]] +===== Running reindex asynchronously + +If the request contains `wait_for_completion=false`, {es} +performs some preflight checks, launches the request, and returns a +<> you can use to cancel or get the status of the task. +{es} creates a record of this task as a document at `.tasks/task/${taskId}`. +When you are done with a task, you should delete the task document so +{es} can reclaim the space. + +[[docs-reindex-many-indices]] +===== Reindexing many indices +If you have many indices to reindex it is generally better to reindex them +one at a time rather than using a glob pattern to pick up many indices. That +way you can resume the process if there are any errors by removing the +partially completed index and starting over at that index. It also makes +parallelizing the process fairly simple: split the list of indices to reindex +and run each list in parallel. + +One-off bash scripts seem to work nicely for this: + +[source,bash] +---------------------------------------------------------------- +for index in i1 i2 i3 i4 i5; do + curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{ + "source": { + "index": "'$index'" + }, + "dest": { + "index": "'$index'-reindexed" + } + }' +done +---------------------------------------------------------------- +// NOTCONSOLE + +[[docs-reindex-throttle]] +===== Throttling + +Set `requests_per_second` to any positive decimal number (`1.4`, `6`, +`1000`, etc.) to throttle the rate at which `_reindex` issues batches of index +operations. Requests are throttled by padding each batch with a wait time. +To disable throttling, set `requests_per_second` to `-1`. + +The throttling is done by waiting between batches so that the `scroll` that `_reindex` +uses internally can be given a timeout that takes into account the padding. +The padding time is the difference between the batch size divided by the +`requests_per_second` and the time spent writing. 
By default the batch size is +`1000`, so if `requests_per_second` is set to `500`: + +[source,txt] +-------------------------------------------------- +target_time = 1000 / 500 per second = 2 seconds +wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +-------------------------------------------------- + +Since the batch is issued as a single `_bulk` request, large batch sizes +cause Elasticsearch to create many requests and then wait for a while before +starting the next set. This is "bursty" instead of "smooth". + +[[docs-reindex-rethrottle]] +===== Rethrottling + +The value of `requests_per_second` can be changed on a running reindex using +the `_rethrottle` API: [source,console] -------------------------------------------------- -POST _reindex -{ - "source": { - "index": "twitter" - }, - "dest": { - "index": "new_twitter", - "op_type": "create" - } -} +POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 -------------------------------------------------- -// TEST[setup:twitter] -By default, version conflicts abort the `_reindex` process. The `"conflicts"` request body -parameter can be used to instruct `_reindex` to proceed with the next document on version conflicts. -It is important to note that the handling of other error types is unaffected by the `"conflicts"` parameter. -When `"conflicts": "proceed"` is set in the request body, the `_reindex` process will continue on version conflicts -and return a count of version conflicts encountered: +The task ID can be found using the <>. + +Just like when setting it on the Reindex API, `requests_per_second` +can be either `-1` to disable throttling or any decimal number +like `1.7` or `12` to throttle to that level. Rethrottling that speeds up the +query takes effect immediately, but rethrottling that slows down the query will +take effect after completing the current batch. This prevents scroll +timeouts. + +[[docs-reindex-slice]] +===== Slicing + +Reindex supports <> to parallelize the reindexing process. +This parallelization can improve efficiency and provide a convenient way to +break the request down into smaller parts. + +NOTE: Reindexing from remote clusters does not support +<> or +<>. + +[[docs-reindex-manual-slice]] +====== Manual slicing +Slice a reindex request manually by providing a slice id and total number of +slices to each request: [source,console] --------------------------------------------------- -POST _reindex -{ - "conflicts": "proceed", - "source": { - "index": "twitter" - }, - "dest": { - "index": "new_twitter", - "op_type": "create" - } -} --------------------------------------------------- -// TEST[setup:twitter] - -You can limit the documents by adding a query to the `source`. -This will only copy tweets made by `kimchy` into `new_twitter`: - -[source,console] --------------------------------------------------- +---------------------------------------------------------------- POST _reindex { "source": { "index": "twitter", - "query": { - "term": { - "user": "kimchy" - } + "slice": { + "id": 0, + "max": 2 } }, "dest": { "index": "new_twitter" } } --------------------------------------------------- -// TEST[setup:twitter] - -`index` in `source` can be a list, allowing you to copy from lots -of sources in one request. 
This will copy documents from the -`twitter` and `blog` indices: - -[source,console] --------------------------------------------------- POST _reindex { "source": { - "index": ["twitter", "blog"] + "index": "twitter", + "slice": { + "id": 1, + "max": 2 + } }, "dest": { - "index": "all_together" + "index": "new_twitter" } } --------------------------------------------------- -// TEST[setup:twitter] -// TEST[s/^/PUT blog\/post\/post1?refresh\n{"test": "foo"}\n/] +---------------------------------------------------------------- +// TEST[setup:big_twitter] -NOTE: The Reindex API makes no effort to handle ID collisions so the last -document written will "win" but the order isn't usually predictable so it is -not a good idea to rely on this behavior. Instead, make sure that IDs are unique -using a script. - -It's also possible to limit the number of processed documents by setting -`max_docs`. This will only copy a single document from `twitter` to -`new_twitter`: +You can verify this works by: [source,console] --------------------------------------------------- -POST _reindex +---------------------------------------------------------------- +GET _refresh +POST new_twitter/_search?size=0&filter_path=hits.total +---------------------------------------------------------------- +// TEST[continued] + +which results in a sensible `total` like this one: + +[source,console-result] +---------------------------------------------------------------- +{ + "hits": { + "total" : { + "value": 120, + "relation": "eq" + } + } +} +---------------------------------------------------------------- + +[[docs-reindex-automatic-slice]] +====== Automatic slicing + +You can also let `_reindex` automatically parallelize using <> to +slice on `_uid`. Use `slices` to specify the number of slices to use: + +[source,console] +---------------------------------------------------------------- +POST _reindex?slices=5&refresh { - "max_docs": 1, "source": { "index": "twitter" }, @@ -198,104 +258,80 @@ POST _reindex "index": "new_twitter" } } --------------------------------------------------- -// TEST[setup:twitter] +---------------------------------------------------------------- +// TEST[setup:big_twitter] -If you want a particular set of documents from the `twitter` index you'll -need to use `sort`. Sorting makes the scroll less efficient but in some contexts -it's worth it. If possible, prefer a more selective query to `max_docs` and `sort`. -This will copy 10000 documents from `twitter` into `new_twitter`: +You can also this verify works by: [source,console] --------------------------------------------------- -POST _reindex +---------------------------------------------------------------- +POST new_twitter/_search?size=0&filter_path=hits.total +---------------------------------------------------------------- +// TEST[continued] + +which results in a sensible `total` like this one: + +[source,console-result] +---------------------------------------------------------------- { - "max_docs": 10000, - "source": { - "index": "twitter", - "sort": { "date": "desc" } - }, - "dest": { - "index": "new_twitter" + "hits": { + "total" : { + "value": 120, + "relation": "eq" + } } } --------------------------------------------------- -// TEST[setup:twitter] +---------------------------------------------------------------- -The `source` section supports all the elements that are supported in a -<>. 
For instance, only a subset of the
-fields from the original documents can be reindexed using `source` filtering
-as follows:
+You can also verify this works by:

[source,console]
--------------------------------------------------
-POST _reindex
-{
-  "source": {
-    "index": "twitter",
-    "_source": ["user", "_doc"]
-  },
-  "dest": {
-    "index": "new_twitter"
-  }
-}
--------------------------------------------------
-// TEST[setup:twitter]
+----------------------------------------------------------------
+POST new_twitter/_search?size=0&filter_path=hits.total
+----------------------------------------------------------------
+// TEST[continued]
+
+which results in a sensible `total` like this one:
+
+[source,console-result]
+----------------------------------------------------------------
+{
+  "hits": {
+    "total" : {
+      "value": 120,
+      "relation": "eq"
+    }
+  }
+}
+----------------------------------------------------------------

-[[reindex-scripts]]
-Like `_update_by_query`, `_reindex` supports a script that modifies the
-document. Unlike `_update_by_query`, the script is allowed to modify the
-document's metadata. This example bumps the version of the source document:
+Setting `slices` to `auto` will let Elasticsearch choose the number of slices
+to use. This setting will use one slice per shard, up to a certain limit. If
+there are multiple source indices, it will choose the number of slices based
+on the index with the smallest number of shards.

-[source,console]
--------------------------------------------------
-POST _reindex
-{
-  "source": {
-    "index": "twitter"
-  },
-  "dest": {
-    "index": "new_twitter",
-    "version_type": "external"
-  },
-  "script": {
-    "source": "if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}",
-    "lang": "painless"
-  }
-}
--------------------------------------------------
-// TEST[setup:twitter]
+Adding `slices` to `_reindex` just automates the manual process used in the
+section above, creating sub-requests which means it has some quirks:

-Just as in `_update_by_query`, you can set `ctx.op` to change the
-operation that is executed on the destination index:
+* You can see these requests in the <>. These
+sub-requests are "child" tasks of the task for the request with `slices`.
+* Fetching the status of the task for the request with `slices` only contains
+the status of completed slices.
+* These sub-requests are individually addressable for things like cancelation
+and rethrottling.
+* Rethrottling the request with `slices` will rethrottle the unfinished
+sub-request proportionally.
+* Canceling the request with `slices` will cancel each sub-request.
+* Due to the nature of `slices` each sub-request won't get a perfectly even
+portion of the documents. All documents will be addressed, but some slices may
+be larger than others. Expect larger slices to have a more even distribution.
+* Parameters like `requests_per_second` and `max_docs` on a request with
+`slices` are distributed proportionally to each sub-request. Combine that with
+the point above about distribution being uneven and you should conclude that
+using `max_docs` with `slices` might not result in exactly `max_docs` documents
+being reindexed.
+* Each sub-request gets a slightly different snapshot of the source index,
+though these are all taken at approximately the same time.

-`noop`::
+[[docs-reindex-picking-slices]]
+====== Picking the number of slices

-Set `ctx.op = "noop"` if your script decides that the document doesn't have
-to be indexed in the destination index. 
This no operation will be reported
-in the `noop` counter in the <>.
+If slicing automatically, setting `slices` to `auto` will choose a reasonable
+number for most indices. If slicing manually or otherwise tuning
+automatic slicing, use these guidelines.

-`delete`::
+Query performance is most efficient when the number of `slices` is equal to the
+number of shards in the index. If that number is large (e.g. 500),
+choose a lower number as too many `slices` will hurt performance. Setting
+`slices` higher than the number of shards generally does not improve efficiency
+and adds overhead.

-Set `ctx.op = "delete"` if your script decides that the document must be
- deleted from the destination index. The deletion will be reported in the
- `deleted` counter in the <>.
+Indexing performance scales linearly across available resources with the
+number of slices.

-Setting `ctx.op` to anything else will return an error, as will setting any
-other field in `ctx`.
+Whether query or indexing performance dominates the runtime depends on the
+documents being reindexed and cluster resources.

-Think of the possibilities! Just be careful; you are able to
-change:
+[[docs-reindex-routing]]
+===== Reindex routing

- * `_id`
- * `_index`
- * `_version`
- * `_routing`
-
-Setting `_version` to `null` or clearing it from the `ctx` map is just like not
-sending the version in an indexing request; it will cause the document to be
-overwritten in the target index regardless of the version on the target or the
-version type you use in the `_reindex` request.

By default if `_reindex` sees a document with routing then the routing is
preserved unless it's changed by the script. You can set `routing` on the
@@ -339,6 +375,8 @@ POST _reindex
--------------------------------------------------
// TEST[s/^/PUT source\n/]

+
+
By default `_reindex` uses scroll batches of 1000. You can change the
batch size with the `size` field in the `source` element:

@@ -376,9 +414,491 @@ POST _reindex
--------------------------------------------------
// TEST[s/^/PUT source\n/]

-[float]
+[[docs-reindex-api-query-params]]
+==== {api-query-parms-title}
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=refresh]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=timeout]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=wait_for_active_shards]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=wait_for_completion]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=requests_per_second]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=scroll]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=slices]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=max_docs]
+
+[[docs-reindex-api-request-body]]
+==== {api-request-body-title}
+
+`conflicts`::
+(Optional, enum) Set to `proceed` to continue reindexing even if there are conflicts.
+Defaults to `abort`.
+
+`source`::
+`index`:::
+(Required, string) The name of the index you are copying _from_.
+Also accepts a comma-separated list of indices to reindex from multiple sources.
+
+`max_docs`:::
+(Optional, integer) The maximum number of documents to reindex.
+
+`query`:::
+(Optional, <>) Specifies the documents to reindex using the Query DSL.
+
+`remote`:::
+`host`::::
+(Optional, string) The URL for the remote instance of {es} that you want to index _from_.
+Required when indexing from remote.
+`username`::::
+(Optional, string) The username to use for authentication with the remote host.
+`password`::::
+(Optional, string) The password to use for authentication with the remote host.
+`socket_timeout`::::
+(Optional, <>) The remote socket read timeout. Defaults to 30 seconds.
+`connect_timeout`::::
+(Optional, <>) The remote connection timeout. Defaults to 30 seconds.
+
+`size`:::
+(Optional, integer) The number of documents to index per batch.
+Use when indexing from remote to ensure that the batches fit within the on-heap buffer,
+which defaults to a maximum size of 100 MB.
+
+`slice`:::
+`id`::::
+(Optional, integer) Slice ID for <>. 
+
+`max`::::
+(Optional, integer) Total number of slices.
+
+`sort`:::
+(Optional, list) A comma-separated list of `<field>:<direction>` pairs to sort by before indexing.
+Use in conjunction with `max_docs` to control what documents are reindexed.
+
+`_source`:::
+(Optional, string) If `true`, reindexes all source fields.
+Set to a list to reindex select fields.
+Defaults to `true`.
+
+`dest`::
+`index`:::
+(Required, string) The name of the index you are copying _to_.
+
+`version_type`:::
+(Optional, enum) The versioning to use for the indexing operation.
+Valid values: `internal`, `external`, `external_gt`, `external_gte`.
+See <> for more information.
+
+`op_type`:::
+(Optional, enum) Set to `create` to only index documents that do not already exist (put if absent).
+Valid values: `index`, `create`. Defaults to `index`.
+
+`script`::
+`source`:::
+(Optional, string) The script to run to update the document source or metadata when reindexing.
+`lang`:::
+(Optional, enum) The script language: `painless`, `expression`, `mustache`, `java`.
+For more information, see <>.
+
+
+[[docs-reindex-api-response-body]]
+==== {api-response-body-title}
+
+`took`::
+
+(integer) The total milliseconds the entire operation took.
+
+`timed_out`::
+
+(boolean) This flag is set to `true` if any of the requests executed during the
+reindex timed out.
+
+`total`::
+
+(integer) The number of documents that were successfully processed.
+
+`updated`::
+
+(integer) The number of documents that were successfully updated.
+
+`created`::
+
+(integer) The number of documents that were successfully created.
+
+`deleted`::
+
+(integer) The number of documents that were successfully deleted.
+
+`batches`::
+
+(integer) The number of scroll responses pulled back by the reindex.
+
+`noops`::
+
+(integer) The number of documents that were ignored because the script used for
+the reindex returned a `noop` value for `ctx.op`.
+
+`version_conflicts`::
+
+(integer) The number of version conflicts that reindex hit.
+
+`retries`::
+
+(integer) The number of retries attempted by reindex. `bulk` is the number of bulk
+actions retried and `search` is the number of search actions retried.
+
+`throttled_millis`::
+
+(integer) Number of milliseconds the request slept to conform to `requests_per_second`.
+
+`requests_per_second`::
+
+(integer) The number of requests per second effectively executed during the reindex.
+
+`throttled_until_millis`::
+
+(integer) This field should always be equal to zero in a `_reindex` response. It only
+has meaning when using the <>, where it
+indicates the next time (in milliseconds since epoch) a throttled request will be
+executed again in order to conform to `requests_per_second`.
+
+`failures`::
+
+(array) Array of failures if there were any unrecoverable errors during the process. If
+this is non-empty then the request aborted because of those failures. Reindex
+is implemented using batches and any failure causes the entire process to abort
+but all failures in the current batch are collected into the array. You can use
+the `conflicts` option to prevent reindex from aborting on version conflicts.
+
+[[docs-reindex-api-example]]
+==== {api-examples-title}
+
+[[docs-reindex-select-query]]
+===== Reindex select documents with a query
+
+You can limit the documents by adding a query to the `source`. 
+
+For example, the following request only copies tweets made by `kimchy` into `new_twitter`:
+
+[source,console]
+--------------------------------------------------
+POST _reindex
+{
+  "source": {
+    "index": "twitter",
+    "query": {
+      "term": {
+        "user": "kimchy"
+      }
+    }
+  },
+  "dest": {
+    "index": "new_twitter"
+  }
+}
+--------------------------------------------------
+// TEST[setup:twitter]
+
+[[docs-reindex-select-sort]]
+===== Reindex select documents with sort
+
+You can limit the number of processed documents by setting `max_docs`.
+For example, this request copies a single document from `twitter` to
+`new_twitter`:
+
+[source,console]
+--------------------------------------------------
+POST _reindex
+{
+  "max_docs": 1,
+  "source": {
+    "index": "twitter"
+  },
+  "dest": {
+    "index": "new_twitter"
+  }
+}
+--------------------------------------------------
+// TEST[setup:twitter]
+
+You can use `sort` in conjunction with `max_docs` to select the documents you want to reindex.
+Sorting makes the scroll less efficient but in some contexts it's worth it.
+If possible, it's better to use a more selective query instead of `max_docs` and `sort`.
+
+For example, the following request copies 10000 documents from `twitter` into `new_twitter`:
+
+[source,console]
+--------------------------------------------------
+POST _reindex
+{
+  "max_docs": 10000,
+  "source": {
+    "index": "twitter",
+    "sort": { "date": "desc" }
+  },
+  "dest": {
+    "index": "new_twitter"
+  }
+}
+--------------------------------------------------
+// TEST[setup:twitter]
+
+[[docs-reindex-multiple-indices]]
+===== Reindex from multiple indices
+
+The `index` attribute in `source` can be a list, allowing you to copy from lots
+of sources in one request. This will copy documents from the
+`twitter` and `blog` indices:
+
+[source,console]
+--------------------------------------------------
+POST _reindex
+{
+  "source": {
+    "index": ["twitter", "blog"]
+  },
+  "dest": {
+    "index": "all_together"
+  }
+}
+--------------------------------------------------
+// TEST[setup:twitter]
+// TEST[s/^/PUT blog\/post\/post1?refresh\n{"test": "foo"}\n/]
+
+NOTE: The Reindex API makes no effort to handle ID collisions so the last
+document written will "win" but the order isn't usually predictable so it is
+not a good idea to rely on this behavior. Instead, make sure that IDs are unique
+using a script.
+
+[[docs-reindex-filter-source]]
+===== Reindex select fields with a source filter
+
+You can use source filtering to reindex a subset of the fields in the original documents.
+For example, the following request only reindexes the `user` and `_doc` fields of each document:
+
+[source,console]
+--------------------------------------------------
+POST _reindex
+{
+  "source": {
+    "index": "twitter",
+    "_source": ["user", "_doc"]
+  },
+  "dest": {
+    "index": "new_twitter"
+  }
+}
+--------------------------------------------------
+// TEST[setup:twitter]
+
+[[docs-reindex-change-name]]
+===== Reindex to change the name of a field
+
+`_reindex` can be used to build a copy of an index with renamed fields. Say you
+create an index containing documents that look like this:
+
+[source,console]
+--------------------------------------------------
+POST test/_doc/1?refresh
+{
+  "text": "words words",
+  "flag": "foo"
+}
+--------------------------------------------------
+
+but you don't like the name `flag` and want to replace it with `tag`. 
+`_reindex` can create the other index for you: + +[source,console] +-------------------------------------------------- +POST _reindex +{ + "source": { + "index": "test" + }, + "dest": { + "index": "test2" + }, + "script": { + "source": "ctx._source.tag = ctx._source.remove(\"flag\")" + } +} +-------------------------------------------------- +// TEST[continued] + +Now you can get the new document: + +[source,console] +-------------------------------------------------- +GET test2/_doc/1 +-------------------------------------------------- +// TEST[continued] + +which will return: + +[source,console-result] +-------------------------------------------------- +{ + "found": true, + "_id": "1", + "_index": "test2", + "_version": 1, + "_seq_no": 44, + "_primary_term": 1, + "_source": { + "text": "words words", + "tag": "foo" + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term": 1/"_primary_term" : $body._primary_term/] + +[[docs-reindex-daily-indices]] +===== Reindex daily indices + +You can use `_reindex` in combination with <> to reindex +daily indices to apply a new template to the existing documents. + +Assuming you have indices that contain documents like: + +[source,console] +---------------------------------------------------------------- +PUT metricbeat-2016.05.30/_doc/1?refresh +{"system.cpu.idle.pct": 0.908} +PUT metricbeat-2016.05.31/_doc/1?refresh +{"system.cpu.idle.pct": 0.105} +---------------------------------------------------------------- + +The new template for the `metricbeat-*` indices is already loaded into Elasticsearch, +but it applies only to the newly created indices. Painless can be used to reindex +the existing documents and apply the new template. + +The script below extracts the date from the index name and creates a new index +with `-1` appended. All data from `metricbeat-2016.05.31` will be reindexed +into `metricbeat-2016.05.31-1`. + +[source,console] +---------------------------------------------------------------- +POST _reindex +{ + "source": { + "index": "metricbeat-*" + }, + "dest": { + "index": "metricbeat" + }, + "script": { + "lang": "painless", + "source": "ctx._index = 'metricbeat-' + (ctx._index.substring('metricbeat-'.length(), ctx._index.length())) + '-1'" + } +} +---------------------------------------------------------------- +// TEST[continued] + +All documents from the previous metricbeat indices can now be found in the `*-1` indices. + +[source,console] +---------------------------------------------------------------- +GET metricbeat-2016.05.30-1/_doc/1 +GET metricbeat-2016.05.31-1/_doc/1 +---------------------------------------------------------------- +// TEST[continued] + +The previous method can also be used in conjunction with <> +to load only the existing data into the new index and rename any fields if needed. 
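+
+As a rough sketch of that combination (the `cpu_idle` target field name and the
+source filter are illustrative only, reusing the index-name rewrite from the
+example above):
+
+[source,console]
+----------------------------------------------------------------
+POST _reindex
+{
+  "source": {
+    "index": "metricbeat-*",
+    "_source": ["system.cpu.idle.pct"]
+  },
+  "dest": {
+    "index": "metricbeat"
+  },
+  "script": {
+    "lang": "painless",
+    "source": "ctx._index = 'metricbeat-' + (ctx._index.substring('metricbeat-'.length(), ctx._index.length())) + '-1'; ctx._source.cpu_idle = ctx._source.remove('system.cpu.idle.pct')"
+  }
+}
+----------------------------------------------------------------
+// TEST[continued]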
+ +[[docs-reindex-api-subset]] +===== Extract a random subset of an index + +`_reindex` can be used to extract a random subset of an index for testing: + +[source,console] +---------------------------------------------------------------- +POST _reindex +{ + "max_docs": 10, + "source": { + "index": "twitter", + "query": { + "function_score" : { + "query" : { "match_all": {} }, + "random_score" : {} + } + }, + "sort": "_score" <1> + }, + "dest": { + "index": "random_twitter" + } +} +---------------------------------------------------------------- +// TEST[setup:big_twitter] + +<1> `_reindex` defaults to sorting by `_doc` so `random_score` will not have any +effect unless you override the sort to `_score`. + +[[reindex-scripts]] +===== Modify documents during reindexing + +Like `_update_by_query`, `_reindex` supports a script that modifies the +document. Unlike `_update_by_query`, the script is allowed to modify the +document's metadata. This example bumps the version of the source document: + +[source,console] +-------------------------------------------------- +POST _reindex +{ + "source": { + "index": "twitter" + }, + "dest": { + "index": "new_twitter", + "version_type": "external" + }, + "script": { + "source": "if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}", + "lang": "painless" + } +} +-------------------------------------------------- +// TEST[setup:twitter] + +Just as in `_update_by_query`, you can set `ctx.op` to change the +operation that is executed on the destination index: + +`noop`:: + +Set `ctx.op = "noop"` if your script decides that the document doesn't have +to be indexed in the destination index. This no operation will be reported +in the `noop` counter in the <>. + +`delete`:: + +Set `ctx.op = "delete"` if your script decides that the document must be + deleted from the destination index. The deletion will be reported in the + `deleted` counter in the <>. + +Setting `ctx.op` to anything else will return an error, as will setting any +other field in `ctx`. + +Think of the possibilities! Just be careful; you are able to +change: + + * `_id` + * `_index` + * `_version` + * `_routing` + +Setting `_version` to `null` or clearing it from the `ctx` map is just like not +sending the version in an indexing request; it will cause the document to be +overwritten in the target index regardless of the version on the target or the +version type you use in the `_reindex` request. + [[reindex-from-remote]] -==== Reindex from Remote +==== Reindex from remote Reindex supports reindexing from a remote Elasticsearch cluster: @@ -506,7 +1026,6 @@ POST _reindex // TEST[s/^/PUT source\n/] // TEST[s/otherhost:9200/\${host}/] -[float] [[reindex-ssl]] ===== Configuring SSL parameters @@ -598,626 +1117,3 @@ Defaults to the keystore password. This setting cannot be used with The password for the key in the keystore (`reindex.ssl.keystore.path`). Defaults to the keystore password. This setting cannot be used with `reindex.ssl.keystore.key_password`. - -[float] -==== URL Parameters - -In addition to the standard parameters like `pretty`, the Reindex API also -supports `refresh`, `wait_for_completion`, `wait_for_active_shards`, `timeout`, -`scroll`, and `requests_per_second`. - -Sending the `refresh` url parameter will cause all indexes to which the request -wrote to be refreshed. This is different than the Index API's `refresh` -parameter which causes just the shard that received the new data to be -refreshed. Also unlike the Index API it does not support `wait_for`. 
- -If the request contains `wait_for_completion=false` then Elasticsearch will -perform some preflight checks, launch the request, and then return a `task` -which can be used with <> -to cancel or get the status of the task. Elasticsearch will also create a -record of this task as a document at `.tasks/task/${taskId}`. This is yours -to keep or remove as you see fit. When you are done with it, delete it so -Elasticsearch can reclaim the space it uses. - -`wait_for_active_shards` controls how many copies of a shard must be active -before proceeding with the reindexing. See <> -for details. `timeout` controls how long each write request waits for unavailable -shards to become available. Both work exactly how they work in the -<>. As `_reindex` uses scroll search, you can also specify -the `scroll` parameter to control how long it keeps the "search context" alive, -(e.g. `?scroll=10m`). The default value is 5 minutes. - -`requests_per_second` can be set to any positive decimal number (`1.4`, `6`, -`1000`, etc.) and throttles the rate at which `_reindex` issues batches of index -operations by padding each batch with a wait time. The throttling can be -disabled by setting `requests_per_second` to `-1`. - -The throttling is done by waiting between batches so that the `scroll` which `_reindex` -uses internally can be given a timeout that takes into account the padding. -The padding time is the difference between the batch size divided by the -`requests_per_second` and the time spent writing. By default the batch size is -`1000`, so if the `requests_per_second` is set to `500`: - -[source,txt] --------------------------------------------------- -target_time = 1000 / 500 per second = 2 seconds -wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds --------------------------------------------------- - -Since the batch is issued as a single `_bulk` request, large batch sizes will -cause Elasticsearch to create many requests and then wait for a while before -starting the next set. This is "bursty" instead of "smooth". The default value is `-1`. - -[float] -[[docs-reindex-response-body]] -==== Response body - -////////////////////////// -[source,console] --------------------------------------------------- -POST /_reindex?wait_for_completion -{ - "source": { - "index": "twitter" - }, - "dest": { - "index": "new_twitter" - } -} --------------------------------------------------- -// TEST[setup:twitter] - -////////////////////////// - -The JSON response looks like this: - -[source,console-result] --------------------------------------------------- -{ - "took": 639, - "timed_out": false, - "total": 5, - "updated": 0, - "created": 5, - "deleted": 0, - "batches": 1, - "noops": 0, - "version_conflicts": 2, - "retries": { - "bulk": 0, - "search": 0 - }, - "throttled_millis": 0, - "requests_per_second": 1, - "throttled_until_millis": 0, - "failures": [ ] -} --------------------------------------------------- -// TESTRESPONSE[s/: [0-9]+/: $body.$_path/] - -`took`:: - -The total milliseconds the entire operation took. - -`timed_out`:: - -This flag is set to `true` if any of the requests executed during the -reindex timed out. - -`total`:: - -The number of documents that were successfully processed. - -`updated`:: - -The number of documents that were successfully updated. - -`created`:: - -The number of documents that were successfully created. - -`deleted`:: - -The number of documents that were successfully deleted. - -`batches`:: - -The number of scroll responses pulled back by the reindex. 
- -`noops`:: - -The number of documents that were ignored because the script used for -the reindex returned a `noop` value for `ctx.op`. - -`version_conflicts`:: - -The number of version conflicts that reindex hit. - -`retries`:: - -The number of retries attempted by reindex. `bulk` is the number of bulk -actions retried and `search` is the number of search actions retried. - -`throttled_millis`:: - -Number of milliseconds the request slept to conform to `requests_per_second`. - -`requests_per_second`:: - -The number of requests per second effectively executed during the reindex. - -`throttled_until_millis`:: - -This field should always be equal to zero in a `_reindex` response. It only -has meaning when using the <>, where it -indicates the next time (in milliseconds since epoch) a throttled request will be -executed again in order to conform to `requests_per_second`. - -`failures`:: - -Array of failures if there were any unrecoverable errors during the process. If -this is non-empty then the request aborted because of those failures. Reindex -is implemented using batches and any failure causes the entire process to abort -but all failures in the current batch are collected into the array. You can use -the `conflicts` option to prevent reindex from aborting on version conflicts. - -[float] -[[docs-reindex-task-api]] -==== Works with the Task API - -You can fetch the status of all running reindex requests with the -<>: - -[source,console] --------------------------------------------------- -GET _tasks?detailed=true&actions=*reindex --------------------------------------------------- -// TEST[skip:No tasks to retrieve] - -The response looks like: - -[source,console-result] --------------------------------------------------- -{ - "nodes" : { - "r1A2WoRbTwKZ516z6NEs5A" : { - "name" : "r1A2WoR", - "transport_address" : "127.0.0.1:9300", - "host" : "127.0.0.1", - "ip" : "127.0.0.1:9300", - "attributes" : { - "testattr" : "test", - "portsfile" : "true" - }, - "tasks" : { - "r1A2WoRbTwKZ516z6NEs5A:36619" : { - "node" : "r1A2WoRbTwKZ516z6NEs5A", - "id" : 36619, - "type" : "transport", - "action" : "indices:data/write/reindex", - "status" : { <1> - "total" : 6154, - "updated" : 3500, - "created" : 0, - "deleted" : 0, - "batches" : 4, - "version_conflicts" : 0, - "noops" : 0, - "retries": { - "bulk": 0, - "search": 0 - }, - "throttled_millis": 0, - "requests_per_second": -1, - "throttled_until_millis": 0 - }, - "description" : "", - "start_time_in_millis": 1535149899665, - "running_time_in_nanos": 5926916792, - "cancellable": true, - "headers": {} - } - } - } - } -} --------------------------------------------------- - -<1> This object contains the actual status. It is identical to the response JSON -except for the important addition of the `total` field. `total` is the total number -of operations that the `_reindex` expects to perform. You can estimate the -progress by adding the `updated`, `created`, and `deleted` fields. The request -will finish when their sum is equal to the `total` field. - -With the task id you can look up the task directly. The following example -retrieves information about the task `r1A2WoRbTwKZ516z6NEs5A:36619`: - -[source,console] --------------------------------------------------- -GET /_tasks/r1A2WoRbTwKZ516z6NEs5A:36619 --------------------------------------------------- -// TEST[catch:missing] - -The advantage of this API is that it integrates with `wait_for_completion=false` -to transparently return the status of completed tasks. 
If the task is completed -and `wait_for_completion=false` was set, it will return a -`results` or an `error` field. The cost of this feature is the document that -`wait_for_completion=false` creates at `.tasks/task/${taskId}`. It is up to -you to delete that document. - - -[float] -[[docs-reindex-cancel-task-api]] -==== Works with the Cancel Task API - -Any reindex can be canceled using the <>. For -example: - -[source,console] --------------------------------------------------- -POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel --------------------------------------------------- - -The task ID can be found using the <>. - -Cancelation should happen quickly but might take a few seconds. The Tasks -API will continue to list the task until it wakes to cancel itself. - - -[float] -[[docs-reindex-rethrottle]] -==== Rethrottling - -The value of `requests_per_second` can be changed on a running reindex using -the `_rethrottle` API: - -[source,console] --------------------------------------------------- -POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 --------------------------------------------------- - -The task ID can be found using the <>. - -Just like when setting it on the Reindex API, `requests_per_second` -can be either `-1` to disable throttling or any decimal number -like `1.7` or `12` to throttle to that level. Rethrottling that speeds up the -query takes effect immediately, but rethrottling that slows down the query will -take effect after completing the current batch. This prevents scroll -timeouts. - -[float] -[[docs-reindex-change-name]] -==== Reindex to change the name of a field - -`_reindex` can be used to build a copy of an index with renamed fields. Say you -create an index containing documents that look like this: - -[source,console] --------------------------------------------------- -POST test/_doc/1?refresh -{ - "text": "words words", - "flag": "foo" -} --------------------------------------------------- - -but you don't like the name `flag` and want to replace it with `tag`. -`_reindex` can create the other index for you: - -[source,console] --------------------------------------------------- -POST _reindex -{ - "source": { - "index": "test" - }, - "dest": { - "index": "test2" - }, - "script": { - "source": "ctx._source.tag = ctx._source.remove(\"flag\")" - } -} --------------------------------------------------- -// TEST[continued] - -Now you can get the new document: - -[source,console] --------------------------------------------------- -GET test2/_doc/1 --------------------------------------------------- -// TEST[continued] - -which will return: - -[source,console-result] --------------------------------------------------- -{ - "found": true, - "_id": "1", - "_index": "test2", - "_version": 1, - "_seq_no": 44, - "_primary_term": 1, - "_source": { - "text": "words words", - "tag": "foo" - } -} --------------------------------------------------- -// TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term": 1/"_primary_term" : $body._primary_term/] - -[float] -[[docs-reindex-slice]] -==== Slicing - -Reindex supports <> to parallelize the reindexing process. -This parallelization can improve efficiency and provide a convenient way to -break the request down into smaller parts. - -NOTE: Reindexing from remote clusters does not support -<> or -<>. 
- -[float] -[[docs-reindex-manual-slice]] -===== Manual slicing -Slice a reindex request manually by providing a slice id and total number of -slices to each request: - -[source,console] ----------------------------------------------------------------- -POST _reindex -{ - "source": { - "index": "twitter", - "slice": { - "id": 0, - "max": 2 - } - }, - "dest": { - "index": "new_twitter" - } -} -POST _reindex -{ - "source": { - "index": "twitter", - "slice": { - "id": 1, - "max": 2 - } - }, - "dest": { - "index": "new_twitter" - } -} ----------------------------------------------------------------- -// TEST[setup:big_twitter] - -You can verify this works by: - -[source,console] ----------------------------------------------------------------- -GET _refresh -POST new_twitter/_search?size=0&filter_path=hits.total ----------------------------------------------------------------- -// TEST[continued] - -which results in a sensible `total` like this one: - -[source,console-result] ----------------------------------------------------------------- -{ - "hits": { - "total" : { - "value": 120, - "relation": "eq" - } - } -} ----------------------------------------------------------------- - -[float] -[[docs-reindex-automatic-slice]] -===== Automatic slicing - -You can also let `_reindex` automatically parallelize using <> to -slice on `_uid`. Use `slices` to specify the number of slices to use: - -[source,console] ----------------------------------------------------------------- -POST _reindex?slices=5&refresh -{ - "source": { - "index": "twitter" - }, - "dest": { - "index": "new_twitter" - } -} ----------------------------------------------------------------- -// TEST[setup:big_twitter] - -You can also this verify works by: - -[source,console] ----------------------------------------------------------------- -POST new_twitter/_search?size=0&filter_path=hits.total ----------------------------------------------------------------- -// TEST[continued] - -which results in a sensible `total` like this one: - -[source,console-result] ----------------------------------------------------------------- -{ - "hits": { - "total" : { - "value": 120, - "relation": "eq" - } - } -} ----------------------------------------------------------------- - -Setting `slices` to `auto` will let Elasticsearch choose the number of slices -to use. This setting will use one slice per shard, up to a certain limit. If -there are multiple source indices, it will choose the number of slices based -on the index with the smallest number of shards. - -Adding `slices` to `_reindex` just automates the manual process used in the -section above, creating sub-requests which means it has some quirks: - -* You can see these requests in the <>. These -sub-requests are "child" tasks of the task for the request with `slices`. -* Fetching the status of the task for the request with `slices` only contains -the status of completed slices. -* These sub-requests are individually addressable for things like cancelation -and rethrottling. -* Rethrottling the request with `slices` will rethrottle the unfinished -sub-request proportionally. -* Canceling the request with `slices` will cancel each sub-request. -* Due to the nature of `slices` each sub-request won't get a perfectly even -portion of the documents. All documents will be addressed, but some slices may -be larger than others. Expect larger slices to have a more even distribution. 
-* Parameters like `requests_per_second` and `max_docs` on a request with -`slices` are distributed proportionally to each sub-request. Combine that with -the point above about distribution being uneven and you should conclude that -using `max_docs` with `slices` might not result in exactly `max_docs` documents -being reindexed. -* Each sub-request gets a slightly different snapshot of the source index, -though these are all taken at approximately the same time. - -[float] -[[docs-reindex-picking-slices]] -====== Picking the number of slices - -If slicing automatically, setting `slices` to `auto` will choose a reasonable -number for most indices. If slicing manually or otherwise tuning -automatic slicing, use these guidelines. - -Query performance is most efficient when the number of `slices` is equal to the -number of shards in the index. If that number is large (e.g. 500), -choose a lower number as too many `slices` will hurt performance. Setting -`slices` higher than the number of shards generally does not improve efficiency -and adds overhead. - -Indexing performance scales linearly across available resources with the -number of slices. - -Whether query or indexing performance dominates the runtime depends on the -documents being reindexed and cluster resources. - -[float] -==== Reindexing many indices -If you have many indices to reindex it is generally better to reindex them -one at a time rather than using a glob pattern to pick up many indices. That -way you can resume the process if there are any errors by removing the -partially completed index and starting over at that index. It also makes -parallelizing the process fairly simple: split the list of indices to reindex -and run each list in parallel. - -One-off bash scripts seem to work nicely for this: - -[source,bash] ----------------------------------------------------------------- -for index in i1 i2 i3 i4 i5; do - curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{ - "source": { - "index": "'$index'" - }, - "dest": { - "index": "'$index'-reindexed" - } - }' -done ----------------------------------------------------------------- -// NOTCONSOLE - -[float] -==== Reindex daily indices - -Notwithstanding the above advice, you can use `_reindex` in combination with -<> to reindex daily indices to apply -a new template to the existing documents. - -Assuming you have indices consisting of documents as follows: - -[source,console] ----------------------------------------------------------------- -PUT metricbeat-2016.05.30/_doc/1?refresh -{"system.cpu.idle.pct": 0.908} -PUT metricbeat-2016.05.31/_doc/1?refresh -{"system.cpu.idle.pct": 0.105} ----------------------------------------------------------------- - -The new template for the `metricbeat-*` indices is already loaded into Elasticsearch, -but it applies only to the newly created indices. Painless can be used to reindex -the existing documents and apply the new template. - -The script below extracts the date from the index name and creates a new index -with `-1` appended. All data from `metricbeat-2016.05.31` will be reindexed -into `metricbeat-2016.05.31-1`. 
- -[source,console] ----------------------------------------------------------------- -POST _reindex -{ - "source": { - "index": "metricbeat-*" - }, - "dest": { - "index": "metricbeat" - }, - "script": { - "lang": "painless", - "source": "ctx._index = 'metricbeat-' + (ctx._index.substring('metricbeat-'.length(), ctx._index.length())) + '-1'" - } -} ----------------------------------------------------------------- -// TEST[continued] - -All documents from the previous metricbeat indices can now be found in the `*-1` indices. - -[source,console] ----------------------------------------------------------------- -GET metricbeat-2016.05.30-1/_doc/1 -GET metricbeat-2016.05.31-1/_doc/1 ----------------------------------------------------------------- -// TEST[continued] - -The previous method can also be used in conjunction with <> -to load only the existing data into the new index and rename any fields if needed. - -[float] -==== Extracting a random subset of an index - -`_reindex` can be used to extract a random subset of an index for testing: - -[source,console] ----------------------------------------------------------------- -POST _reindex -{ - "max_docs": 10, - "source": { - "index": "twitter", - "query": { - "function_score" : { - "query" : { "match_all": {} }, - "random_score" : {} - } - }, - "sort": "_score" <1> - }, - "dest": { - "index": "random_twitter" - } -} ----------------------------------------------------------------- -// TEST[setup:big_twitter] - -<1> `_reindex` defaults to sorting by `_doc` so `random_score` will not have any -effect unless you override the sort to `_score`. From 07f4ca799b50027bf9754fcf0258114c69934bbf Mon Sep 17 00:00:00 2001 From: debadair Date: Fri, 4 Oct 2019 20:09:11 -0700 Subject: [PATCH 08/55] Reformats term vectors APIs (#47484) * Reformat termvectors APIs * Reformats mtermvectors * Apply suggestions from code review Co-Authored-By: James Rodewig * Incorporated review feedback. --- .../reference/docs/multi-termvectors.asciidoc | 85 ++++++++++++--- docs/reference/docs/termvectors.asciidoc | 101 +++++++++++++----- docs/reference/rest-api/common-parms.asciidoc | 45 ++++++-- 3 files changed, 179 insertions(+), 52 deletions(-) diff --git a/docs/reference/docs/multi-termvectors.asciidoc b/docs/reference/docs/multi-termvectors.asciidoc index d4749ec68b8f..f351c29745aa 100644 --- a/docs/reference/docs/multi-termvectors.asciidoc +++ b/docs/reference/docs/multi-termvectors.asciidoc @@ -1,14 +1,10 @@ [[docs-multi-termvectors]] -=== Multi termvectors API +=== Multi term vectors API +++++ +Multi term vectors +++++ -Multi termvectors API allows to get multiple termvectors at once. The -documents from which to retrieve the term vectors are specified by an index and id. -But the documents could also be artificially provided in the request itself. - -The response includes a `docs` -array with all the fetched termvectors, each element having the structure -provided by the <> -API. Here is an example: +Retrieves multiple term vectors with a single request. [source,console] -------------------------------------------------- @@ -32,10 +28,64 @@ POST /_mtermvectors -------------------------------------------------- // TEST[setup:twitter] -See the <> API for a description of possible parameters. 
+[[docs-multi-termvectors-api-request]]
+==== {api-request-title}

-The `_mtermvectors` endpoint can also be used against an index (in which case it
-is not required in the body):
+`POST /_mtermvectors`
+
+`POST /<index>/_mtermvectors`
+
+[[docs-multi-termvectors-api-desc]]
+==== {api-description-title}
+
+You can specify existing documents by index and ID or
+provide artificial documents in the body of the request.
+The index can be specified in the body of the request or in the request URI.
+
+The response contains a `docs` array with all the fetched term vectors.
+Each element has the structure provided by the <>
+API.
+
+See the <> API for more information about the data
+that can be included in the response.
+
+[[docs-multi-termvectors-api-path-params]]
+==== {api-path-parms-title}
+
+`<index>`::
+(Optional, string) Name of the index that contains the documents.
+
+[[docs-multi-termvectors-api-query-params]]
+==== {api-query-parms-title}
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=fields]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=field_statistics]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=offsets]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=payloads]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=positions]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=preference]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=routing]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=realtime]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=term_statistics]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=version]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=version_type]
+
+[float]
+[[docs-multi-termvectors-api-example]]
+==== {api-examples-title}
+
+If you specify an index in the request URI, the index does not need to be specified for each document
+in the request body:

 [source,console]
 --------------------------------------------------
@@ -57,7 +107,8 @@ POST /twitter/_mtermvectors
 --------------------------------------------------
 // TEST[setup:twitter]

-If all requested documents are on same index and also the parameters are the same, the request can be simplified:
+If all requested documents are in the same index and the parameters are the same, you can use the
+following simplified syntax:

 [source,console]
 --------------------------------------------------
@@ -74,9 +125,11 @@ POST /twitter/_mtermvectors
 --------------------------------------------------
 // TEST[setup:twitter]

-Additionally, just like for the <>
-API, term vectors could be generated for user provided documents.
-The mapping used is determined by `_index`.
+[[docs-multi-termvectors-artificial-doc]]
+===== Artificial documents
+
+You can also use `mtermvectors` to generate term vectors for _artificial_ documents provided
+in the body of the request. The mapping used is determined by the specified `_index`.

 [source,console]
 --------------------------------------------------
diff --git a/docs/reference/docs/termvectors.asciidoc b/docs/reference/docs/termvectors.asciidoc
index ee4e110205b1..abd8fc6de096 100644
--- a/docs/reference/docs/termvectors.asciidoc
+++ b/docs/reference/docs/termvectors.asciidoc
@@ -1,10 +1,10 @@
 [[docs-termvectors]]
-=== Term Vectors
+=== Term vectors API
+++++
+Term vectors
+++++

-Returns information and statistics on terms in the fields of a particular
-document. The document could be stored in the index or artificially provided
-by the user. Term vectors are <> by default, not near
-realtime.
This can be changed by setting `realtime` parameter to `false`. +Retrieves information and statistics for terms in the fields of a particular document. [source,console] -------------------------------------------------- @@ -12,8 +12,19 @@ GET /twitter/_termvectors/1 -------------------------------------------------- // TEST[setup:twitter] -Optionally, you can specify the fields for which the information is -retrieved either with a parameter in the url +[[docs-termvectors-api-request]] +==== {api-request-title} + +`GET //_termvectors/<_id>` + +[[docs-termvectors-api-desc]] +==== {api-description-title} + +You can retrieve term vectors for documents stored in the index or +for _artificial_ documents passed in the body of the request. + +You can specify the fields you are interested in through the `fields` parameter, +or by adding the fields to the request body. [source,console] -------------------------------------------------- @@ -21,18 +32,16 @@ GET /twitter/_termvectors/1?fields=message -------------------------------------------------- // TEST[setup:twitter] -or by adding the requested fields in the request body (see -example below). Fields can also be specified with wildcards -in similar way to the <> +Fields can be specified using wildcards, similar to the <>. -[float] -==== Return values +Term vectors are <> by default, not near real-time. +This can be changed by setting `realtime` parameter to `false`. -Three types of values can be requested: _term information_, _term statistics_ +You can request three types of values: _term information_, _term statistics_ and _field statistics_. By default, all term information and field -statistics are returned for all fields but no term statistics. +statistics are returned for all fields but term statistics are excluded. -[float] +[[docs-termvectors-api-term-info]] ===== Term information * term frequency in the field (always returned) @@ -52,7 +61,7 @@ should make sure that the string you are taking a sub-string of is also encoded using UTF-16. ====== -[float] +[[docs-termvectors-api-term-stats]] ===== Term statistics Setting `term_statistics` to `true` (default is `false`) will @@ -65,7 +74,7 @@ return By default these values are not returned since term statistics can have a serious performance impact. -[float] +[[docs-termvectors-api-field-stats]] ===== Field statistics Setting `field_statistics` to `false` (default is `true`) will @@ -77,8 +86,8 @@ omit : * sum of total term frequencies (the sum of total term frequencies of each term in this field) -[float] -===== Terms Filtering +[[docs-termvectors-api-terms-filtering]] +===== Terms filtering With the parameter `filter`, the terms returned could also be filtered based on their tf-idf scores. This could be useful in order find out a good @@ -105,7 +114,7 @@ The following sub-parameters are supported: `max_word_length`:: The maximum word length above which words will be ignored. Defaults to unbounded (`0`). -[float] +[[docs-termvectors-api-behavior]] ==== Behaviour The term and field statistics are not accurate. Deleted documents @@ -116,8 +125,45 @@ whereas the absolute numbers have no meaning in this context. By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected. Use `routing` only to hit a particular shard. -[float] -===== Example: Returning stored term vectors +[[docs-termvectors-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) Name of the index that contains the document. 
+ +`<_id>`:: +(Optional, string) Unique identifier of the document. + +[[docs-termvectors-api-query-params]] +==== {api-query-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=fields] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=field_statistics] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=offsets] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=payloads] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=positions] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=preference] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=routing] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=realtime] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=term_statistics] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=version] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=version_type] + +[[docs-termvectors-api-example]] +==== {api-examples-title} + +[[docs-termvectors-api-stored-termvectors]] +===== Returning stored term vectors First, we create an index that stores term vectors, payloads etc. : @@ -259,8 +305,8 @@ Response: // TEST[continued] // TESTRESPONSE[s/"took": 6/"took": "$body.took"/] -[float] -===== Example: Generating term vectors on the fly +[[docs-termvectors-api-generate-termvectors]] +===== Generating term vectors on the fly Term vectors which are not explicitly stored in the index are automatically computed on the fly. The following request returns all information and statistics for the @@ -281,8 +327,7 @@ GET /twitter/_termvectors/1 // TEST[continued] [[docs-termvectors-artificial-doc]] -[float] -===== Example: Artificial documents +===== Artificial documents Term vectors can also be generated for artificial documents, that is for documents not present in the index. For example, the following request would @@ -304,7 +349,6 @@ GET /twitter/_termvectors // TEST[continued] [[docs-termvectors-per-field-analyzer]] -[float] ====== Per-field analyzer Additionally, a different analyzer than the one at the field may be provided @@ -369,8 +413,7 @@ Response: [[docs-termvectors-terms-filtering]] -[float] -===== Example: Terms filtering +===== Terms filtering Finally, the terms returned could be filtered based on their tf-idf scores. In the example below we obtain the three most "interesting" keywords from the diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index 11b9e8bad591..adbbc138260c 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -135,6 +135,13 @@ Wildcard expressions are not accepted. -- end::expand-wildcards[] +tag::field_statistics[] +`field_statistics`:: +(Optional, boolean) If `true`, the response includes the document count, sum of document frequencies, +and sum of total term frequencies. +Defaults to `true`. +end::field_statistics[] + tag::fielddata-fields[] `fielddata_fields`:: (Optional, string) @@ -222,7 +229,7 @@ end::cat-h[] tag::help[] `help`:: -(Optional, boolean) If `true`, the response returns help information. Defaults +(Optional, boolean) If `true`, the response includes help information. Defaults to `false`. end::help[] @@ -444,6 +451,12 @@ Comma-separated list of node IDs or names used to limit returned information. end::node-id-query-parm[] +tag::offsets[] +``:: +(Optional, boolean) If `true`, the response includes term offsets. +Defaults to `true`. 
+end::offsets[] + tag::parent-task-id[] `parent_task_id`:: + @@ -469,6 +482,18 @@ tag::path-pipeline[] used to limit the request. end::path-pipeline[] +tag::payloads[] +`payloads`:: +(Optional, boolean) If `true`, the response includes term payloads. +Defaults to `true`. +end::payloads[] + +tag::positions[] +`positions`:: +(Optional, boolean) If `true`, the response includes term positions. +Defaults to `true`. +end::positions[] + tag::preference[] `preference`:: (Optional, string) Specifies the node or shard the operation should be @@ -488,8 +513,8 @@ end::query[] tag::realtime[] `realtime`:: -(Optional, boolean) Set to `false` to disable real time GET -(default: `true`). See <>. +(Optional, boolean) If `true`, the request is real-time as opposed to near-real-time. +Defaults to `true`. See <>. end::realtime[] tag::refresh[] @@ -502,8 +527,8 @@ end::refresh[] tag::request_cache[] `request_cache`:: -(Optional, boolean) Specifies if the request cache should be used for this -request. Defaults to the index-level setting. +(Optional, boolean) If `true`, the request cache is used for this request. +Defaults to the index-level setting. end::request_cache[] tag::requests_per_second[] @@ -645,6 +670,12 @@ tag::task-id[] (`node_id:task_number`). end::task-id[] +tag::term_statistics[] +`term_statistics`:: +(Optional, boolean) If `true`, the response includes term frequency and document frequency. +Defaults to `false`. +end::term_statistics[] + tag::terminate_after[] `terminate_after`:: (Optional, integer) The maximum number of documents to collect for each shard, @@ -671,8 +702,8 @@ end::timeoutparms[] tag::cat-v[] `v`:: -(Optional, boolean) If `true`, the response includes column headings. Defaults -to `false`. +(Optional, boolean) If `true`, the response includes column headings. +Defaults to `false`. 
end::cat-v[] tag::version[] From 3d4a7d0c6cfb3516bd1e94213788b66b42ca9cf7 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Sat, 5 Oct 2019 15:36:08 +0300 Subject: [PATCH 09/55] Mute org.elasticsearch.xpack.sql.jdbc.JdbcConfigurationTests.testDriverConfigurationWithSSLInURL tracked in #41557 --- .../sql/jdbc/JdbcConfigurationTests.java | 70 ++++++++++--------- 1 file changed, 36 insertions(+), 34 deletions(-) diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java index a8495fbf5711..3cfbae78ecdb 100644 --- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.sql.jdbc; +import org.apache.lucene.util.Constants; import org.elasticsearch.SpecialPermission; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.client.SslConfig; @@ -91,17 +92,17 @@ public class JdbcConfigurationTests extends ESTestCase { JdbcConfiguration ci = ci("jdbc:es://test?ssl=true"); assertThat(ci.baseUri().toString(), is("https://test:9200/")); } - + public void testHttpWithSSLEnabledFromPropertyAndDisabledFromProtocol() throws Exception { JdbcConfiguration ci = ci("jdbc:es://http://test?ssl=true"); assertThat(ci.baseUri().toString(), is("https://test:9200/")); } - + public void testHttpWithSSLEnabledFromProtocol() throws Exception { JdbcConfiguration ci = ci("jdbc:es://https://test:9200"); assertThat(ci.baseUri().toString(), is("https://test:9200/")); } - + public void testHttpWithSSLEnabledFromProtocolAndProperty() throws Exception { JdbcConfiguration ci = ci("jdbc:es://https://test:9200?ssl=true"); assertThat(ci.baseUri().toString(), is("https://test:9200/")); @@ -111,49 +112,49 @@ public class JdbcConfigurationTests extends ESTestCase { JdbcConfiguration ci = ci("jdbc:es://test?ssl=false"); assertThat(ci.baseUri().toString(), is("http://test:9200/")); } - + public void testHttpWithSSLDisabledFromPropertyAndProtocol() throws Exception { JdbcConfiguration ci = ci("jdbc:es://http://test?ssl=false"); assertThat(ci.baseUri().toString(), is("http://test:9200/")); } - + public void testHttpWithSSLDisabledFromPropertyAndEnabledFromProtocol() throws Exception { Exception e = expectThrows(JdbcSQLException.class, () -> ci("jdbc:es://https://test?ssl=false")); assertEquals("Cannot enable SSL: HTTPS protocol being used in the URL and SSL disabled in properties", e.getMessage()); } - + public void testValidatePropertiesDefault() { Exception e = expectThrows(JdbcSQLException.class, () -> ci("jdbc:es://test:9200?pagee.size=12")); assertEquals("Unknown parameter [pagee.size]; did you mean [page.size]", e.getMessage()); - + e = expectThrows(JdbcSQLException.class, () -> ci("jdbc:es://test:9200?foo=bar")); assertEquals("Unknown parameter [foo]; did you mean [ssl]", e.getMessage()); } - + public void testValidateProperties() { Exception e = expectThrows(JdbcSQLException.class, () -> ci("jdbc:es://test:9200?pagee.size=12&validate.properties=true")); assertEquals("Unknown parameter [pagee.size]; did you mean [page.size]", e.getMessage()); - + e = expectThrows(JdbcSQLException.class, () -> ci("jdbc:es://test:9200?&validate.properties=true&something=some_value")); assertEquals("Unknown parameter [something]; did you mean []", e.getMessage()); - + Properties properties 
= new Properties(); properties.setProperty(PROPERTIES_VALIDATION, "true"); e = expectThrows(JdbcSQLException.class, () -> JdbcConfiguration.create("jdbc:es://test:9200?something=some_value", properties, 0)); assertEquals("Unknown parameter [something]; did you mean []", e.getMessage()); } - + public void testNoPropertiesValidation() throws SQLException { JdbcConfiguration ci = ci("jdbc:es://test:9200?pagee.size=12&validate.properties=false"); assertEquals(false, ci.validateProperties()); - + // URL properties test long queryTimeout = randomNonNegativeLong(); long connectTimeout = randomNonNegativeLong(); long networkTimeout = randomNonNegativeLong(); long pageTimeout = randomNonNegativeLong(); int pageSize = randomIntBetween(0, Integer.MAX_VALUE); - + ci = ci("jdbc:es://test:9200?validate.properties=false&something=some_value&query.timeout=" + queryTimeout + "&connect.timeout=" + connectTimeout + "&network.timeout=" + networkTimeout + "&page.timeout=" + pageTimeout + "&page.size=" + pageSize); assertEquals(false, ci.validateProperties()); @@ -162,7 +163,7 @@ public class JdbcConfigurationTests extends ESTestCase { assertEquals(networkTimeout, ci.networkTimeout()); assertEquals(pageTimeout, ci.pageTimeout()); assertEquals(pageSize, ci.pageSize()); - + // Properties test Properties properties = new Properties(); properties.setProperty(PROPERTIES_VALIDATION, "false"); @@ -171,7 +172,7 @@ public class JdbcConfigurationTests extends ESTestCase { properties.put(CONNECT_TIMEOUT, Long.toString(connectTimeout)); properties.put(NETWORK_TIMEOUT, Long.toString(networkTimeout)); properties.put(PAGE_SIZE, Integer.toString(pageSize)); - + // also putting validate.properties in URL to be overriden by the properties value ci = JdbcConfiguration.create("jdbc:es://test:9200?validate.properties=true&something=some_value", properties, 0); assertEquals(false, ci.validateProperties()); @@ -207,37 +208,37 @@ public class JdbcConfigurationTests extends ESTestCase { public void testSSLPropertiesInUrl() throws Exception { Map urlPropMap = sslProperties(); - + Properties allProps = new Properties(); allProps.putAll(urlPropMap); String sslUrlProps = urlPropMap.entrySet().stream().map(e -> e.getKey() + "=" + e.getValue()).collect(Collectors.joining("&")); - + assertSslConfig(allProps, ci("jdbc:es://test?" + sslUrlProps.toString()).sslConfig()); } - + public void testSSLPropertiesInUrlAndProperties() throws Exception { Map urlPropMap = new HashMap<>(4); urlPropMap.put("ssl", "false"); urlPropMap.put("ssl.protocol", "SSLv3"); urlPropMap.put("ssl.keystore.location", "/abc/xyz"); urlPropMap.put("ssl.keystore.pass", "mypass"); - + Map propMap = new HashMap<>(4); propMap.put("ssl.keystore.type", "PKCS12"); propMap.put("ssl.truststore.location", "/foo/bar"); propMap.put("ssl.truststore.pass", "anotherpass"); propMap.put("ssl.truststore.type", "jks"); - + Properties props = new Properties(); props.putAll(propMap); String sslUrlProps = urlPropMap.entrySet().stream().map(e -> e.getKey() + "=" + e.getValue()).collect(Collectors.joining("&")); - + Properties allProps = new Properties(); allProps.putAll(urlPropMap); allProps.putAll(propMap); assertSslConfig(allProps, JdbcConfiguration.create("jdbc:es://test?" 
+ sslUrlProps.toString(), props, 0).sslConfig()); } - + public void testSSLPropertiesOverride() throws Exception { Map urlPropMap = sslProperties(); Map propMap = new HashMap<>(8); @@ -249,18 +250,19 @@ public class JdbcConfigurationTests extends ESTestCase { propMap.put("ssl.truststore.location", "/baz"); propMap.put("ssl.truststore.pass", "different_anotherpass"); propMap.put("ssl.truststore.type", "PKCS11"); - + Properties props = new Properties(); props.putAll(propMap); String sslUrlProps = urlPropMap.entrySet().stream().map(e -> e.getKey() + "=" + e.getValue()).collect(Collectors.joining("&")); assertSslConfig(props, JdbcConfiguration.create("jdbc:es://test?" + sslUrlProps.toString(), props, 0).sslConfig()); } - + @SuppressForbidden(reason = "JDBC drivers allows logging to Sys.out") public void testDriverConfigurationWithSSLInURL() { + assumeFalse("https://github.com/elastic/elasticsearch/issues/41557", Constants.WINDOWS); Map urlPropMap = sslProperties(); String sslUrlProps = urlPropMap.entrySet().stream().map(e -> e.getKey() + "=" + e.getValue()).collect(Collectors.joining("&")); - + SecurityManager sm = System.getSecurityManager(); if (sm != null) { sm.checkPermission(new SpecialPermission()); @@ -276,7 +278,7 @@ public class JdbcConfigurationTests extends ESTestCase { fail("Driver registration should have been successful. Error: " + sqle); } } - + public void testTyposInSslConfigInUrl(){ assertJdbcSqlExceptionFromUrl("ssl.protocl", "ssl.protocol"); assertJdbcSqlExceptionFromUrl("sssl", "ssl"); @@ -287,7 +289,7 @@ public class JdbcConfigurationTests extends ESTestCase { assertJdbcSqlExceptionFromUrl("ssl.tuststore.pass", "ssl.truststore.pass"); assertJdbcSqlExceptionFromUrl("ssl.ruststore.type", "ssl.truststore.type"); } - + public void testTyposInSslConfigInProperties() { assertJdbcSqlExceptionFromProperties("ssl.protocl", "ssl.protocol"); assertJdbcSqlExceptionFromProperties("sssl", "ssl"); @@ -298,7 +300,7 @@ public class JdbcConfigurationTests extends ESTestCase { assertJdbcSqlExceptionFromProperties("ssl.tuststore.pass", "ssl.truststore.pass"); assertJdbcSqlExceptionFromProperties("ssl.ruststore.type", "ssl.truststore.type"); } - + static Map sslProperties() { Map sslPropertiesMap = new HashMap<>(8); // always using "false" so that the SSLContext doesn't actually start verifying the keystore and trustore @@ -311,31 +313,31 @@ public class JdbcConfigurationTests extends ESTestCase { sslPropertiesMap.put("ssl.truststore.location", "/foo/bar"); sslPropertiesMap.put("ssl.truststore.pass", "anotherpass"); sslPropertiesMap.put("ssl.truststore.type", "jks"); - + return sslPropertiesMap; } - + static void assertSslConfig(Properties allProperties, SslConfig sslConfig) throws URISyntaxException { // because SslConfig doesn't expose its internal properties (and it shouldn't), // we compare a newly created SslConfig with the one from the JdbcConfiguration with the equals() method SslConfig mockSslConfig = new SslConfig(allProperties, new URI("http://test:9200/")); assertEquals(mockSslConfig, sslConfig); } - + private void assertJdbcSqlExceptionFromUrl(String wrongSetting, String correctSetting) { String url = "jdbc:es://test?" 
+ wrongSetting + "=foo"; assertJdbcSqlException(wrongSetting, correctSetting, url, null); } - + private void assertJdbcSqlExceptionFromProperties(String wrongSetting, String correctSetting) { String url = "jdbc:es://test"; Properties props = new Properties(); props.put(wrongSetting, correctSetting); assertJdbcSqlException(wrongSetting, correctSetting, url, props); } - + private void assertJdbcSqlException(String wrongSetting, String correctSetting, String url, Properties props) { - JdbcSQLException ex = expectThrows(JdbcSQLException.class, + JdbcSQLException ex = expectThrows(JdbcSQLException.class, () -> JdbcConfiguration.create(url, props, 0)); assertEquals("Unknown parameter [" + wrongSetting + "]; did you mean [" + correctSetting + "]", ex.getMessage()); } From da59dfe09d9c94d38c40c1f1a64f34cd98ceffd5 Mon Sep 17 00:00:00 2001 From: Rory Hunter Date: Sat, 5 Oct 2019 15:40:42 +0100 Subject: [PATCH 10/55] Introduce packaging tests for Docker (#46599) Closes #37617. Add packaging tests for our Docker images, similar to what we have for RPMs or Debian packages. This works by running a container and probing it e.g. via `docker exec`. Test can also be run in Vagrant, by exporting the Docker images to disk and loading them again in VMs. Docker is installed via `Vagrantfile` in a selection of boxes. --- Vagrantfile | 105 +++++- .../gradle/test/DistroTestPlugin.java | 15 +- .../gradle/DistributionDownloadPlugin.java | 27 +- .../gradle/ElasticsearchDistribution.java | 30 +- distribution/docker/build.gradle | 34 ++ .../docker/docker-export/build.gradle | 2 + .../docker/oss-docker-export/build.gradle | 2 + .../packaging/test/DockerTests.java | 225 +++++++++++ .../packaging/test/PackagingTestCase.java | 8 +- .../packaging/util/Distribution.java | 16 +- .../elasticsearch/packaging/util/Docker.java | 355 ++++++++++++++++++ .../packaging/util/FileMatcher.java | 1 + .../packaging/util/Installation.java | 14 + .../packaging/util/Platforms.java | 4 + .../elasticsearch/packaging/util/Shell.java | 7 +- settings.gradle | 2 + 16 files changed, 817 insertions(+), 30 deletions(-) create mode 100644 distribution/docker/docker-export/build.gradle create mode 100644 distribution/docker/oss-docker-export/build.gradle create mode 100644 qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java create mode 100644 qa/os/src/test/java/org/elasticsearch/packaging/util/Docker.java diff --git a/Vagrantfile b/Vagrantfile index 4d1c4e92b7a8..93b60b46872f 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -1,5 +1,5 @@ # -*- mode: ruby -*- -# vi: set ft=ruby : +# vim: ft=ruby ts=2 sw=2 sts=2 et: # This Vagrantfile exists to test packaging. Read more about its use in the # vagrant section in TESTING.asciidoc. @@ -63,6 +63,7 @@ Vagrant.configure(2) do |config| # Install Jayatana so we can work around it being present. [ -f /usr/share/java/jayatanaag.jar ] || install jayatana SHELL + ubuntu_docker config end end 'ubuntu-1804'.tap do |box| @@ -72,6 +73,7 @@ Vagrant.configure(2) do |config| # Install Jayatana so we can work around it being present. 
[ -f /usr/share/java/jayatanaag.jar ] || install jayatana
     SHELL
+    ubuntu_docker config
   end
 end
 'debian-8'.tap do |box|
@@ -87,6 +89,7 @@ Vagrant.configure(2) do |config|
   config.vm.define box, define_opts do |config|
     config.vm.box = 'elastic/debian-9-x86_64'
     deb_common config, box
+    deb_docker config
   end
 end
 'centos-6'.tap do |box|
@@ -99,6 +102,7 @@ Vagrant.configure(2) do |config|
   config.vm.define box, define_opts do |config|
     config.vm.box = 'elastic/centos-7-x86_64'
     rpm_common config, box
+    rpm_docker config
   end
 end
 'oel-6'.tap do |box|
@@ -117,12 +121,14 @@ Vagrant.configure(2) do |config|
   config.vm.define box, define_opts do |config|
     config.vm.box = 'elastic/fedora-28-x86_64'
     dnf_common config, box
+    dnf_docker config
   end
 end
 'fedora-29'.tap do |box|
   config.vm.define box, define_opts do |config|
     config.vm.box = 'elastic/fedora-28-x86_64'
     dnf_common config, box
+    dnf_docker config
   end
 end
 'opensuse-42'.tap do |box|
@@ -185,6 +191,63 @@ def deb_common(config, name, extra: '')
   )
 end

+def ubuntu_docker(config)
+  config.vm.provision 'install Docker using apt', type: 'shell', inline: <<-SHELL
+    # Install packages to allow apt to use a repository over HTTPS
+    apt-get install -y \
+      apt-transport-https \
+      ca-certificates \
+      curl \
+      gnupg2 \
+      software-properties-common
+
+    # Add Docker’s official GPG key
+    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
+
+    # Set up the stable Docker repository
+    add-apt-repository \
+      "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+      $(lsb_release -cs) \
+      stable"
+
+    # Install Docker. Unlike Fedora and CentOS, this also starts the daemon.
+    apt-get update
+    apt-get install -y docker-ce docker-ce-cli containerd.io
+
+    # Add vagrant to the Docker group, so that it can run commands
+    usermod -aG docker vagrant
+  SHELL
+end
+
+
+def deb_docker(config)
+  config.vm.provision 'install Docker using apt', type: 'shell', inline: <<-SHELL
+    # Install packages to allow apt to use a repository over HTTPS
+    apt-get install -y \
+      apt-transport-https \
+      ca-certificates \
+      curl \
+      gnupg2 \
+      software-properties-common
+
+    # Add Docker’s official GPG key
+    curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add -
+
+    # Set up the stable Docker repository
+    add-apt-repository \
+      "deb [arch=amd64] https://download.docker.com/linux/debian \
+      $(lsb_release -cs) \
+      stable"
+
+    # Install Docker. Unlike Fedora and CentOS, this also starts the daemon.
+    apt-get update
+    apt-get install -y docker-ce docker-ce-cli containerd.io
+
+    # Add vagrant to the Docker group, so that it can run commands
+    usermod -aG docker vagrant
+  SHELL
+end
+
 def rpm_common(config, name)
   linux_common(
     config,
@@ -195,6 +258,25 @@ def rpm_common(config, name)
   )
 end

+def rpm_docker(config)
+  config.vm.provision 'install Docker using yum', type: 'shell', inline: <<-SHELL
+    # Install prerequisites
+    yum install -y yum-utils device-mapper-persistent-data lvm2
+
+    # Add repository
+    yum-config-manager -y --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+
+    # Install Docker
+    yum install -y docker-ce docker-ce-cli containerd.io
+
+    # Start Docker
+    systemctl enable --now docker
+
+    # Add vagrant to the Docker group, so that it can run commands
+    usermod -aG docker vagrant
+  SHELL
+end
+
 def dnf_common(config, name)
   # Autodetect doesn't work....
if Vagrant.has_plugin?('vagrant-cachier') @@ -211,6 +293,25 @@ def dnf_common(config, name) ) end +def dnf_docker(config) + config.vm.provision 'install Docker using dnf', type: 'shell', inline: <<-SHELL + # Install prerequisites + dnf -y install dnf-plugins-core + + # Add repository + dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo + + # Install Docker + dnf install -y docker-ce docker-ce-cli containerd.io + + # Start Docker + systemctl enable --now docker + + # Add vagrant to the Docker group, so that it can run commands + usermod -aG docker vagrant + SHELL +end + def suse_common(config, name, extra: '') linux_common( config, @@ -268,7 +369,7 @@ def linux_common(config, # This prevents leftovers from previous tests using the # same VM from messing up the current test - config.vm.provision 'clean es installs in tmp', run: 'always', type: 'shell', inline: <<-SHELL + config.vm.provision 'clean es installs in tmp', type: 'shell', inline: <<-SHELL rm -rf /tmp/elasticsearch* SHELL diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java index b2267f7833ba..6203d0c9b12e 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java @@ -319,10 +319,14 @@ public class DistroTestPlugin implements Plugin { List currentDistros = new ArrayList<>(); List upgradeDistros = new ArrayList<>(); - for (Type type : Arrays.asList(Type.DEB, Type.RPM)) { + for (Type type : Arrays.asList(Type.DEB, Type.RPM, Type.DOCKER)) { for (Flavor flavor : Flavor.values()) { for (boolean bundledJdk : Arrays.asList(true, false)) { - addDistro(distributions, type, null, flavor, bundledJdk, VersionProperties.getElasticsearch(), currentDistros); + // We should never add a Docker distro with bundledJdk == false + boolean skip = type == Type.DOCKER && bundledJdk == false; + if (skip == false) { + addDistro(distributions, type, null, flavor, bundledJdk, VersionProperties.getElasticsearch(), currentDistros); + } } } // upgrade version is always bundled jdk @@ -386,6 +390,11 @@ public class DistroTestPlugin implements Plugin { } private static String destructiveDistroTestTaskName(ElasticsearchDistribution distro) { - return "destructiveDistroTest." + distroId(distro.getType(), distro.getPlatform(), distro.getFlavor(), distro.getBundledJdk()); + Type type = distro.getType(); + return "destructiveDistroTest." 
+ distroId( + type, + distro.getPlatform(), + distro.getFlavor(), + distro.getBundledJdk()); } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java index 64e193d4623c..029ee004b7b9 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java @@ -93,8 +93,8 @@ public class DistributionDownloadPlugin implements Plugin { // for the distribution as a file, just depend on the artifact directly dependencies.add(distribution.configuration.getName(), dependencyNotation(project, distribution)); - // no extraction allowed for rpm or deb - if (distribution.getType() != Type.RPM && distribution.getType() != Type.DEB) { + // no extraction allowed for rpm, deb or docker + if (distribution.getType().shouldExtract()) { // for the distribution extracted, add a root level task that does the extraction, and depend on that // extracted configuration as an artifact consisting of the extracted distribution directory dependencies.add(distribution.getExtracted().configuration.getName(), @@ -221,7 +221,6 @@ public class DistributionDownloadPlugin implements Plugin { } private static Dependency projectDependency(Project project, String projectPath, String projectConfig) { - if (project.findProject(projectPath) == null) { throw new GradleException("no project [" + projectPath + "], project names: " + project.getRootProject().getAllprojects()); } @@ -233,11 +232,20 @@ public class DistributionDownloadPlugin implements Plugin { private static String distributionProjectPath(ElasticsearchDistribution distribution) { String projectPath = ":distribution"; - if (distribution.getType() == Type.INTEG_TEST_ZIP) { - projectPath += ":archives:integ-test-zip"; - } else { - projectPath += distribution.getType() == Type.ARCHIVE ? ":archives:" : ":packages:"; - projectPath += distributionProjectName(distribution); + switch (distribution.getType()) { + case INTEG_TEST_ZIP: + projectPath += ":archives:integ-test-zip"; + break; + + case DOCKER: + projectPath += ":docker:"; + projectPath += distributionProjectName(distribution); + break; + + default: + projectPath += distribution.getType() == Type.ARCHIVE ? ":archives:" : ":packages:"; + projectPath += distributionProjectName(distribution); + break; } return projectPath; } @@ -250,9 +258,12 @@ public class DistributionDownloadPlugin implements Plugin { if (distribution.getBundledJdk() == false) { projectName += "no-jdk-"; } + if (distribution.getType() == Type.ARCHIVE) { Platform platform = distribution.getPlatform(); projectName += platform.toString() + (platform == Platform.WINDOWS ? 
"-zip" : "-tar"); + } else if (distribution.getType() == Type.DOCKER) { + projectName += "docker-export"; } else { projectName += distribution.getType(); } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java b/buildSrc/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java index 22ede2d1becf..c18485410cd7 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java @@ -46,12 +46,25 @@ public class ElasticsearchDistribution implements Buildable, Iterable { INTEG_TEST_ZIP, ARCHIVE, RPM, - DEB; + DEB, + DOCKER; @Override public String toString() { return super.toString().toLowerCase(Locale.ROOT); } + + public boolean shouldExtract() { + switch (this) { + case DEB: + case DOCKER: + case RPM: + return false; + + default: + return true; + } + } } public enum Flavor { @@ -171,11 +184,16 @@ public class ElasticsearchDistribution implements Buildable, Iterable { } public Extracted getExtracted() { - if (getType() == Type.RPM || getType() == Type.DEB) { - throw new UnsupportedOperationException("distribution type [" + getType() + "] for " + - "elasticsearch distribution [" + name + "] cannot be extracted"); + switch (getType()) { + case DEB: + case DOCKER: + case RPM: + throw new UnsupportedOperationException("distribution type [" + getType() + "] for " + + "elasticsearch distribution [" + name + "] cannot be extracted"); + + default: + return extracted; } - return extracted; } @Override @@ -217,7 +235,7 @@ public class ElasticsearchDistribution implements Buildable, Iterable { if (platform.isPresent() == false) { platform.set(CURRENT_PLATFORM); } - } else { // rpm or deb + } else { // rpm, deb or docker if (platform.isPresent()) { throw new IllegalArgumentException("platform not allowed for elasticsearch distribution [" + name + "] of type [" + getType() + "]"); diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index e4f0a04d4e9d..0905bde750d0 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -186,3 +186,37 @@ assemble.dependsOn "buildDockerImage" if (tasks.findByName("composePull")) { tasks.composePull.enabled = false } + +/* + * The export subprojects write out the generated Docker images to disk, so + * that they can be easily reloaded, for example into a VM. + */ +subprojects { Project subProject -> + if (subProject.name.contains('docker-export')) { + apply plugin: 'distribution' + + final boolean oss = subProject.name.startsWith('oss') + + def exportTaskName = taskName("export", oss, "DockerImage") + def buildTaskName = taskName("build", oss, "DockerImage") + def tarFile = "${parent.projectDir}/build/elasticsearch${oss ? '-oss' : ''}_test.${VersionProperties.elasticsearch}.docker.tar" + + final Task exportDockerImageTask = task(exportTaskName, type: LoggedExec) { + executable 'docker' + args "save", + "-o", + tarFile, + "elasticsearch${oss ? '-oss' : ''}:test" + } + + exportDockerImageTask.dependsOn(parent.tasks.getByName(buildTaskName)) + + artifacts.add('default', file(tarFile)) { + type 'tar' + name "elasticsearch${oss ? 
'-oss' : ''}" + builtBy exportTaskName + } + + assemble.dependsOn exportTaskName + } +} diff --git a/distribution/docker/docker-export/build.gradle b/distribution/docker/docker-export/build.gradle new file mode 100644 index 000000000000..537b5a093683 --- /dev/null +++ b/distribution/docker/docker-export/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// export is done in the parent project. diff --git a/distribution/docker/oss-docker-export/build.gradle b/distribution/docker/oss-docker-export/build.gradle new file mode 100644 index 000000000000..537b5a093683 --- /dev/null +++ b/distribution/docker/oss-docker-export/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// export is done in the parent project. diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java new file mode 100644 index 000000000000..52205263d3ed --- /dev/null +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java @@ -0,0 +1,225 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.packaging.test; + +import org.apache.http.client.fluent.Request; +import org.elasticsearch.packaging.util.Distribution; +import org.elasticsearch.packaging.util.Docker.DockerShell; +import org.elasticsearch.packaging.util.Installation; +import org.elasticsearch.packaging.util.ServerUtils; +import org.elasticsearch.packaging.util.Shell.Result; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Map; + +import static java.nio.file.attribute.PosixFilePermissions.fromString; +import static org.elasticsearch.packaging.util.Docker.assertPermissionsAndOwnership; +import static org.elasticsearch.packaging.util.Docker.copyFromContainer; +import static org.elasticsearch.packaging.util.Docker.ensureImageIsLoaded; +import static org.elasticsearch.packaging.util.Docker.existsInContainer; +import static org.elasticsearch.packaging.util.Docker.removeContainer; +import static org.elasticsearch.packaging.util.Docker.runContainer; +import static org.elasticsearch.packaging.util.Docker.verifyContainerInstallation; +import static org.elasticsearch.packaging.util.Docker.waitForPathToExist; +import static org.elasticsearch.packaging.util.FileMatcher.p660; +import static org.elasticsearch.packaging.util.FileUtils.append; +import static org.elasticsearch.packaging.util.FileUtils.getTempDir; +import static org.elasticsearch.packaging.util.FileUtils.mkdir; +import static org.elasticsearch.packaging.util.FileUtils.rm; +import static org.elasticsearch.packaging.util.ServerUtils.makeRequest; +import static org.elasticsearch.packaging.util.ServerUtils.waitForElasticsearch; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.emptyString; +import static org.junit.Assume.assumeTrue; + +public class DockerTests extends PackagingTestCase { + protected DockerShell sh; + + @BeforeClass + public static void filterDistros() { + assumeTrue("only Docker", distribution.packaging == Distribution.Packaging.DOCKER); + + ensureImageIsLoaded(distribution); + } + + @AfterClass + public static void cleanup() { + // runContainer also calls this, so we don't need this method to be annotated as `@After` + removeContainer(); + } + + @Before + public void setupTest() throws Exception { + sh = new DockerShell(); + installation = runContainer(distribution()); + } + + /** + * Checks that the Docker image can be run, and that it passes various checks. + */ + public void test10Install() { + verifyContainerInstallation(installation, distribution()); + } + + /** + * Checks that no plugins are initially active. + */ + public void test20PluginsListWithNoPlugins() { + final Installation.Executables bin = installation.executables(); + final Result r = sh.run(bin.elasticsearchPlugin + " list"); + + assertThat("Expected no plugins to be listed", r.stdout, emptyString()); + } + + /** + * Check that a keystore can be manually created using the provided CLI tool. 
+     */
+    public void test40CreateKeystoreManually() throws InterruptedException {
+        final Installation.Executables bin = installation.executables();
+
+        final Path keystorePath = installation.config("elasticsearch.keystore");
+
+        waitForPathToExist(keystorePath);
+
+        // Move the auto-created one out of the way, or else the CLI prompt asks us to confirm
+        sh.run("mv " + keystorePath + " " + keystorePath + ".bak");
+
+        sh.run(bin.elasticsearchKeystore + " create");
+
+        final Result r = sh.run(bin.elasticsearchKeystore + " list");
+        assertThat(r.stdout, containsString("keystore.seed"));
+    }
+
+    /**
+     * Send some basic index, count and delete requests, in order to check that the installation
+     * is minimally functional.
+     */
+    public void test50BasicApiTests() throws Exception {
+        waitForElasticsearch(installation);
+
+        assertTrue(existsInContainer(installation.logs.resolve("gc.log")));
+
+        ServerUtils.runElasticsearchTests();
+    }
+
+    /**
+     * Check that the default keystore is automatically created.
+     */
+    public void test60AutoCreateKeystore() throws Exception {
+        final Path keystorePath = installation.config("elasticsearch.keystore");
+
+        waitForPathToExist(keystorePath);
+
+        assertPermissionsAndOwnership(keystorePath, p660);
+
+        final Installation.Executables bin = installation.executables();
+        final Result result = sh.run(bin.elasticsearchKeystore + " list");
+        assertThat(result.stdout, containsString("keystore.seed"));
+    }
+
+    /**
+     * Check that the default config can be overridden using a bind mount, and that env vars are respected.
+     */
+    public void test70BindMountCustomPathConfAndJvmOptions() throws Exception {
+        final Path tempConf = getTempDir().resolve("esconf-alternate");
+
+        try {
+            mkdir(tempConf);
+            copyFromContainer(installation.config("elasticsearch.yml"), tempConf.resolve("elasticsearch.yml"));
+            copyFromContainer(installation.config("log4j2.properties"), tempConf.resolve("log4j2.properties"));
+
+            // we have to disable Log4j from using JMX lest it hit a security
+            // manager exception before we have configured logging; this will fail
+            // startup since we detect usages of logging before it is configured
+            final String jvmOptions =
+                "-Xms512m\n" +
+                "-Xmx512m\n" +
+                "-Dlog4j2.disable.jmx=true\n";
+            append(tempConf.resolve("jvm.options"), jvmOptions);
+
+            // Make the temp directory and contents accessible when bind-mounted
+            Files.setPosixFilePermissions(tempConf, fromString("rwxrwxrwx"));
+
+            // Restart the container
+            removeContainer();
+            runContainer(distribution(), tempConf, Map.of(
+                "ES_JAVA_OPTS", "-XX:-UseCompressedOops"
+            ));
+
+            waitForElasticsearch(installation);
+
+            final String nodesResponse = makeRequest(Request.Get("http://localhost:9200/_nodes"));
+            assertThat(nodesResponse, containsString("\"heap_init_in_bytes\":536870912"));
+            assertThat(nodesResponse, containsString("\"using_compressed_ordinary_object_pointers\":\"false\""));
+        } finally {
+            rm(tempConf);
+        }
+    }
+
+    /**
+     * Check whether the elasticsearch-certutil tool has been shipped correctly,
+     * and if present then it can execute.
+ */ + public void test90SecurityCliPackaging() { + final Installation.Executables bin = installation.executables(); + + final Path securityCli = installation.lib.resolve("tools").resolve("security-cli"); + + if (distribution().isDefault()) { + assertTrue(existsInContainer(securityCli)); + + Result result = sh.run(bin.elasticsearchCertutil + " --help"); + assertThat(result.stdout, containsString("Simplifies certificate creation for use with the Elastic Stack")); + + // Ensure that the exit code from the java command is passed back up through the shell script + result = sh.runIgnoreExitCode(bin.elasticsearchCertutil + " invalid-command"); + assertThat(result.isSuccess(), is(false)); + assertThat(result.stdout, containsString("Unknown command [invalid-command]")); + } else { + assertFalse(existsInContainer(securityCli)); + } + } + + /** + * Check that the elasticsearch-shard tool is shipped in the Docker image and is executable. + */ + public void test91ElasticsearchShardCliPackaging() { + final Installation.Executables bin = installation.executables(); + + final Result result = sh.run(bin.elasticsearchShard + " -h"); + assertThat(result.stdout, containsString("A CLI tool to remove corrupted parts of unrecoverable shards")); + } + + /** + * Check that the elasticsearch-node tool is shipped in the Docker image and is executable. + */ + public void test92ElasticsearchNodeCliPackaging() { + final Installation.Executables bin = installation.executables(); + + final Result result = sh.run(bin.elasticsearchNode + " -h"); + assertThat(result.stdout, + containsString("A CLI tool to do unsafe cluster and index manipulations on current node")); + } +} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java index e7bf95c98e90..3efd1b36ddbd 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java @@ -69,11 +69,11 @@ public abstract class PackagingTestCase extends Assert { protected static final String systemJavaHome; static { Shell sh = new Shell(); - if (Platforms.LINUX) { - systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); - } else { - assert Platforms.WINDOWS; + if (Platforms.WINDOWS) { systemJavaHome = sh.run("$Env:SYSTEM_JAVA_HOME").stdout.trim(); + } else { + assert Platforms.LINUX || Platforms.DARWIN; + systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); } } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Distribution.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Distribution.java index aa040fb15fcd..13b2f31c7e4f 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Distribution.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Distribution.java @@ -33,9 +33,16 @@ public class Distribution { public Distribution(Path path) { this.path = path; String filename = path.getFileName().toString(); - int lastDot = filename.lastIndexOf('.'); - String extension = filename.substring(lastDot + 1); - this.packaging = Packaging.valueOf(extension.equals("gz") ? 
"TAR" : extension.toUpperCase(Locale.ROOT)); + + if (filename.endsWith(".gz")) { + this.packaging = Packaging.TAR; + } else if (filename.endsWith(".docker.tar")) { + this.packaging = Packaging.DOCKER; + } else { + int lastDot = filename.lastIndexOf('.'); + this.packaging = Packaging.valueOf(filename.substring(lastDot + 1).toUpperCase(Locale.ROOT)); + } + this.platform = filename.contains("windows") ? Platform.WINDOWS : Platform.LINUX; this.flavor = filename.contains("oss") ? Flavor.OSS : Flavor.DEFAULT; this.hasJdk = filename.contains("no-jdk") == false; @@ -62,7 +69,8 @@ public class Distribution { TAR(".tar.gz", Platforms.LINUX || Platforms.DARWIN), ZIP(".zip", Platforms.WINDOWS), DEB(".deb", Platforms.isDPKG()), - RPM(".rpm", Platforms.isRPM()); + RPM(".rpm", Platforms.isRPM()), + DOCKER(".docker.tar", Platforms.isDocker()); /** The extension of this distribution's file */ public final String extension; diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Docker.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Docker.java new file mode 100644 index 000000000000..d78b60236bc4 --- /dev/null +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Docker.java @@ -0,0 +1,355 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.packaging.util; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import java.nio.file.Path; +import java.nio.file.attribute.PosixFilePermission; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Stream; + +import static java.nio.file.attribute.PosixFilePermissions.fromString; +import static org.elasticsearch.packaging.util.FileMatcher.p644; +import static org.elasticsearch.packaging.util.FileMatcher.p660; +import static org.elasticsearch.packaging.util.FileMatcher.p755; +import static org.elasticsearch.packaging.util.FileMatcher.p775; +import static org.elasticsearch.packaging.util.FileUtils.getCurrentVersion; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +/** + * Utilities for running packaging tests against the Elasticsearch Docker images. + */ +public class Docker { + private static final Log logger = LogFactory.getLog(Docker.class); + + private static final Shell sh = new Shell(); + private static final DockerShell dockerShell = new DockerShell(); + + /** + * Tracks the currently running Docker image. 
An earlier implementation used a fixed container name, + but that appeared to cause problems with repeatedly destroying and recreating containers with + the same name. + */ + private static String containerId = null; + + /** + * Checks whether the required Docker image exists. If not, the image is loaded from disk. No check is made + * to see whether the image is up-to-date. + * @param distribution details about the docker image to potentially load. + */ + public static void ensureImageIsLoaded(Distribution distribution) { + final long count = sh.run("docker image ls --format '{{.Repository}}' " + distribution.flavor.name).stdout.lines().count(); + + if (count != 0) { + return; + } + + logger.info("Loading Docker image: " + distribution.path); + sh.run("docker load -i " + distribution.path); + } + + /** + * Runs an Elasticsearch Docker container. + * @param distribution details about the docker image being tested. + */ + public static Installation runContainer(Distribution distribution) throws Exception { + return runContainer(distribution, null, Collections.emptyMap()); + } + + /** + * Runs an Elasticsearch Docker container, with options for overriding the config directory + * through a bind mount, and passing additional environment variables. + * + * @param distribution details about the docker image being tested. + * @param configPath the path to the config to bind mount, or null + * @param envVars environment variables to set when running the container + */ + public static Installation runContainer(Distribution distribution, Path configPath, Map envVars) throws Exception { + removeContainer(); + + final List args = new ArrayList<>(); + + args.add("docker run"); + + // Remove the container once it exits + args.add("--rm"); + + // Run the container in the background + args.add("--detach"); + + envVars.forEach((key, value) -> args.add("--env " + key + "=\"" + value + "\"")); + + // The container won't run without configuring discovery + args.add("--env discovery.type=single-node"); + + // Map ports in the container to the host, so that we can send requests + args.add("--publish 9200:9200"); + args.add("--publish 9300:9300"); + + if (configPath != null) { + // Bind-mount the config dir, if specified + args.add("--volume \"" + configPath + ":/usr/share/elasticsearch/config\""); + } + + args.add(distribution.flavor.name + ":test"); + + final String command = String.join(" ", args); + logger.debug("Running command: " + command); + containerId = sh.run(command).stdout.trim(); + + waitForElasticsearchToStart(); + + return Installation.ofContainer(); + } + + /** + * Waits for the Elasticsearch process to start executing in the container. + * This is called every time a container is started. + */ + private static void waitForElasticsearchToStart() throws InterruptedException { + boolean isElasticsearchRunning = false; + int attempt = 0; + + do { + String psOutput = dockerShell.run("ps ax").stdout; + + if (psOutput.contains("/usr/share/elasticsearch/jdk/bin/java -X")) { + isElasticsearchRunning = true; + break; + } + + Thread.sleep(1000); + } while (attempt++ < 5); + + if (!isElasticsearchRunning) { + final String logs = sh.run("docker logs " + containerId).stdout; + fail("Elasticsearch container did not start successfully.\n\n" + logs); + } + } + + /** + * Removes the currently running container.
+ */ + public static void removeContainer() { + if (containerId != null) { + try { + // Remove the container, forcibly killing it if necessary + logger.debug("Removing container " + containerId); + final String command = "docker rm -f " + containerId; + final Shell.Result result = sh.runIgnoreExitCode(command); + + if (result.isSuccess() == false) { + // I'm not sure why we're already removing this container, but that's OK. + if (result.stderr.contains("removal of container " + containerId + " is already in progress") == false) { + throw new RuntimeException( + "Command was not successful: [" + command + "] result: " + result.toString()); + } + } + } finally { + // Null out the containerId under all circumstances, so that even if the remove command fails + // for some reason, the other tests will still proceed. Otherwise they can get stuck, continually + // trying to remove a non-existent container ID. + containerId = null; + } + } + } + + /** + * Copies a file from the container into the local filesystem + * @param from the file to copy in the container + * @param to the location to place the copy + */ + public static void copyFromContainer(Path from, Path to) { + final String script = "docker cp " + containerId + ":" + from + " " + to; + logger.debug("Copying file from container with: " + script); + sh.run(script); + } + + /** + * Extends {@link Shell} so that executed commands happen in the currently running Docker container. + */ + public static class DockerShell extends Shell { + @Override + protected String[] getScriptCommand(String script) { + assert containerId != null; + + return super.getScriptCommand("docker exec " + + "--user elasticsearch:root " + + "--tty " + + containerId + " " + + script); + } + } + + /** + * Checks whether a path exists in the Docker container. + */ + public static boolean existsInContainer(Path path) { + logger.debug("Checking whether file " + path + " exists in container"); + final Shell.Result result = dockerShell.runIgnoreExitCode("test -e " + path); + + return result.isSuccess(); + } + + /** + * Checks that the specified path's permissions and ownership match those specified. + */ + public static void assertPermissionsAndOwnership(Path path, Set expectedPermissions) { + logger.debug("Checking permissions and ownership of [" + path + "]"); + + final String[] components = dockerShell.run("stat --format=\"%U %G %A\" " + path).stdout.split("\\s+"); + + final String username = components[0]; + final String group = components[1]; + final String permissions = components[2]; + + // The final substring() is because we don't check the directory bit, and we + // also don't want any SELinux security context indicator. + Set actualPermissions = fromString(permissions.substring(1, 10)); + + assertEquals("Permissions of " + path + " are wrong", actualPermissions, expectedPermissions); + assertThat("File owner of " + path + " is wrong", username, equalTo("elasticsearch")); + assertThat("File group of " + path + " is wrong", group, equalTo("root")); + } + + /** + * Waits for up to 5 seconds for a path to exist in the container. + */ + public static void waitForPathToExist(Path path) throws InterruptedException { + int attempt = 0; + + do { + if (existsInContainer(path)) { + return; + } + + Thread.sleep(500); + } while (attempt++ < 10); + + fail(path + " failed to exist after 5000ms"); + } + + /** + * Perform a variety of checks on an installation. If the current distribution is not OSS, additional checks are carried out.
+ */ + public static void verifyContainerInstallation(Installation installation, Distribution distribution) { + verifyOssInstallation(installation); + if (distribution.flavor == Distribution.Flavor.DEFAULT) { + verifyDefaultInstallation(installation); + } + } + + private static void verifyOssInstallation(Installation es) { + dockerShell.run("id elasticsearch"); + dockerShell.run("getent group elasticsearch"); + + final Shell.Result passwdResult = dockerShell.run("getent passwd elasticsearch"); + final String homeDir = passwdResult.stdout.trim().split(":")[5]; + assertThat(homeDir, equalTo("/usr/share/elasticsearch")); + + Stream.of( + es.home, + es.data, + es.logs, + es.config + ).forEach(dir -> assertPermissionsAndOwnership(dir, p775)); + + Stream.of( + es.plugins, + es.modules + ).forEach(dir -> assertPermissionsAndOwnership(dir, p755)); + + // FIXME these files should all have the same permissions + Stream.of( + "elasticsearch.keystore", +// "elasticsearch.yml", + "jvm.options" +// "log4j2.properties" + ).forEach(configFile -> assertPermissionsAndOwnership(es.config(configFile), p660)); + + Stream.of( + "elasticsearch.yml", + "log4j2.properties" + ).forEach(configFile -> assertPermissionsAndOwnership(es.config(configFile), p644)); + + assertThat( + dockerShell.run(es.bin("elasticsearch-keystore") + " list").stdout, + containsString("keystore.seed")); + + Stream.of( + es.bin, + es.lib + ).forEach(dir -> assertPermissionsAndOwnership(dir, p755)); + + Stream.of( + "elasticsearch", + "elasticsearch-cli", + "elasticsearch-env", + "elasticsearch-keystore", + "elasticsearch-node", + "elasticsearch-plugin", + "elasticsearch-shard" + ).forEach(executable -> assertPermissionsAndOwnership(es.bin(executable), p755)); + + Stream.of( + "LICENSE.txt", + "NOTICE.txt", + "README.textile" + ).forEach(doc -> assertPermissionsAndOwnership(es.home.resolve(doc), p644)); + } + + private static void verifyDefaultInstallation(Installation es) { + Stream.of( + "elasticsearch-certgen", + "elasticsearch-certutil", + "elasticsearch-croneval", + "elasticsearch-saml-metadata", + "elasticsearch-setup-passwords", + "elasticsearch-sql-cli", + "elasticsearch-syskeygen", + "elasticsearch-users", + "x-pack-env", + "x-pack-security-env", + "x-pack-watcher-env" + ).forEach(executable -> assertPermissionsAndOwnership(es.bin(executable), p755)); + + // at this time we only install the current version of archive distributions, but if that changes we'll need to pass + // the version through here + assertPermissionsAndOwnership(es.bin("elasticsearch-sql-cli-" + getCurrentVersion() + ".jar"), p755); + + Stream.of( + "role_mapping.yml", + "roles.yml", + "users", + "users_roles" + ).forEach(configFile -> assertPermissionsAndOwnership(es.config(configFile), p660)); + } +} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/FileMatcher.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/FileMatcher.java index f6e598b5a0d5..89113ae098ea 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/FileMatcher.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/FileMatcher.java @@ -45,6 +45,7 @@ public class FileMatcher extends TypeSafeMatcher { public enum Fileness { File, Directory } + public static final Set p775 = fromString("rwxrwxr-x"); public static final Set p755 = fromString("rwxr-xr-x"); public static final Set p750 = fromString("rwxr-x---"); public static final Set p660 = fromString("rw-rw----"); diff --git
a/qa/os/src/test/java/org/elasticsearch/packaging/util/Installation.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Installation.java index 9e3ba5b52e28..c5fdf0106df2 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Installation.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Installation.java @@ -84,6 +84,20 @@ public class Installation { ); } + public static Installation ofContainer() { + String root = "/usr/share/elasticsearch"; + return new Installation( + Paths.get(root), + Paths.get(root + "/config"), + Paths.get(root + "/data"), + Paths.get(root + "/logs"), + Paths.get(root + "/plugins"), + Paths.get(root + "/modules"), + null, + null + ); + } + public Path bin(String executableName) { return bin.resolve(executableName); } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Platforms.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Platforms.java index 6258c1336b2f..b0778bf460ee 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Platforms.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Platforms.java @@ -65,6 +65,10 @@ public class Platforms { return new Shell().runIgnoreExitCode("which service").isSuccess(); } + public static boolean isDocker() { + return new Shell().runIgnoreExitCode("which docker").isSuccess(); + } + public static void onWindows(PlatformAction action) throws Exception { if (WINDOWS) { action.run(); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Shell.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Shell.java index c7cd20024b3c..55488522797c 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Shell.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Shell.java @@ -93,7 +93,8 @@ public class Shell { String formattedCommand = String.format(Locale.ROOT, command, args); return run(formattedCommand); } - private String[] getScriptCommand(String script) { + + protected String[] getScriptCommand(String script) { if (Platforms.WINDOWS) { return powershellCommand(script); } else { @@ -102,11 +103,11 @@ public class Shell { } private static String[] bashCommand(String script) { - return Stream.concat(Stream.of("bash", "-c"), Stream.of(script)).toArray(String[]::new); + return new String[] { "bash", "-c", script }; } private static String[] powershellCommand(String script) { - return Stream.concat(Stream.of("powershell.exe", "-Command"), Stream.of(script)).toArray(String[]::new); + return new String[] { "powershell.exe", "-Command", script }; } private Result runScript(String[] command) { diff --git a/settings.gradle b/settings.gradle index 78c529e9b145..eae2f01f58c3 100644 --- a/settings.gradle +++ b/settings.gradle @@ -30,6 +30,8 @@ List projects = [ 'distribution:docker', 'distribution:docker:oss-docker-build-context', 'distribution:docker:docker-build-context', + 'distribution:docker:oss-docker-export', + 'distribution:docker:docker-export', 'distribution:packages:oss-deb', 'distribution:packages:deb', 'distribution:packages:oss-no-jdk-deb', From d598083051d46e7d44f577473dde52c4a32f6776 Mon Sep 17 00:00:00 2001 From: debadair Date: Sat, 5 Oct 2019 09:47:32 -0700 Subject: [PATCH 11/55] [DOCS] Comment out tag in Task Management API Docs so it isn't rendered. (#47618) The tag for the shared content is being rendered in the output.
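As background for why commenting the markers out works, here is a minimal sketch with a hypothetical tag name and file (not the real docs source): asciidoctor's include tag filter matches the tag::...[] and end::...[] text even when it sits inside line comments, so the markers stop rendering while the region stays reusable.

// tag::shared-note[]
This shared paragraph can be pulled into other pages.
// end::shared-note[]

include::shared.asciidoc[tag=shared-note]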
--- docs/reference/cluster/tasks.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/cluster/tasks.asciidoc b/docs/reference/cluster/tasks.asciidoc index 9cbc421aa013..f116b98343bd 100644 --- a/docs/reference/cluster/tasks.asciidoc +++ b/docs/reference/cluster/tasks.asciidoc @@ -51,11 +51,11 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=wait_for_completion] [[tasks-api-response-codes]] ==== {api-response-codes-title} -tag::tasks-api-404[] +// tag::tasks-api-404[] `404` (Missing resources):: If `<task_id>` is specified but not found, this code indicates that there are no resources that match the request. -end::tasks-api-404[] +// end::tasks-api-404[] [[tasks-api-examples]] ==== {api-examples-title} From 9945d5cf35183263184e518aa73a72959fb15a0e Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Sat, 5 Oct 2019 23:36:21 +0300 Subject: [PATCH 12/55] [ML] Mute RegressionIT.testStopAndRestart (#47624) Relates #47612 --- .../org/elasticsearch/xpack/ml/integration/RegressionIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java index fed47445d066..d9cb886ba28b 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java @@ -233,6 +233,7 @@ public class RegressionIT extends MlNativeDataFrameAnalyticsIntegTestCase { "Finished analysis"); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/47612") public void testStopAndRestart() throws Exception { initialize("regression_stop_and_restart"); From 4f4185358439d92e0f2ee854fd45d85a490720b7 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 7 Oct 2019 08:38:20 +0200 Subject: [PATCH 13/55] Add Consistency Assertion to SnapshotsInProgress (#47598) Assert that the given input shards and indices are consistent. Also, fixed the equality check for SnapshotsInProgress. Before this change the tests never had more than a single waiting shard per index, so they never failed as a result of the waiting shards list not being ordered.
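To make the ordering point concrete, here is a minimal standalone sketch (hypothetical shard names, not code from this change): Java list equality is order-sensitive, so two views built from the same unordered contents can compare unequal.

import java.util.List;

public class OrderSensitiveEquality {
    public static void main(String[] args) {
        // Same members, different iteration order.
        List<String> a = List.of("shard-0", "shard-1");
        List<String> b = List.of("shard-1", "shard-0");
        // List.equals is order-sensitive, so this prints false, which is why the
        // order-dependent waitingIndices view had to be dropped from equals/hashCode.
        System.out.println(a.equals(b));
    }
}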
Follow up to #47552 --- .../cluster/SnapshotsInProgress.java | 18 ++++++++++++++++-- ...SnapshotsInProgressSerializationTests.java | 19 ++++++++++++------- 2 files changed, 28 insertions(+), 9 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 28d89a5f8d36..92a04dd03434 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -42,9 +42,12 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; import static org.elasticsearch.snapshots.SnapshotInfo.METADATA_FIELD_INTRODUCED; @@ -106,12 +109,25 @@ public class SnapshotsInProgress extends AbstractNamedDiffable implement } else { this.shards = shards; this.waitingIndices = findWaitingIndices(shards); + assert assertShardsConsistent(state, indices, shards); } this.repositoryStateId = repositoryStateId; this.failure = failure; this.userMetadata = userMetadata; } + private static boolean assertShardsConsistent(State state, List indices, + ImmutableOpenMap shards) { + if ((state == State.INIT || state == State.ABORTED) && shards.isEmpty()) { + return true; + } + final Set indexNames = indices.stream().map(IndexId::getName).collect(Collectors.toSet()); + final Set indexNamesInShards = new HashSet<>(); + shards.keysIt().forEachRemaining(s -> indexNamesInShards.add(s.getIndexName())); + assert indexNames.equals(indexNamesInShards) + : "Indices in shards " + indexNamesInShards + " differ from expected indices " + indexNames + " for state [" + state + "]"; + return true; + } public Entry(Snapshot snapshot, boolean includeGlobalState, boolean partial, State state, List indices, long startTime, long repositoryStateId, ImmutableOpenMap shards, Map userMetadata) { @@ -190,7 +206,6 @@ public class SnapshotsInProgress extends AbstractNamedDiffable implement if (!shards.equals(entry.shards)) return false; if (!snapshot.equals(entry.snapshot)) return false; if (state != entry.state) return false; - if (!waitingIndices.equals(entry.waitingIndices)) return false; if (repositoryStateId != entry.repositoryStateId) return false; return true; @@ -204,7 +219,6 @@ public class SnapshotsInProgress extends AbstractNamedDiffable implement result = 31 * result + (partial ? 
1 : 0); result = 31 * result + shards.hashCode(); result = 31 * result + indices.hashCode(); - result = 31 * result + waitingIndices.hashCode(); result = 31 * result + Long.hashCode(startTime); result = 31 * result + Long.hashCode(repositoryStateId); return result; diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java index 5d927249804b..7d59c71ed18e 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java @@ -36,6 +36,7 @@ import org.elasticsearch.test.AbstractDiffableWireSerializationTestCase; import java.util.ArrayList; import java.util.List; +import java.util.stream.Collectors; public class SnapshotsInProgressSerializationTests extends AbstractDiffableWireSerializationTestCase { @@ -62,13 +63,17 @@ public class SnapshotsInProgressSerializationTests extends AbstractDiffableWireS long startTime = randomLong(); long repositoryStateId = randomLong(); ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(); - int shardsCount = randomIntBetween(0, 10); - for (int j = 0; j < shardsCount; j++) { - ShardId shardId = new ShardId(new Index(randomAlphaOfLength(10), randomAlphaOfLength(10)), randomIntBetween(0, 10)); - String nodeId = randomAlphaOfLength(10); - ShardState shardState = randomFrom(ShardState.values()); - builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(nodeId, shardState, - shardState.failed() ? randomAlphaOfLength(10) : null, "1")); + final List esIndices = + indices.stream().map(i -> new Index(i.getName(), randomAlphaOfLength(10))).collect(Collectors.toList()); + for (Index idx : esIndices) { + int shardsCount = randomIntBetween(1, 10); + for (int j = 0; j < shardsCount; j++) { + ShardId shardId = new ShardId(idx, j); + String nodeId = randomAlphaOfLength(10); + ShardState shardState = randomFrom(ShardState.values()); + builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(nodeId, shardState, + shardState.failed() ? randomAlphaOfLength(10) : null, "1")); + } } ImmutableOpenMap shards = builder.build(); return new Entry(snapshot, includeGlobalState, partial, state, indices, startTime, repositoryStateId, shards, From 413aad95ca1e9067602c47697579ffcdeda7e69e Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 7 Oct 2019 08:46:01 +0200 Subject: [PATCH 14/55] Add IT for Snapshot Issue in 47552 (#47627) Adding a specific integration test that reproduces the problem fixed in #47552. The issue fixed here otherwise only reproduces in the snapshot resiliency tests, which are not available in 6.8, where the fix is also being backported.
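For readers unfamiliar with the polling helper the new test leans on, here is a rough standalone sketch of its semantics (simplified: fixed poll interval, no backoff, rethrows the last failure on timeout; the real ESTestCase helper differs in detail):

import java.util.concurrent.TimeUnit;

public final class AssertBusySketch {
    public static void assertBusy(Runnable assertion, long timeout, TimeUnit unit) throws InterruptedException {
        final long deadline = System.nanoTime() + unit.toNanos(timeout);
        while (true) {
            try {
                assertion.run(); // assertion passed, we are done
                return;
            } catch (AssertionError e) {
                if (System.nanoTime() > deadline) {
                    throw e; // out of time: surface the last failure
                }
                Thread.sleep(100); // wait a bit and retry
            }
        }
    }
}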
--- .../DedicatedClusterSnapshotRestoreIT.java | 49 +++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index e07107f790b4..328121006695 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -1234,6 +1234,55 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest }, 60L, TimeUnit.SECONDS); } + public void testDataNodeRestartAfterShardSnapshotFailure() throws Exception { + logger.info("--> starting a master node and two data nodes"); + internalCluster().startMasterOnlyNode(); + final List dataNodes = internalCluster().startDataOnlyNodes(2); + logger.info("--> creating repository"); + assertAcked(client().admin().cluster().preparePutRepository("test-repo") + .setType("mock").setSettings(Settings.builder() + .put("location", randomRepoPath()) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); + assertAcked(prepareCreate("test-idx", 0, Settings.builder() + .put("number_of_shards", 2).put("number_of_replicas", 0))); + ensureGreen(); + logger.info("--> indexing some data"); + final int numdocs = randomIntBetween(50, 100); + IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs]; + for (int i = 0; i < builders.length; i++) { + builders[i] = client().prepareIndex("test-idx", "type1", + Integer.toString(i)).setSource("field1", "bar " + i); + } + indexRandom(true, builders); + flushAndRefresh(); + blockAllDataNodes("test-repo"); + logger.info("--> snapshot"); + client(internalCluster().getMasterName()).admin().cluster() + .prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get(); + logger.info("--> restarting first data node, which should cause the primary shard on it to be failed"); + internalCluster().restartNode(dataNodes.get(0), InternalTestCluster.EMPTY_CALLBACK); + + logger.info("--> wait for shard snapshot of first primary to show as failed"); + assertBusy(() -> assertThat( + client().admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").get().getSnapshots() + .get(0).getShardsStats().getFailedShards(), is(1)), 60L, TimeUnit.SECONDS); + + logger.info("--> restarting second data node, which should cause the primary shard on it to be failed"); + internalCluster().restartNode(dataNodes.get(1), InternalTestCluster.EMPTY_CALLBACK); + + // check that snapshot completes with both failed shards being accounted for in the snapshot result + assertBusy(() -> { + GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster() + .prepareGetSnapshots("test-repo").setSnapshots("test-snap").setIgnoreUnavailable(true).get(); + assertEquals(1, snapshotsStatusResponse.getSnapshots("test-repo").size()); + SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots("test-repo").get(0); + assertTrue(snapshotInfo.state().toString(), snapshotInfo.state().completed()); + assertThat(snapshotInfo.totalShards(), is(2)); + assertThat(snapshotInfo.shardFailures(), hasSize(2)); + }, 60L, TimeUnit.SECONDS); + } + public void testRetentionLeasesClearedOnRestore() throws Exception { final String repoName = "test-repo-retention-leases"; assertAcked(client().admin().cluster().preparePutRepository(repoName) From 
2ea8844b170c3e975300e25373e7c31895459055 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Mon, 7 Oct 2019 09:28:50 +0200 Subject: [PATCH 15/55] Fail earlier Put Follow requests for closed leader indices (#47582) Today when following a new leader index, we fetch the remote cluster state, check the remote cluster license, check the user privileges, retrieve the index shard stats before initiating a CCR restore session. But if the leader index to follow is closed, we're executing a bunch of operations that would inevitably fail at some point (on retrieving the index shard stats, because this type of request forbids closed indices when resolving indices). We could fail a Put Follow request at the first step by checking the leader index state directly from the remote cluster state. This also helps the Resume Follow API to fail a bit earlier. --- .../xpack/ccr/CcrLicenseChecker.java | 6 ++- .../xpack/ccr/IndexFollowingIT.java | 43 +++++++++++++++++++ 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java index 8adb6140be09..a693cc57b522 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java @@ -32,6 +32,7 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.license.RemoteClusterLicenseChecker; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestStatus; @@ -130,7 +131,10 @@ public class CcrLicenseChecker { onFailure.accept(new IndexNotFoundException(leaderIndex)); return; } - + if (leaderIndexMetaData.getState() == IndexMetaData.State.CLOSE) { + onFailure.accept(new IndexClosedException(leaderIndexMetaData.getIndex())); + return; + } final Client remoteClient = client.getRemoteClusterClient(clusterAlias); hasPrivilegesToFollowIndices(remoteClient, new String[] {leaderIndex}, e -> { if (e == null) { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index 4a6c00505ca0..838cd0d4514c 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -70,6 +70,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.RetentionLeaseActions; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; @@ -109,6 +110,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BooleanSupplier; import java.util.function.Consumer; import java.util.stream.Collectors; +import java.util.stream.IntStream; import java.util.stream.Stream; import static java.util.Collections.singletonMap; @@ -743,6 +745,47 @@ public class IndexFollowingIT extends CcrIntegTestCase { ensureNoCcrTasks(); } + public void
testFollowClosedIndex() { + final String leaderIndex = "test-index"; + assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex) + .setSettings(Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build())); + assertAcked(leaderClient().admin().indices().prepareClose(leaderIndex)); + + final String followerIndex = "follow-test-index"; + expectThrows(IndexClosedException.class, + () -> followerClient().execute(PutFollowAction.INSTANCE, putFollow(leaderIndex, followerIndex)).actionGet()); + assertFalse(ESIntegTestCase.indexExists(followerIndex, followerClient())); + } + + public void testResumeFollowOnClosedIndex() throws Exception { + final String leaderIndex = "test-index"; + assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex) + .setSettings(Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .build())); + ensureLeaderGreen(leaderIndex); + + final int nbDocs = randomIntBetween(10, 100); + IntStream.range(0, nbDocs).forEach(i -> leaderClient().prepareIndex().setIndex(leaderIndex).setSource("field", i).get()); + + final String followerIndex = "follow-test-index"; + PutFollowAction.Response response = + followerClient().execute(PutFollowAction.INSTANCE, putFollow(leaderIndex, followerIndex)).actionGet(); + assertTrue(response.isFollowIndexCreated()); + assertTrue(response.isFollowIndexShardsAcked()); + assertTrue(response.isIndexFollowingStarted()); + + pauseFollow(followerIndex); + assertAcked(leaderClient().admin().indices().prepareClose(leaderIndex)); + + expectThrows(IndexClosedException.class, () -> + followerClient().execute(ResumeFollowAction.INSTANCE, resumeFollow(followerIndex)).actionGet()); + } + public void testDeleteFollowerIndex() throws Exception { assertAcked(leaderClient().admin().indices().prepareCreate("index1") .setSettings(Settings.builder() From b9584672a460e6b2fcff47ecdd72e856bafa7dc2 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Mon, 7 Oct 2019 09:33:26 +0300 Subject: [PATCH 16/55] Mute docker packaging tests (DockerTests) Tracked in #47639 --- .../groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java | 3 ++- .../java/org/elasticsearch/packaging/test/DockerTests.java | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java index 6203d0c9b12e..2f254b30466a 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java @@ -319,7 +319,8 @@ public class DistroTestPlugin implements Plugin { List currentDistros = new ArrayList<>(); List upgradeDistros = new ArrayList<>(); - for (Type type : Arrays.asList(Type.DEB, Type.RPM, Type.DOCKER)) { + // Docker disabled for https://github.com/elastic/elasticsearch/issues/47639 + for (Type type : Arrays.asList(Type.DEB, Type.RPM /*,Type.DOCKER*/)) { for (Flavor flavor : Flavor.values()) { for (boolean bundledJdk : Arrays.asList(true, false)) { // We should never add a Docker distro with bundledJdk == false diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java index 52205263d3ed..daad67f7fb11 100644 ---
a/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.packaging.util.Shell.Result; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; +import org.junit.Ignore; import java.nio.file.Files; import java.nio.file.Path; @@ -54,6 +55,7 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.Matchers.emptyString; import static org.junit.Assume.assumeTrue; +@Ignore("https://github.com/elastic/elasticsearch/issues/47639") public class DockerTests extends PackagingTestCase { protected DockerShell sh; From 01f91771b4449fc71a03783f56df0b074a70bdc3 Mon Sep 17 00:00:00 2001 From: Yogesh Gaikwad <902768+bizybot@users.noreply.github.com> Date: Mon, 7 Oct 2019 20:21:10 +1100 Subject: [PATCH 17/55] Add support to retrieve all API keys if user has privilege (#47274) This commit adds support to retrieve all API keys if the authenticated user is authorized to do so. This removes the restriction of specifying one of the parameters (like id, name, username and/or realm name) when the `owner` is set to `false`. Closes #46887 --- .../client/security/GetApiKeyRequest.java | 15 +++-- .../SecurityDocumentationIT.java | 12 ++++ .../security/GetApiKeyRequestTests.java | 2 - .../high-level/security/get-api-key.asciidoc | 8 +++ .../rest-api/security/get-api-keys.asciidoc | 13 ++++- .../security/action/GetApiKeyRequest.java | 12 ++-- .../action/GetApiKeyRequestTests.java | 3 - .../xpack/security/authc/ApiKeyService.java | 26 ++++----- .../security/authc/ApiKeyIntegTests.java | 58 +++++++++++++++++++ 9 files changed, 117 insertions(+), 32 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetApiKeyRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetApiKeyRequest.java index 942748978638..d4ab46b0b60b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetApiKeyRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/GetApiKeyRequest.java @@ -38,13 +38,13 @@ public final class GetApiKeyRequest implements Validatable, ToXContentObject { private final String name; private final boolean ownedByAuthenticatedUser; + private GetApiKeyRequest() { + this(null, null, null, null, false); + } + // pkg scope for testing GetApiKeyRequest(@Nullable String realmName, @Nullable String userName, @Nullable String apiKeyId, @Nullable String apiKeyName, boolean ownedByAuthenticatedUser) { - if (Strings.hasText(realmName) == false && Strings.hasText(userName) == false && Strings.hasText(apiKeyId) == false - && Strings.hasText(apiKeyName) == false && ownedByAuthenticatedUser == false) { - throwValidationError("One of [api key id, api key name, username, realm name] must be specified if [owner] flag is false"); - } if (Strings.hasText(apiKeyId) || Strings.hasText(apiKeyName)) { if (Strings.hasText(realmName) || Strings.hasText(userName)) { throwValidationError( @@ -147,6 +147,13 @@ public final class GetApiKeyRequest implements Validatable, ToXContentObject { return new GetApiKeyRequest(null, null, null, null, true); } + /** + * Creates get api key request to retrieve api key information for all api keys if the authenticated user is authorized to do so. 
+ */ + public static GetApiKeyRequest forAllApiKeys() { + return new GetApiKeyRequest(); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { return builder; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java index 88234f80e8fd..6a52146ad390 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java @@ -1985,6 +1985,18 @@ public class SecurityDocumentationIT extends ESRestHighLevelClientTestCase { verifyApiKey(getApiKeyResponse.getApiKeyInfos().get(0), expectedApiKeyInfo); } + { + // tag::get-all-api-keys-request + GetApiKeyRequest getApiKeyRequest = GetApiKeyRequest.forAllApiKeys(); + // end::get-all-api-keys-request + + GetApiKeyResponse getApiKeyResponse = client.security().getApiKey(getApiKeyRequest, RequestOptions.DEFAULT); + + assertThat(getApiKeyResponse.getApiKeyInfos(), is(notNullValue())); + assertThat(getApiKeyResponse.getApiKeyInfos().size(), is(1)); + verifyApiKey(getApiKeyResponse.getApiKeyInfos().get(0), expectedApiKeyInfo); + } + { // tag::get-user-realm-api-keys-request GetApiKeyRequest getApiKeyRequest = GetApiKeyRequest.usingRealmAndUserName("default_file", "test_user"); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetApiKeyRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetApiKeyRequestTests.java index cbd05ae4c5ac..ffd2e5651627 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetApiKeyRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetApiKeyRequestTests.java @@ -52,7 +52,6 @@ public class GetApiKeyRequestTests extends ESTestCase { public void testRequestValidationFailureScenarios() throws IOException { String[][] inputs = new String[][] { - { randomNullOrEmptyString(), randomNullOrEmptyString(), randomNullOrEmptyString(), randomNullOrEmptyString(), "false" }, { randomNullOrEmptyString(), "user", "api-kid", "api-kname", "false" }, { "realm", randomNullOrEmptyString(), "api-kid", "api-kname", "false" }, { "realm", "user", "api-kid", randomNullOrEmptyString(), "false" }, @@ -60,7 +59,6 @@ public class GetApiKeyRequestTests extends ESTestCase { { "realm", randomNullOrEmptyString(), randomNullOrEmptyString(), randomNullOrEmptyString(), "true"}, { randomNullOrEmptyString(), "user", randomNullOrEmptyString(), randomNullOrEmptyString(), "true"} }; String[] expectedErrorMessages = new String[] { - "One of [api key id, api key name, username, realm name] must be specified if [owner] flag is false", "username or realm name must not be specified when the api key id or api key name is specified", "username or realm name must not be specified when the api key id or api key name is specified", "username or realm name must not be specified when the api key id or api key name is specified", diff --git a/docs/java-rest/high-level/security/get-api-key.asciidoc b/docs/java-rest/high-level/security/get-api-key.asciidoc index 8a480df34f15..e8dad80b59b0 100644 --- a/docs/java-rest/high-level/security/get-api-key.asciidoc +++ b/docs/java-rest/high-level/security/get-api-key.asciidoc @@ -23,6 +23,8 @@ The +{request}+ supports retrieving 
API key information for . A specific key or all API keys owned by the current authenticated user +. All API keys if the user is authorized to do so + ===== Retrieve a specific API key by its id ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -59,6 +61,12 @@ include-tagged::{doc-tests-file}[get-user-realm-api-keys-request] include-tagged::{doc-tests-file}[get-api-keys-owned-by-authenticated-user-request] -------------------------------------------------- +===== Retrieve all API keys if the user is authorized to do so +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[get-all-api-keys-request] +-------------------------------------------------- + include::../execution.asciidoc[] [id="{upid}-{api}-response"] diff --git a/x-pack/docs/en/rest-api/security/get-api-keys.asciidoc b/x-pack/docs/en/rest-api/security/get-api-keys.asciidoc index b65523574468..07951618500b 100644 --- a/x-pack/docs/en/rest-api/security/get-api-keys.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-api-keys.asciidoc @@ -51,8 +51,10 @@ by the currently authenticated user. Defaults to false. The 'realm_name' or 'username' parameters cannot be specified when this parameter is set to 'true' as they are assumed to be the currently authenticated ones. -NOTE: At least one of "id", "name", "username" and "realm_name" must be specified - if "owner" is "false" (default). +NOTE: When none of the parameters "id", "name", "username" and "realm_name" +are specified, and "owner" is set to false, then all API keys are retrieved +if the user is authorized. If the user is not authorized to retrieve other users' +API keys, an error is returned. [[security-api-get-api-key-example]] ==== {api-examples-title} @@ -123,6 +125,13 @@ GET /_security/api_key?owner=true -------------------------------------------------- // TEST[continued] +The following example retrieves all API keys if the user is authorized to do so: +[source,console] +-------------------------------------------------- +GET /_security/api_key +-------------------------------------------------- +// TEST[continued] + The following creates an API key [source,console] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequest.java index f08cebe8141f..feb4ea509d7b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequest.java @@ -133,14 +133,16 @@ public final class GetApiKeyRequest extends ActionRequest { return new GetApiKeyRequest(null, null, null, null, true); } + /** + * Creates get api key request to retrieve api key information for all api keys if the authenticated user is authorized to do so.
+ */ + public static GetApiKeyRequest forAllApiKeys() { + return new GetApiKeyRequest(); + } + @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; - if (Strings.hasText(realmName) == false && Strings.hasText(userName) == false && Strings.hasText(apiKeyId) == false - && Strings.hasText(apiKeyName) == false && ownedByAuthenticatedUser == false) { - validationException = addValidationError("One of [api key id, api key name, username, realm name] must be specified if " + - "[owner] flag is false", validationException); - } if (Strings.hasText(apiKeyId) || Strings.hasText(apiKeyName)) { if (Strings.hasText(realmName) || Strings.hasText(userName)) { validationException = addValidationError( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequestTests.java index 1c5548af70a8..f38920cf9cfd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/GetApiKeyRequestTests.java @@ -76,8 +76,6 @@ public class GetApiKeyRequestTests extends ESTestCase { } String[][] inputs = new String[][]{ - {randomNullOrEmptyString(), randomNullOrEmptyString(), randomNullOrEmptyString(), - randomNullOrEmptyString(), "false"}, {randomNullOrEmptyString(), "user", "api-kid", "api-kname", "false"}, {"realm", randomNullOrEmptyString(), "api-kid", "api-kname", "false"}, {"realm", "user", "api-kid", randomNullOrEmptyString(), "false"}, @@ -86,7 +84,6 @@ public class GetApiKeyRequestTests extends ESTestCase { {randomNullOrEmptyString(), "user", randomNullOrEmptyString(), randomNullOrEmptyString(), "true"} }; String[][] expectedErrorMessages = new String[][]{ - {"One of [api key id, api key name, username, realm name] must be specified if [owner] flag is false"}, {"username or realm name must not be specified when the api key id or api key name is specified", "only one of [api key id, api key name] can be specified"}, {"username or realm name must not be specified when the api key id or api key name is specified", diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index ec3c741df3d7..cc08c99cffcb 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -881,22 +881,16 @@ public class ApiKeyService { public void getApiKeys(String realmName, String username, String apiKeyName, String apiKeyId, ActionListener listener) { ensureEnabled(); - if (Strings.hasText(realmName) == false && Strings.hasText(username) == false && Strings.hasText(apiKeyName) == false - && Strings.hasText(apiKeyId) == false) { - logger.trace("none of the parameters [api key id, api key name, username, realm name] were specified for retrieval"); - listener.onFailure(new IllegalArgumentException("One of [api key id, api key name, username, realm name] must be specified")); - } else { - findApiKeysForUserRealmApiKeyIdAndNameCombination(realmName, username, apiKeyName, apiKeyId, false, false, - ActionListener.wrap(apiKeyInfos -> { - if (apiKeyInfos.isEmpty()) { - logger.debug("No active api 
keys found for realm [{}], user [{}], api key name [{}] and api key id [{}]", - realmName, username, apiKeyName, apiKeyId); - listener.onResponse(GetApiKeyResponse.emptyResponse()); - } else { - listener.onResponse(new GetApiKeyResponse(apiKeyInfos)); - } - }, listener::onFailure)); - } + findApiKeysForUserRealmApiKeyIdAndNameCombination(realmName, username, apiKeyName, apiKeyId, false, false, + ActionListener.wrap(apiKeyInfos -> { + if (apiKeyInfos.isEmpty()) { + logger.debug("No active api keys found for realm [{}], user [{}], api key name [{}] and api key id [{}]", + realmName, username, apiKeyName, apiKeyId); + listener.onResponse(GetApiKeyResponse.emptyResponse()); + } else { + listener.onResponse(new GetApiKeyResponse(apiKeyInfos)); + } + }, listener::onFailure)); } /** diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java index a85dbb84e1ce..1588ede5fe45 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -53,6 +53,7 @@ import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.SECURITY_MAIN_ALIAS; @@ -93,6 +94,8 @@ public class ApiKeyIntegTests extends SecurityIntegTestCase { @Override public String configRoles() { return super.configRoles() + "\n" + + "no_api_key_role:\n" + + " cluster: [\"manage_token\"]\n" + "manage_api_key_role:\n" + " cluster: [\"manage_api_key\"]\n" + "manage_own_api_key_role:\n" + @@ -104,6 +107,7 @@ public class ApiKeyIntegTests extends SecurityIntegTestCase { final String usersPasswdHashed = new String( getFastStoredHashAlgoForTests().hash(SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)); return super.configUsers() + + "user_with_no_api_key_role:" + usersPasswdHashed + "\n" + "user_with_manage_api_key_role:" + usersPasswdHashed + "\n" + "user_with_manage_own_api_key_role:" + usersPasswdHashed + "\n"; } @@ -111,6 +115,7 @@ public class ApiKeyIntegTests extends SecurityIntegTestCase { @Override public String configUsersRoles() { return super.configUsersRoles() + + "no_api_key_role:user_with_no_api_key_role\n" + "manage_api_key_role:user_with_manage_api_key_role\n" + "manage_own_api_key_role:user_with_manage_own_api_key_role\n"; } @@ -549,6 +554,49 @@ public class ApiKeyIntegTests extends SecurityIntegTestCase { response, userWithManageApiKeyRoleApiKeys.stream().map(o -> o.getId()).collect(Collectors.toSet()), null); } + public void testGetAllApiKeys() throws InterruptedException, ExecutionException { + int noOfSuperuserApiKeys = randomIntBetween(3, 5); + int noOfApiKeysForUserWithManageApiKeyRole = randomIntBetween(3, 5); + int noOfApiKeysForUserWithManageOwnApiKeyRole = randomIntBetween(3,7); + List defaultUserCreatedKeys = createApiKeys(noOfSuperuserApiKeys, null); + List userWithManageApiKeyRoleApiKeys = createApiKeys("user_with_manage_api_key_role", + noOfApiKeysForUserWithManageApiKeyRole, null, "monitor"); + List userWithManageOwnApiKeyRoleApiKeys = createApiKeys("user_with_manage_own_api_key_role", + noOfApiKeysForUserWithManageOwnApiKeyRole, null, 
"monitor"); + + final Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken + .basicAuthHeaderValue("user_with_manage_api_key_role", SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); + PlainActionFuture listener = new PlainActionFuture<>(); + client.execute(GetApiKeyAction.INSTANCE, new GetApiKeyRequest(), listener); + GetApiKeyResponse response = listener.get(); + int totalApiKeys = noOfSuperuserApiKeys + noOfApiKeysForUserWithManageApiKeyRole + noOfApiKeysForUserWithManageOwnApiKeyRole; + List allApiKeys = new ArrayList<>(); + Stream.of(defaultUserCreatedKeys, userWithManageApiKeyRoleApiKeys, userWithManageOwnApiKeyRoleApiKeys).forEach( + allApiKeys::addAll); + verifyGetResponse(new String[]{SecuritySettingsSource.TEST_SUPERUSER, "user_with_manage_api_key_role", + "user_with_manage_own_api_key_role"}, totalApiKeys, allApiKeys, response, + allApiKeys.stream().map(o -> o.getId()).collect(Collectors.toSet()), null); + } + + public void testGetAllApiKeysFailsForUserWithNoRoleOrRetrieveOwnApiKeyRole() throws InterruptedException, ExecutionException { + int noOfSuperuserApiKeys = randomIntBetween(3, 5); + int noOfApiKeysForUserWithManageApiKeyRole = randomIntBetween(3, 5); + int noOfApiKeysForUserWithManageOwnApiKeyRole = randomIntBetween(3,7); + List defaultUserCreatedKeys = createApiKeys(noOfSuperuserApiKeys, null); + List userWithManageApiKeyRoleApiKeys = createApiKeys("user_with_manage_api_key_role", + noOfApiKeysForUserWithManageApiKeyRole, null, "monitor"); + List userWithManageOwnApiKeyRoleApiKeys = createApiKeys("user_with_manage_own_api_key_role", + noOfApiKeysForUserWithManageOwnApiKeyRole, null, "monitor"); + + final String withUser = randomFrom("user_with_manage_own_api_key_role", "user_with_no_api_key_role"); + final Client client = client().filterWithHeader(Collections.singletonMap("Authorization", UsernamePasswordToken + .basicAuthHeaderValue(withUser, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); + PlainActionFuture listener = new PlainActionFuture<>(); + client.execute(GetApiKeyAction.INSTANCE, new GetApiKeyRequest(), listener); + ElasticsearchSecurityException ese = expectThrows(ElasticsearchSecurityException.class, () -> listener.actionGet()); + assertErrorMessage(ese, "cluster:admin/xpack/security/api_key/get", withUser); + } + public void testInvalidateApiKeysOwnedByCurrentAuthenticatedUser() throws InterruptedException, ExecutionException { int noOfSuperuserApiKeys = randomIntBetween(3, 5); int noOfApiKeysForUserWithManageApiKeyRole = randomIntBetween(3, 5); @@ -632,6 +680,11 @@ public class ApiKeyIntegTests extends SecurityIntegTestCase { private void verifyGetResponse(String user, int expectedNumberOfApiKeys, List responses, GetApiKeyResponse response, Set validApiKeyIds, List invalidatedApiKeyIds) { + verifyGetResponse(new String[]{user}, expectedNumberOfApiKeys, responses, response, validApiKeyIds, invalidatedApiKeyIds); + } + + private void verifyGetResponse(String[] user, int expectedNumberOfApiKeys, List responses, + GetApiKeyResponse response, Set validApiKeyIds, List invalidatedApiKeyIds) { assertThat(response.getApiKeyInfos().length, equalTo(expectedNumberOfApiKeys)); List expectedIds = responses.stream().filter(o -> validApiKeyIds.contains(o.getId())).map(o -> o.getId()) .collect(Collectors.toList()); @@ -680,4 +733,9 @@ public class ApiKeyIntegTests extends SecurityIntegTestCase { assertThat(ese.getMessage(), is("action [" + action + "] is unauthorized for API key id [" + apiKeyId + "] 
of user [" + userName + "]")); } + + private void assertErrorMessage(final ElasticsearchSecurityException ese, String action, String userName) { + assertThat(ese.getMessage(), + is("action [" + action + "] is unauthorized for user [" + userName + "]")); + } } From 8c4de64f4ffde96ab1fed11f795e698f0a249603 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 7 Oct 2019 12:43:07 +0200 Subject: [PATCH 18/55] Simplify Snapshot Delete Further (#47626) This change removes the special path for deleting the index metadata blobs and moves deleting them to the bulk delete of unreferenced blobs at the end of the snapshot delete process. This saves N RPC calls for a snapshot containing N indices and simplifies the code. Also, this change moves the unreferenced data cleanup up the stack to make it more obvious that any exceptions during this phase will be ignored and not fail the delete request. Lastly, this change removes the needless chaining of first deleting unreferenced data from the snapshot delete and then running the stale data cleanup (that would also run from the cleanup endpoint) and simply fires off the cleanup right after updating the repository data (index-N) in parallel to the other delete operations to speed up the delete some more. --- .../blobstore/BlobStoreRepository.java | 130 +++++++++--------- 1 file changed, 65 insertions(+), 65 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 7f6ac6e16344..c4acfd44106d 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -107,6 +107,7 @@ import java.util.Set; import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName; @@ -387,12 +388,43 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp ActionListener listener) throws IOException { final RepositoryData updatedRepositoryData = repositoryData.removeSnapshot(snapshotId); writeIndexGen(updatedRepositoryData, repositoryStateId); + final ActionListener afterCleanupsListener = + new GroupedActionListener<>(ActionListener.wrap(() -> listener.onResponse(null)), 2); + + // Run unreferenced blobs cleanup in parallel to snapshot deletion + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.wrap(afterCleanupsListener, + l -> cleanupStaleBlobs(foundIndices, rootBlobs, updatedRepositoryData, ActionListener.map(l, ignored -> null)))); + deleteIndices( updatedRepositoryData, repositoryData.indicesToUpdateAfterRemovingSnapshot(snapshotId), snapshotId, - ActionListener.delegateFailure(listener, - (l, v) -> cleanupStaleBlobs(foundIndices, rootBlobs, updatedRepositoryData, ActionListener.map(l, ignored -> null)))); + ActionListener.runAfter( + ActionListener.wrap( + deleteResults -> { + // Now that all metadata (RepositoryData at the repo root as well as index-N blobs in all shard paths) + // has been updated we can execute the delete operations for all blobs that have become unreferenced as a result + final String basePath = basePath().buildAsString(); + final int basePathLen = basePath.length(); + blobContainer().deleteBlobsIgnoringIfNotExists( + Stream.concat(
deleteResults.stream().flatMap(shardResult -> { + final String shardPath = + shardContainer(shardResult.indexId, shardResult.shardId).path().buildAsString(); + return shardResult.blobsToDelete.stream().map(blob -> shardPath + blob); + }), + deleteResults.stream().map(shardResult -> shardResult.indexId).distinct().map(indexId -> + indexContainer(indexId).path().buildAsString() + globalMetaDataFormat.blobName(snapshotId.getUUID())) + ).map(absolutePath -> { + assert absolutePath.startsWith(basePath); + return absolutePath.substring(basePathLen); + }).collect(Collectors.toList())); + }, + // Any exceptions after we have updated the root level RepositoryData are only logged but won't fail the delete request + e -> logger.warn( + () -> new ParameterizedMessage("[{}] Failed to delete some blobs during snapshot delete", snapshotId), e)), + () -> afterCleanupsListener.onResponse(null)) + ); } /** @@ -551,90 +583,58 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp * @param listener Listener to invoke when finished */ private void deleteIndices(RepositoryData repositoryData, List indices, SnapshotId snapshotId, - ActionListener listener) { + ActionListener> listener) { + if (indices.isEmpty()) { - listener.onResponse(null); + listener.onResponse(Collections.emptyList()); return; } - // listener to complete once all shards folders affected by this delete have been added new metadata blobs without this snapshot - final StepListener> deleteFromMetaListener = new StepListener<>(); // Listener that flattens out the delete results for each index final ActionListener> deleteIndexMetaDataListener = new GroupedActionListener<>( - ActionListener.map(deleteFromMetaListener, - results -> results.stream().flatMap(Collection::stream).collect(Collectors.toList())), indices.size()); + ActionListener.map(listener, res -> res.stream().flatMap(Collection::stream).collect(Collectors.toList())), indices.size()); final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); for (IndexId indexId : indices) { executor.execute(ActionRunnable.wrap(deleteIndexMetaDataListener, deleteIdxMetaListener -> { - IndexMetaData indexMetaData = null; + final IndexMetaData indexMetaData; try { indexMetaData = getSnapshotIndexMetaData(snapshotId, indexId); } catch (Exception ex) { logger.warn(() -> new ParameterizedMessage("[{}] [{}] failed to read metadata for index", snapshotId, indexId.getName()), ex); - } - deleteIndexMetaDataBlobIgnoringErrors(snapshotId, indexId); - if (indexMetaData != null) { - final int shardCount = indexMetaData.getNumberOfShards(); - assert shardCount > 0 : "index did not have positive shard count, get [" + shardCount + "]"; - // Listener for collecting the results of removing the snapshot from each shard's metadata in the current index - final ActionListener allShardsListener = - new GroupedActionListener<>(deleteIdxMetaListener, shardCount); - final Index index = indexMetaData.getIndex(); - for (int shardId = 0; shardId < indexMetaData.getNumberOfShards(); shardId++) { - final ShardId shard = new ShardId(index, shardId); - executor.execute(new AbstractRunnable() { - @Override - protected void doRun() throws Exception { - allShardsListener.onResponse( - deleteShardSnapshot(repositoryData, indexId, shard, snapshotId)); - } - - @Override - public void onFailure(Exception ex) { - logger.warn(() -> new ParameterizedMessage("[{}] failed to delete shard data for shard [{}][{}]", - snapshotId, indexId.getName(), shard.id()), ex); - // Just passing null here to count 
down the listener instead of failing it, the stale data left behind
-                                // here will be retried in the next delete or repository cleanup
-                                allShardsListener.onResponse(null);
-                            }
-                        });
-                    }
-                } else {
                     // Just invoke the listener without any shard generations to count it down, this index will be cleaned up
                     // by the stale data cleanup in the end.
                     deleteIdxMetaListener.onResponse(null);
+                    return;
+                }
+                final int shardCount = indexMetaData.getNumberOfShards();
+                assert shardCount > 0 : "index did not have positive shard count, get [" + shardCount + "]";
+                // Listener for collecting the results of removing the snapshot from each shard's metadata in the current index
+                final ActionListener allShardsListener =
+                    new GroupedActionListener<>(deleteIdxMetaListener, shardCount);
+                final Index index = indexMetaData.getIndex();
+                for (int shardId = 0; shardId < indexMetaData.getNumberOfShards(); shardId++) {
+                    final ShardId shard = new ShardId(index, shardId);
+                    executor.execute(new AbstractRunnable() {
+                        @Override
+                        protected void doRun() throws Exception {
+                            allShardsListener.onResponse(
+                                deleteShardSnapshot(repositoryData, indexId, shard, snapshotId));
+                        }
+
+                        @Override
+                        public void onFailure(Exception ex) {
+                            logger.warn(() -> new ParameterizedMessage("[{}] failed to delete shard data for shard [{}][{}]",
+                                snapshotId, indexId.getName(), shard.id()), ex);
+                            // Just passing null here to count down the listener instead of failing it, the stale data left behind
+                            // here will be retried in the next delete or repository cleanup
+                            allShardsListener.onResponse(null);
+                        }
+                    });
                 }
             }));
         }
-
-        // Delete all the now unreferenced blobs in the shard paths
-        deleteFromMetaListener.whenComplete(newGens -> {
-            final String basePath = basePath().buildAsString();
-            final int basePathLen = basePath.length();
-            blobContainer().deleteBlobsIgnoringIfNotExists(
-                newGens.stream().flatMap(shardBlob -> {
-                    final String shardPathAbs = shardContainer(shardBlob.indexId, shardBlob.shardId).path().buildAsString();
-                    assert shardPathAbs.startsWith(basePath);
-                    final String pathToShard = shardPathAbs.substring(basePathLen);
-                    return shardBlob.blobsToDelete.stream().map(blob -> pathToShard + blob);
-                }).collect(Collectors.toList())
-            );
-            listener.onResponse(null);
-        }, e -> {
-            logger.warn(() -> new ParameterizedMessage("[{}] Failed to delete some blobs during snapshot delete", snapshotId), e);
-            listener.onResponse(null);
-        });
-    }
-
-    private void deleteIndexMetaDataBlobIgnoringErrors(SnapshotId snapshotId, IndexId indexId) {
-        try {
-            indexMetaDataFormat.delete(indexContainer(indexId), snapshotId.getUUID());
-        } catch (IOException ex) {
-            logger.warn(() -> new ParameterizedMessage("[{}] failed to delete metadata for index [{}]",
-                snapshotId, indexId.getName(), ex);
-        }
     }

     @Override
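Note: the key pattern in the patch above is a grouped listener of size 2 that
acknowledges the delete request only once both the parallel stale-blob cleanup
and the per-index deletes have completed. The following is a minimal,
self-contained sketch of that pattern, using a plain CountDownLatch as a
stand-in for GroupedActionListener; it is illustrative only and not code from
this patch:

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class ParallelCleanupSketch {
        public static void main(String[] args) throws InterruptedException {
            ExecutorService snapshotPool = Executors.newFixedThreadPool(2);
            // Stand-in for GroupedActionListener<>(..., 2): completes after two count-downs
            CountDownLatch bothCleanupsDone = new CountDownLatch(2);

            // Fired off right after index-N is updated, in parallel to the per-index deletes
            snapshotPool.execute(() -> {
                try {
                    System.out.println("cleaning up stale root-level blobs");
                } finally {
                    // Failures here would only be logged; the latch is counted down either way
                    bothCleanupsDone.countDown();
                }
            });

            // Per-index/shard metadata updates plus bulk delete of unreferenced blobs
            snapshotPool.execute(() -> {
                try {
                    System.out.println("deleting unreferenced shard and index metadata blobs");
                } finally {
                    bothCleanupsDone.countDown();
                }
            });

            bothCleanupsDone.await(); // acknowledge the delete request only when both finish
            System.out.println("snapshot delete acknowledged");
            snapshotPool.shutdown();
        }
    }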
From 924b298259c06029b39733e960024d29bc0a1f88 Mon Sep 17 00:00:00 2001
From: Yogesh Gaikwad <902768+bizybot@users.noreply.github.com>
Date: Mon, 7 Oct 2019 22:00:23 +1100
Subject: [PATCH 19/55] Add 'create_doc' index privilege (#45806)

Use case: a user with the `create_doc` index privilege is allowed only to
index new documents, either via the Index API or the Bulk API.

There are two cases we need to consider:
- **User indexing a new document without specifying an Id.** In this case
  ES auto-generates an Id and, since ES 7.5.0 onwards defaults the
  `op_type` to `create`, we just need to authorize on the `op_type`.
- **User indexing a new document with an Id.** This is problematic, as we
  do not know whether a document with that Id exists or not.

If the `op_type` is `create`, then we can assume the user is trying to add
a new document; if it already exists, the index engine will throw an error.
Given both of these cases, we can safely authorize based on the `op_type`
value: if the value is `create`, then a user with the `create_doc`
privilege is authorized to index new documents.

In the `AuthorizationService`, when authorizing a bulk request, we check
the implied action. This change appends `:op_type/index` or
`:op_type/create` to indicate the implied index action.

---
 .../client/security/user/privileges/Role.java | 3 +-
 .../security/get-builtin-privileges.asciidoc | 1 +
 .../authz/privilege/IndexPrivilege.java | 4 +
 .../security/authz/AuthorizationService.java | 5 +-
 .../CreateDocsIndexPrivilegeTests.java | 112 ++++++++++++++++++
 .../authz/AuthorizationServiceTests.java | 8 +-
 .../test/privileges/11_builtin.yml | 2 +-
 7 files changed, 128 insertions(+), 7 deletions(-)
 create mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/integration/CreateDocsIndexPrivilegeTests.java

diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java
index e8e1a104d8c2..c1eae86c9f18 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java
@@ -345,8 +345,9 @@ public final class Role {
         public static final String VIEW_INDEX_METADATA = "view_index_metadata";
         public static final String MANAGE_FOLLOW_INDEX = "manage_follow_index";
         public static final String MANAGE_ILM = "manage_ilm";
+        public static final String CREATE_DOC = "create_doc";
         public static final String[] ALL_ARRAY = new String[] { NONE, ALL, READ, READ_CROSS, CREATE, INDEX, DELETE, WRITE, MONITOR, MANAGE,
-            DELETE_INDEX, CREATE_INDEX, VIEW_INDEX_METADATA, MANAGE_FOLLOW_INDEX, MANAGE_ILM };
+            DELETE_INDEX, CREATE_INDEX, VIEW_INDEX_METADATA, MANAGE_FOLLOW_INDEX, MANAGE_ILM, CREATE_DOC };
     }
 }

diff --git a/x-pack/docs/en/rest-api/security/get-builtin-privileges.asciidoc b/x-pack/docs/en/rest-api/security/get-builtin-privileges.asciidoc
index ce84873173fc..d2a329b9638b 100644
--- a/x-pack/docs/en/rest-api/security/get-builtin-privileges.asciidoc
+++ b/x-pack/docs/en/rest-api/security/get-builtin-privileges.asciidoc
@@ -95,6 +95,7 @@ A successful call returns an object with "cluster" and "index" fields.
"index" : [ "all", "create", + "create_doc", "create_index", "delete", "delete_index", diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java index 08b67396c600..8ae7337ba195 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java @@ -48,6 +48,8 @@ public final class IndexPrivilege extends Privilege { ClusterSearchShardsAction.NAME); private static final Automaton CREATE_AUTOMATON = patterns("indices:data/write/index*", "indices:data/write/bulk*", PutMappingAction.NAME); + private static final Automaton CREATE_DOC_AUTOMATON = patterns("indices:data/write/index", "indices:data/write/index[*", + "indices:data/write/index:op_type/create", "indices:data/write/bulk*", PutMappingAction.NAME); private static final Automaton INDEX_AUTOMATON = patterns("indices:data/write/index*", "indices:data/write/bulk*", "indices:data/write/update*", PutMappingAction.NAME); private static final Automaton DELETE_AUTOMATON = patterns("indices:data/write/delete*", "indices:data/write/bulk*"); @@ -73,6 +75,7 @@ public final class IndexPrivilege extends Privilege { public static final IndexPrivilege INDEX = new IndexPrivilege("index", INDEX_AUTOMATON); public static final IndexPrivilege DELETE = new IndexPrivilege("delete", DELETE_AUTOMATON); public static final IndexPrivilege WRITE = new IndexPrivilege("write", WRITE_AUTOMATON); + public static final IndexPrivilege CREATE_DOC = new IndexPrivilege("create_doc", CREATE_DOC_AUTOMATON); public static final IndexPrivilege MONITOR = new IndexPrivilege("monitor", MONITOR_AUTOMATON); public static final IndexPrivilege MANAGE = new IndexPrivilege("manage", MANAGE_AUTOMATON); public static final IndexPrivilege DELETE_INDEX = new IndexPrivilege("delete_index", DELETE_INDEX_AUTOMATON); @@ -93,6 +96,7 @@ public final class IndexPrivilege extends Privilege { entry("delete", DELETE), entry("write", WRITE), entry("create", CREATE), + entry("create_doc", CREATE_DOC), entry("delete_index", DELETE_INDEX), entry("view_index_metadata", VIEW_METADATA), entry("read_cross_cluster", READ_CROSS_CLUSTER), diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java index bd81d6db4743..a9a971a09142 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java @@ -93,6 +93,8 @@ public class AuthorizationService { public static final String AUTHORIZATION_INFO_KEY = "_authz_info"; private static final AuthorizationInfo SYSTEM_AUTHZ_INFO = () -> Collections.singletonMap(PRINCIPAL_ROLES_FIELD_NAME, new String[] { SystemUser.ROLE_NAME }); + private static final String IMPLIED_INDEX_ACTION = IndexAction.NAME + ":op_type/index"; + private static final String IMPLIED_CREATE_ACTION = IndexAction.NAME + ":op_type/create"; private static final Logger logger = LogManager.getLogger(AuthorizationService.class); @@ -536,8 +538,9 @@ public class AuthorizationService { final DocWriteRequest docWriteRequest = item.request(); switch 
(docWriteRequest.opType()) { case INDEX: + return IMPLIED_INDEX_ACTION; case CREATE: - return IndexAction.NAME; + return IMPLIED_CREATE_ACTION; case UPDATE: return UpdateAction.NAME; case DELETE: diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/CreateDocsIndexPrivilegeTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/CreateDocsIndexPrivilegeTests.java new file mode 100644 index 000000000000..edc9e7e4fd94 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/CreateDocsIndexPrivilegeTests.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.integration; + +import org.elasticsearch.client.Request; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.junit.Before; + +import java.io.IOException; + +public class CreateDocsIndexPrivilegeTests extends AbstractPrivilegeTestCase { + private static final String INDEX_NAME = "index-1"; + private static final String CREATE_DOC_USER = "create_doc_user"; + private String jsonDoc = "{ \"name\" : \"elasticsearch\", \"body\": \"foo bar\" }"; + private static final String ROLES = + "all_indices_role:\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ all ]\n" + + "create_doc_role:\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ create_doc ]\n"; + + private static final String USERS_ROLES = + "all_indices_role:admin\n" + + "create_doc_role:" + CREATE_DOC_USER + "\n"; + + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + + @Override + protected String configRoles() { + return super.configRoles() + "\n" + ROLES; + } + + @Override + protected String configUsers() { + final String usersPasswdHashed = new String(Hasher.resolve( + randomFrom("pbkdf2", "pbkdf2_1000", "bcrypt", "bcrypt9")).hash(new SecureString("passwd".toCharArray()))); + + return super.configUsers() + + "admin:" + usersPasswdHashed + "\n" + + CREATE_DOC_USER + ":" + usersPasswdHashed + "\n"; + } + + @Override + protected String configUsersRoles() { + return super.configUsersRoles() + USERS_ROLES; + } + + @Before + public void insertBaseDocumentsAsAdmin() throws Exception { + Request request = new Request("PUT", "/" + INDEX_NAME + "/_doc/1"); + request.setJsonEntity(jsonDoc); + request.addParameter("refresh", "true"); + assertAccessIsAllowed("admin", request); + } + + public void testCreateDocUserCanIndexNewDocumentsWithAutoGeneratedId() throws IOException { + assertAccessIsAllowed(CREATE_DOC_USER, "POST", "/" + INDEX_NAME + "/_doc", "{ \"foo\" : \"bar\" }"); + } + + public void testCreateDocUserCanIndexNewDocumentsWithExternalIdAndOpTypeIsCreate() throws IOException { + assertAccessIsAllowed(CREATE_DOC_USER, randomFrom("PUT", "POST"), "/" + INDEX_NAME + "/_doc/2?op_type=create", "{ \"foo\" : " + + "\"bar\" }"); + } + + public void testCreateDocUserIsDeniedToIndexNewDocumentsWithExternalIdAndOpTypeIsIndex() throws IOException { + assertAccessIsDenied(CREATE_DOC_USER, randomFrom("PUT", "POST"), "/" + INDEX_NAME + "/_doc/3", "{ \"foo\" : \"bar\" }"); + } + + public void testCreateDocUserIsDeniedToIndexUpdatesToExistingDocument() throws IOException { + assertAccessIsDenied(CREATE_DOC_USER, "POST", "/" + INDEX_NAME + 
"/_doc/1/_update", "{ \"doc\" : { \"foo\" : \"baz\" } }"); + assertAccessIsDenied(CREATE_DOC_USER, "PUT", "/" + INDEX_NAME + "/_doc/1", "{ \"foo\" : \"baz\" }"); + } + + public void testCreateDocUserCanIndexNewDocumentsWithAutoGeneratedIdUsingBulkApi() throws IOException { + assertAccessIsAllowed(CREATE_DOC_USER, randomFrom("PUT", "POST"), + "/" + INDEX_NAME + "/_bulk", "{ \"index\" : { } }\n{ \"foo\" : \"bar\" }\n"); + } + + public void testCreateDocUserCanIndexNewDocumentsWithAutoGeneratedIdAndOpTypeCreateUsingBulkApi() throws IOException { + assertAccessIsAllowed(CREATE_DOC_USER, randomFrom("PUT", "POST"), + "/" + INDEX_NAME + "/_bulk", "{ \"create\" : { } }\n{ \"foo\" : \"bar\" }\n"); + } + + public void testCreateDocUserCanIndexNewDocumentsWithExternalIdAndOpTypeIsCreateUsingBulkApi() throws IOException { + assertAccessIsAllowed(CREATE_DOC_USER, randomFrom("PUT", "POST"), + "/" + INDEX_NAME + "/_bulk", "{ \"create\" : { \"_id\" : \"4\" } }\n{ \"foo\" : \"bar\" }\n"); + } + + public void testCreateDocUserIsDeniedToIndexNewDocumentsWithExternalIdAndOpTypeIsIndexUsingBulkApi() throws IOException { + assertBodyHasAccessIsDenied(CREATE_DOC_USER, randomFrom("PUT", "POST"), + "/" + INDEX_NAME + "/_bulk", "{ \"index\" : { \"_id\" : \"5\" } }\n{ \"foo\" : \"bar\" }\n"); + } + + public void testCreateDocUserIsDeniedToIndexUpdatesToExistingDocumentUsingBulkApi() throws IOException { + assertBodyHasAccessIsDenied(CREATE_DOC_USER, randomFrom("PUT", "POST"), + "/" + INDEX_NAME + "/_bulk", "{ \"index\" : { \"_id\" : \"1\" } }\n{ \"doc\" : {\"foo\" : \"bazbaz\"} }\n"); + assertBodyHasAccessIsDenied(CREATE_DOC_USER, randomFrom("PUT", "POST"), + "/" + INDEX_NAME + "/_bulk", "{ \"update\" : { \"_id\" : \"1\" } }\n{ \"doc\" : {\"foo\" : \"bazbaz\"} }\n"); + } + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index 89fcc394a2c5..d9c3d4d90e7c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -1189,16 +1189,16 @@ public class AuthorizationServiceTests extends ESTestCase { eq(DeleteAction.NAME), eq("alias-2"), eq(BulkItemRequest.class.getSimpleName()), eq(request.remoteAddress()), authzInfoRoles(new String[] { role.getName() })); verify(auditTrail).explicitIndexAccessEvent(eq(requestId), eq(AuditLevel.ACCESS_GRANTED), eq(authentication), - eq(IndexAction.NAME), eq("concrete-index"), eq(BulkItemRequest.class.getSimpleName()), + eq(IndexAction.NAME + ":op_type/index"), eq("concrete-index"), eq(BulkItemRequest.class.getSimpleName()), eq(request.remoteAddress()), authzInfoRoles(new String[] { role.getName() })); verify(auditTrail).explicitIndexAccessEvent(eq(requestId), eq(AuditLevel.ACCESS_GRANTED), eq(authentication), - eq(IndexAction.NAME), eq("alias-1"), eq(BulkItemRequest.class.getSimpleName()), + eq(IndexAction.NAME + ":op_type/index"), eq("alias-1"), eq(BulkItemRequest.class.getSimpleName()), eq(request.remoteAddress()), authzInfoRoles(new String[] { role.getName() })); verify(auditTrail).explicitIndexAccessEvent(eq(requestId), eq(AuditLevel.ACCESS_DENIED), eq(authentication), eq(DeleteAction.NAME), eq("alias-1"), eq(BulkItemRequest.class.getSimpleName()), eq(request.remoteAddress()), authzInfoRoles(new String[] { role.getName() })); 
verify(auditTrail).explicitIndexAccessEvent(eq(requestId), eq(AuditLevel.ACCESS_DENIED), eq(authentication),
-            eq(IndexAction.NAME), eq("alias-2"), eq(BulkItemRequest.class.getSimpleName()),
+            eq(IndexAction.NAME + ":op_type/index"), eq("alias-2"), eq(BulkItemRequest.class.getSimpleName()),
             eq(request.remoteAddress()), authzInfoRoles(new String[] { role.getName() }));
         verify(auditTrail).accessGranted(eq(requestId), eq(authentication), eq(action), eq(request),
             authzInfoRoles(new String[] { role.getName() })); // bulk request is allowed
@@ -1232,7 +1232,7 @@ public class AuthorizationServiceTests extends ESTestCase {
             eq(DeleteAction.NAME), Matchers.startsWith("datemath-"), eq(BulkItemRequest.class.getSimpleName()),
             eq(request.remoteAddress()), authzInfoRoles(new String[] { role.getName() }));
         verify(auditTrail, times(2)).explicitIndexAccessEvent(eq(requestId), eq(AuditLevel.ACCESS_GRANTED), eq(authentication),
-            eq(IndexAction.NAME), Matchers.startsWith("datemath-"), eq(BulkItemRequest.class.getSimpleName()),
+            eq(IndexAction.NAME + ":op_type/index"), Matchers.startsWith("datemath-"), eq(BulkItemRequest.class.getSimpleName()),
             eq(request.remoteAddress()), authzInfoRoles(new String[] { role.getName() }));
         // bulk request is allowed
         verify(auditTrail).accessGranted(eq(requestId), eq(authentication), eq(action), eq(request),

diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/11_builtin.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/11_builtin.yml
index dd36e6e60308..9ac2fdf23c9a 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/11_builtin.yml
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/11_builtin.yml
@@ -16,4 +16,4 @@ setup:
   # I would much prefer we could just check that specific entries are in the array, but we don't have
   # an assertion for that
   - length: { "cluster" : 30 }
-  - length: { "index" : 16 }
+  - length: { "index" : 17 }
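Note: the essence of the patch above is that a bulk item's `op_type` is folded
into the implied action name before privilege matching, so that `create_doc`
can match `...:op_type/create` but not `...:op_type/index`. A minimal sketch of
that mapping follows; the class and method names are hypothetical and the types
are simplified stand-ins, not the actual AuthorizationService code:

    import java.util.Locale;

    public class ImpliedActionSketch {
        enum OpType { INDEX, CREATE, UPDATE, DELETE }

        static final String INDEX_ACTION = "indices:data/write/index";

        // Appends the op_type to the index action so the privilege automaton
        // can distinguish plain indexing from create-only indexing
        static String impliedAction(OpType opType) {
            switch (opType) {
                case INDEX:
                    return INDEX_ACTION + ":op_type/index";
                case CREATE:
                    return INDEX_ACTION + ":op_type/create";
                case UPDATE:
                    return "indices:data/write/update";
                case DELETE:
                    return "indices:data/write/delete";
                default:
                    throw new IllegalArgumentException(
                        "unknown op_type " + opType.name().toLowerCase(Locale.ROOT));
            }
        }

        public static void main(String[] args) {
            for (OpType op : OpType.values()) {
                System.out.println(op + " -> " + impliedAction(op));
            }
        }
    }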
From e99435a7f66b5d91a5db80487f22ac2ca4a71314 Mon Sep 17 00:00:00 2001
From: Dimitris Athanasiou
Date: Mon, 7 Oct 2019 15:28:21 +0300
Subject: [PATCH 20/55] [ML] Additional outlier detection parameters (#47600)

Adds the following parameters to `outlier_detection`:

- `compute_feature_influence` (boolean): whether or not to compute feature
  influence scores
- `outlier_fraction` (double): the proportion of the data set assumed to be
  outlying prior to running outlier detection
- `standardization_enabled` (boolean): whether to apply standardization to
  the feature values

---
 .../client/ml/dataframe/OutlierDetection.java | 91 ++++++++-
 .../client/MachineLearningIT.java | 5 +-
 .../MlClientDocumentationIT.java | 4 +
 .../ml/dataframe/OutlierDetectionTests.java | 13 ++
 .../ml/put-data-frame-analytics.asciidoc | 4 +
 .../apis/dfanalyticsresources.asciidoc | 2 +-
 .../apis/put-dfanalytics.asciidoc | 9 +-
 .../dataframe/analyses/OutlierDetection.java | 147 +++++++++++---
 .../analyses/OutlierDetectionTests.java | 35 +++-
 .../ml/qa/ml-with-security/build.gradle | 6 +
 .../OutlierDetectionWithMissingFieldsIT.java | 3 +-
 .../integration/RunDataFrameAnalyticsIT.java | 109 ++++++++++-
 .../DataFrameAnalyticsIndexTests.java | 2 +-
 .../dataframe/SourceDestValidatorTests.java | 16 +-
 .../ExtractedFieldsDetectorTests.java | 2 +-
 .../test/ml/data_frame_analytics_crud.yml | 182 +++++++++++++++++-
 16 files changed, 562 insertions(+), 68 deletions(-)
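Note: a minimal usage sketch of the extended high-level REST client builder
introduced by this patch; the values shown simply restate the documented
defaults, and the class name is hypothetical. It assumes the
elasticsearch-rest-high-level-client jar on the classpath:

    import org.elasticsearch.client.ml.dataframe.OutlierDetection;

    public class OutlierDetectionConfigSketch {
        public static void main(String[] args) {
            // Builds an analysis config exercising the three new parameters
            OutlierDetection analysis = OutlierDetection.builder()
                .setNNeighbors(5)                                // leave unset for dynamic detection
                .setMethod(OutlierDetection.Method.DISTANCE_KNN) // leave unset for a dynamic mixture
                .setFeatureInfluenceThreshold(0.1)               // min outlier score to compute influence
                .setComputeFeatureInfluence(true)                // new in this patch
                .setOutlierFraction(0.05)                        // new in this patch
                .setStandardizationEnabled(true)                 // new in this patch
                .build();
            System.out.println(analysis);
        }
    }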
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/OutlierDetection.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/OutlierDetection.java
index fe5094fb7190..f58c9b3d3542 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/OutlierDetection.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/OutlierDetection.java
@@ -19,7 +19,6 @@
 package org.elasticsearch.client.ml.dataframe;

-import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.xcontent.ObjectParser;
@@ -48,6 +47,9 @@ public class OutlierDetection implements DataFrameAnalysis {
     static final ParseField N_NEIGHBORS = new ParseField("n_neighbors");
     static final ParseField METHOD = new ParseField("method");
     public static final ParseField FEATURE_INFLUENCE_THRESHOLD = new ParseField("feature_influence_threshold");
+    static final ParseField COMPUTE_FEATURE_INFLUENCE = new ParseField("compute_feature_influence");
+    static final ParseField OUTLIER_FRACTION = new ParseField("outlier_fraction");
+    static final ParseField STANDARDIZATION_ENABLED = new ParseField("standardization_enabled");

     private static ObjectParser PARSER = new ObjectParser<>(NAME.getPreferredName(), true, Builder::new);
@@ -60,22 +62,49 @@
                 throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
             }, METHOD, ObjectParser.ValueType.STRING);
         PARSER.declareDouble(Builder::setFeatureInfluenceThreshold, FEATURE_INFLUENCE_THRESHOLD);
+        PARSER.declareBoolean(Builder::setComputeFeatureInfluence, COMPUTE_FEATURE_INFLUENCE);
+        PARSER.declareDouble(Builder::setOutlierFraction, OUTLIER_FRACTION);
+        PARSER.declareBoolean(Builder::setStandardizationEnabled, STANDARDIZATION_ENABLED);
     }

+    /**
+     * The number of neighbors. Leave unspecified for dynamic detection.
+     */
     private final Integer nNeighbors;
+
+    /**
+     * The method. Leave unspecified for a dynamic mixture of methods.
+     */
     private final Method method;
+
+    /**
+     * The min outlier score required to calculate feature influence. Defaults to 0.1.
+     */
     private final Double featureInfluenceThreshold;

     /**
-     * Constructs the outlier detection configuration
-     * @param nNeighbors The number of neighbors. Leave unspecified for dynamic detection.
-     * @param method The method. Leave unspecified for a dynamic mixture of methods.
-     * @param featureInfluenceThreshold The min outlier score required to calculate feature influence. Defaults to 0.1.
+     * Whether to compute feature influence or not. Defaults to true.
      */
-    private OutlierDetection(@Nullable Integer nNeighbors, @Nullable Method method, @Nullable Double featureInfluenceThreshold) {
+    private final Boolean computeFeatureInfluence;
+
+    /**
+     * The proportion of data assumed to be outlying prior to outlier detection. Defaults to 0.05.
+     */
+    private final Double outlierFraction;
+
+    /**
+     * Whether to perform standardization.
+     */
+    private final Boolean standardizationEnabled;
+
+    private OutlierDetection(Integer nNeighbors, Method method, Double featureInfluenceThreshold, Boolean computeFeatureInfluence,
+                             Double outlierFraction, Boolean standardizationEnabled) {
         this.nNeighbors = nNeighbors;
         this.method = method;
         this.featureInfluenceThreshold = featureInfluenceThreshold;
+        this.computeFeatureInfluence = computeFeatureInfluence;
+        this.outlierFraction = outlierFraction;
+        this.standardizationEnabled = standardizationEnabled;
     }

     @Override
@@ -95,6 +124,18 @@ public class OutlierDetection implements DataFrameAnalysis {
         return featureInfluenceThreshold;
     }

+    public Boolean getComputeFeatureInfluence() {
+        return computeFeatureInfluence;
+    }
+
+    public Double getOutlierFraction() {
+        return outlierFraction;
+    }
+
+    public Boolean getStandardizationEnabled() {
+        return standardizationEnabled;
+    }
+
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
@@ -107,6 +148,15 @@
         if (featureInfluenceThreshold != null) {
             builder.field(FEATURE_INFLUENCE_THRESHOLD.getPreferredName(), featureInfluenceThreshold);
         }
+        if (computeFeatureInfluence != null) {
+            builder.field(COMPUTE_FEATURE_INFLUENCE.getPreferredName(), computeFeatureInfluence);
+        }
+        if (outlierFraction != null) {
+            builder.field(OUTLIER_FRACTION.getPreferredName(), outlierFraction);
+        }
+        if (standardizationEnabled != null) {
+            builder.field(STANDARDIZATION_ENABLED.getPreferredName(), standardizationEnabled);
+        }
         builder.endObject();
         return builder;
     }
@@ -119,12 +169,16 @@
         OutlierDetection other = (OutlierDetection) o;
         return Objects.equals(nNeighbors, other.nNeighbors)
             && Objects.equals(method, other.method)
-            && Objects.equals(featureInfluenceThreshold, other.featureInfluenceThreshold);
+            && Objects.equals(featureInfluenceThreshold, other.featureInfluenceThreshold)
+            && Objects.equals(computeFeatureInfluence, other.computeFeatureInfluence)
+            && Objects.equals(outlierFraction, other.outlierFraction)
+            && Objects.equals(standardizationEnabled, other.standardizationEnabled);
     }

     @Override
     public int hashCode() {
-        return Objects.hash(nNeighbors, method, featureInfluenceThreshold);
+        return Objects.hash(nNeighbors, method, featureInfluenceThreshold, computeFeatureInfluence, outlierFraction,
+            standardizationEnabled);
     }

     @Override
@@ -150,6 +204,9 @@
         private Integer nNeighbors;
         private Method method;
         private Double featureInfluenceThreshold;
+        private Boolean computeFeatureInfluence;
+        private Double outlierFraction;
+        private Boolean standardizationEnabled;

         private Builder() {}

@@ -168,8 +225,24 @@
             return this;
         }

+        public Builder setComputeFeatureInfluence(Boolean computeFeatureInfluence) {
+            this.computeFeatureInfluence = computeFeatureInfluence;
+            return this;
+        }
+
+        public Builder setOutlierFraction(Double outlierFraction) {
+            this.outlierFraction = outlierFraction;
+            return this;
+        }
+
+        public Builder setStandardizationEnabled(Boolean standardizationEnabled) {
+            this.standardizationEnabled = standardizationEnabled;
+            return this;
+        }
+
         public OutlierDetection build() {
-            return new OutlierDetection(nNeighbors, method, featureInfluenceThreshold);
+            return new OutlierDetection(nNeighbors, method, featureInfluenceThreshold, computeFeatureInfluence,
outlierFraction, + standardizationEnabled); } } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index e00679749b53..43f774ded28f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -1246,7 +1246,10 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { assertThat(createdConfig.getSource().getQueryConfig(), equalTo(new QueryConfig(new MatchAllQueryBuilder()))); // default value assertThat(createdConfig.getDest().getIndex(), equalTo(config.getDest().getIndex())); assertThat(createdConfig.getDest().getResultsField(), equalTo("ml")); // default value - assertThat(createdConfig.getAnalysis(), equalTo(config.getAnalysis())); + assertThat(createdConfig.getAnalysis(), equalTo(OutlierDetection.builder() + .setComputeFeatureInfluence(true) + .setOutlierFraction(0.05) + .setStandardizationEnabled(true).build())); assertThat(createdConfig.getAnalyzedFields(), equalTo(config.getAnalyzedFields())); assertThat(createdConfig.getModelMemoryLimit(), equalTo(ByteSizeValue.parseBytesSizeValue("1gb", ""))); // default value assertThat(createdConfig.getDescription(), equalTo("some description")); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index f1017e86bd06..cd2d961f9b38 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -2932,6 +2932,10 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { DataFrameAnalysis outlierDetectionCustomized = OutlierDetection.builder() // <1> .setMethod(OutlierDetection.Method.DISTANCE_KNN) // <2> .setNNeighbors(5) // <3> + .setFeatureInfluenceThreshold(0.1) // <4> + .setComputeFeatureInfluence(true) // <5> + .setOutlierFraction(0.05) // <6> + .setStandardizationEnabled(true) // <7> .build(); // end::put-data-frame-analytics-outlier-detection-customized diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/OutlierDetectionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/OutlierDetectionTests.java index 7307999a2bf7..28315365cb2d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/OutlierDetectionTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/OutlierDetectionTests.java @@ -26,6 +26,7 @@ import java.io.IOException; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; public class OutlierDetectionTests extends AbstractXContentTestCase { @@ -34,6 +35,9 @@ public class OutlierDetectionTests extends AbstractXContentTestCase Constructing a new OutlierDetection object <2> The method used to perform the analysis <3> Number of neighbors taken into account during analysis +<4> The min `outlier_score` required to compute feature influence +<5> Whether to compute feature influence +<6> The proportion of the data set that is assumed to be outlying prior to outlier detection +<7> Whether to apply 
standardization to feature values ===== Regression diff --git a/docs/reference/ml/df-analytics/apis/dfanalyticsresources.asciidoc b/docs/reference/ml/df-analytics/apis/dfanalyticsresources.asciidoc index 8659df88027d..7447a919437f 100644 --- a/docs/reference/ml/df-analytics/apis/dfanalyticsresources.asciidoc +++ b/docs/reference/ml/df-analytics/apis/dfanalyticsresources.asciidoc @@ -134,7 +134,7 @@ An `outlier_detection` configuration object has the following properties: {oldetection}. For example, 0.05 means it is assumed that 5% of values are real outliers and 95% are inliers. -`standardize_columns`:: +`standardization_enabled`:: (boolean) If `true`, then the following operation is performed on the columns before computing outlier scores: (x_i - mean(x_i)) / sd(x_i). Defaults to `true`. For more information, see diff --git a/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc index 2386d1e7f74b..40a17485814b 100644 --- a/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc @@ -139,6 +139,9 @@ PUT _ml/data_frame/analytics/loganalytics }, "analysis": { "outlier_detection": { + "compute_feature_influence": true, + "outlier_fraction": 0.05, + "standardization_enabled": true } } } @@ -164,7 +167,11 @@ The API returns the following result: "results_field": "ml" }, "analysis": { - "outlier_detection": {} + "outlier_detection": { + "compute_feature_influence": true, + "outlier_fraction": 0.05, + "standardization_enabled": true + } }, "model_memory_limit": "1gb", "create_time" : 1562265491319, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetection.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetection.java index d94176c2bbdb..47325ffdea82 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetection.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetection.java @@ -5,11 +5,11 @@ */ package org.elasticsearch.xpack.core.ml.dataframe.analyses; +import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -30,39 +30,68 @@ public class OutlierDetection implements DataFrameAnalysis { public static final ParseField N_NEIGHBORS = new ParseField("n_neighbors"); public static final ParseField METHOD = new ParseField("method"); public static final ParseField FEATURE_INFLUENCE_THRESHOLD = new ParseField("feature_influence_threshold"); + public static final ParseField COMPUTE_FEATURE_INFLUENCE = new ParseField("compute_feature_influence"); + public static final ParseField OUTLIER_FRACTION = new ParseField("outlier_fraction"); + public static final ParseField STANDARDIZATION_ENABLED = new ParseField("standardization_enabled"); - private static final ConstructingObjectParser LENIENT_PARSER = createParser(true); - private static final ConstructingObjectParser STRICT_PARSER = createParser(false); + private static final ObjectParser LENIENT_PARSER = 
createParser(true); + private static final ObjectParser STRICT_PARSER = createParser(false); - private static ConstructingObjectParser createParser(boolean lenient) { - ConstructingObjectParser parser = new ConstructingObjectParser<>(NAME.getPreferredName(), lenient, - a -> new OutlierDetection((Integer) a[0], (Method) a[1], (Double) a[2])); - parser.declareInt(ConstructingObjectParser.optionalConstructorArg(), N_NEIGHBORS); - parser.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> { + private static ObjectParser createParser(boolean lenient) { + ObjectParser parser = new ObjectParser<>(NAME.getPreferredName(), lenient, Builder::new); + parser.declareInt(Builder::setNNeighbors, N_NEIGHBORS); + parser.declareField(Builder::setMethod, p -> { if (p.currentToken() == XContentParser.Token.VALUE_STRING) { return Method.fromString(p.text()); } throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); }, METHOD, ObjectParser.ValueType.STRING); - parser.declareDouble(ConstructingObjectParser.optionalConstructorArg(), FEATURE_INFLUENCE_THRESHOLD); + parser.declareDouble(Builder::setFeatureInfluenceThreshold, FEATURE_INFLUENCE_THRESHOLD); + parser.declareBoolean(Builder::setComputeFeatureInfluence, COMPUTE_FEATURE_INFLUENCE); + parser.declareDouble(Builder::setOutlierFraction, OUTLIER_FRACTION); + parser.declareBoolean(Builder::setStandardizationEnabled, STANDARDIZATION_ENABLED); return parser; } public static OutlierDetection fromXContent(XContentParser parser, boolean ignoreUnknownFields) { - return ignoreUnknownFields ? LENIENT_PARSER.apply(parser, null) : STRICT_PARSER.apply(parser, null); + return ignoreUnknownFields ? LENIENT_PARSER.apply(parser, null).build() : STRICT_PARSER.apply(parser, null).build(); } + /** + * The number of neighbors. Leave unspecified for dynamic detection. + */ + @Nullable private final Integer nNeighbors; + + /** + * The method. Leave unspecified for a dynamic mixture of methods. + */ + @Nullable private final Method method; + + /** + * The min outlier score required to calculate feature influence. Defaults to 0.1. + */ + @Nullable private final Double featureInfluenceThreshold; /** - * Constructs the outlier detection configuration - * @param nNeighbors The number of neighbors. Leave unspecified for dynamic detection. - * @param method The method. Leave unspecified for a dynamic mixture of methods. - * @param featureInfluenceThreshold The min outlier score required to calculate feature influence. Defaults to 0.1. + * Whether to compute feature influence or not. Defaults to true. */ - public OutlierDetection(@Nullable Integer nNeighbors, @Nullable Method method, @Nullable Double featureInfluenceThreshold) { + private final boolean computeFeatureInfluence; + + /** + * The proportion of data assumed to be outlying prior to outlier detection. Defaults to 0.05. + */ + private final double outlierFraction; + + /** + * Whether to perform standardization. 
+ */ + private final boolean standardizationEnabled; + + private OutlierDetection(Integer nNeighbors, Method method, Double featureInfluenceThreshold, boolean computeFeatureInfluence, + double outlierFraction, boolean standardizationEnabled) { if (nNeighbors != null && nNeighbors <= 0) { throw ExceptionsHelper.badRequestException("[{}] must be a positive integer", N_NEIGHBORS.getPreferredName()); } @@ -71,22 +100,31 @@ public class OutlierDetection implements DataFrameAnalysis { throw ExceptionsHelper.badRequestException("[{}] must be in [0, 1]", FEATURE_INFLUENCE_THRESHOLD.getPreferredName()); } + if (outlierFraction < 0.0 || outlierFraction > 1.0) { + throw ExceptionsHelper.badRequestException("[{}] must be in [0, 1]", OUTLIER_FRACTION.getPreferredName()); + } + this.nNeighbors = nNeighbors; this.method = method; this.featureInfluenceThreshold = featureInfluenceThreshold; - } - - /** - * Constructs the default outlier detection configuration - */ - public OutlierDetection() { - this(null, null, null); + this.computeFeatureInfluence = computeFeatureInfluence; + this.outlierFraction = outlierFraction; + this.standardizationEnabled = standardizationEnabled; } public OutlierDetection(StreamInput in) throws IOException { nNeighbors = in.readOptionalVInt(); method = in.readBoolean() ? in.readEnum(Method.class) : null; featureInfluenceThreshold = in.readOptionalDouble(); + if (in.getVersion().onOrAfter(Version.V_7_5_0)) { + computeFeatureInfluence = in.readBoolean(); + outlierFraction = in.readDouble(); + standardizationEnabled = in.readBoolean(); + } else { + computeFeatureInfluence = true; + outlierFraction = 0.05; + standardizationEnabled = true; + } } @Override @@ -106,6 +144,12 @@ public class OutlierDetection implements DataFrameAnalysis { } out.writeOptionalDouble(featureInfluenceThreshold); + + if (out.getVersion().onOrAfter(Version.V_7_5_0)) { + out.writeBoolean(computeFeatureInfluence); + out.writeDouble(outlierFraction); + out.writeBoolean(standardizationEnabled); + } } @Override @@ -120,6 +164,9 @@ public class OutlierDetection implements DataFrameAnalysis { if (featureInfluenceThreshold != null) { builder.field(FEATURE_INFLUENCE_THRESHOLD.getPreferredName(), featureInfluenceThreshold); } + builder.field(COMPUTE_FEATURE_INFLUENCE.getPreferredName(), computeFeatureInfluence); + builder.field(OUTLIER_FRACTION.getPreferredName(), outlierFraction); + builder.field(STANDARDIZATION_ENABLED.getPreferredName(), standardizationEnabled); builder.endObject(); return builder; } @@ -131,12 +178,16 @@ public class OutlierDetection implements DataFrameAnalysis { OutlierDetection that = (OutlierDetection) o; return Objects.equals(nNeighbors, that.nNeighbors) && Objects.equals(method, that.method) - && Objects.equals(featureInfluenceThreshold, that.featureInfluenceThreshold); + && Objects.equals(featureInfluenceThreshold, that.featureInfluenceThreshold) + && computeFeatureInfluence == that.computeFeatureInfluence + && outlierFraction == that.outlierFraction + && standardizationEnabled == that.standardizationEnabled; } @Override public int hashCode() { - return Objects.hash(nNeighbors, method, featureInfluenceThreshold); + return Objects.hash(nNeighbors, method, featureInfluenceThreshold, computeFeatureInfluence, outlierFraction, + standardizationEnabled); } @Override @@ -151,6 +202,9 @@ public class OutlierDetection implements DataFrameAnalysis { if (featureInfluenceThreshold != null) { params.put(FEATURE_INFLUENCE_THRESHOLD.getPreferredName(), featureInfluenceThreshold); } + 
params.put(COMPUTE_FEATURE_INFLUENCE.getPreferredName(), computeFeatureInfluence); + params.put(OUTLIER_FRACTION.getPreferredName(), outlierFraction); + params.put(STANDARDIZATION_ENABLED.getPreferredName(), standardizationEnabled); return params; } @@ -191,4 +245,49 @@ public class OutlierDetection implements DataFrameAnalysis { return name().toLowerCase(Locale.ROOT); } } + + public static class Builder { + + private Integer nNeighbors; + private Method method; + private Double featureInfluenceThreshold; + private boolean computeFeatureInfluence = true; + private double outlierFraction = 0.05; + private boolean standardizationEnabled = true; + + public Builder setNNeighbors(Integer nNeighbors) { + this.nNeighbors = nNeighbors; + return this; + } + + public Builder setMethod(Method method) { + this.method = method; + return this; + } + + public Builder setFeatureInfluenceThreshold(Double featureInfluenceThreshold) { + this.featureInfluenceThreshold = featureInfluenceThreshold; + return this; + } + + public Builder setComputeFeatureInfluence(boolean computeFeatureInfluence) { + this.computeFeatureInfluence = computeFeatureInfluence; + return this; + } + + public Builder setOutlierFraction(double outlierFraction) { + this.outlierFraction = outlierFraction; + return this; + } + + public Builder setStandardizationEnabled(boolean standardizationEnabled) { + this.standardizationEnabled = standardizationEnabled; + return this; + } + + public OutlierDetection build() { + return new OutlierDetection(nNeighbors, method, featureInfluenceThreshold, computeFeatureInfluence, outlierFraction, + standardizationEnabled); + } + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetectionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetectionTests.java index 4588f674cfd7..61cd85e3bbba 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetectionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetectionTests.java @@ -32,7 +32,14 @@ public class OutlierDetectionTests extends AbstractSerializingTestCase params = outlierDetection.getParams(); + assertThat(params.size(), equalTo(3)); + assertThat(params.containsKey("compute_feature_influence"), is(true)); + assertThat(params.get("compute_feature_influence"), is(true)); + assertThat(params.containsKey("outlier_fraction"), is(true)); + assertThat((double) params.get("outlier_fraction"), closeTo(0.05, 0.0001)); + assertThat(params.containsKey("standardization_enabled"), is(true)); + assertThat(params.get("standardization_enabled"), is(true)); } public void testGetParams_GivenExplicitValues() { - OutlierDetection outlierDetection = new OutlierDetection(42, OutlierDetection.Method.LDOF, 0.42); + OutlierDetection outlierDetection = new OutlierDetection.Builder() + .setNNeighbors(42) + .setMethod(OutlierDetection.Method.LDOF) + .setFeatureInfluenceThreshold(0.42) + .setComputeFeatureInfluence(false) + .setOutlierFraction(0.9) + .setStandardizationEnabled(false) + .build(); Map params = outlierDetection.getParams(); - assertThat(params.size(), equalTo(3)); + assertThat(params.size(), equalTo(6)); assertThat(params.get(OutlierDetection.N_NEIGHBORS.getPreferredName()), equalTo(42)); assertThat(params.get(OutlierDetection.METHOD.getPreferredName()), equalTo(OutlierDetection.Method.LDOF)); assertThat((Double) 
params.get(OutlierDetection.FEATURE_INFLUENCE_THRESHOLD.getPreferredName()), is(closeTo(0.42, 1E-9))); + assertThat(params.get(OutlierDetection.COMPUTE_FEATURE_INFLUENCE.getPreferredName()), is(false)); + assertThat((Double) params.get(OutlierDetection.OUTLIER_FRACTION.getPreferredName()), + is(closeTo(0.9, 1E-9))); + assertThat(params.get(OutlierDetection.STANDARDIZATION_ENABLED.getPreferredName()), is(false)); } public void testGetStateDocId() { diff --git a/x-pack/plugin/ml/qa/ml-with-security/build.gradle b/x-pack/plugin/ml/qa/ml-with-security/build.gradle index b0fbfc5cd37b..e330d032c0a0 100644 --- a/x-pack/plugin/ml/qa/ml-with-security/build.gradle +++ b/x-pack/plugin/ml/qa/ml-with-security/build.gradle @@ -62,6 +62,12 @@ integTest.runner { 'ml/data_frame_analytics_crud/Test put config given missing analysis', 'ml/data_frame_analytics_crud/Test put config given empty analysis', 'ml/data_frame_analytics_crud/Test max model memory limit', + 'ml/data_frame_analytics_crud/Test put outlier_detection given n_neighbors is negative', + 'ml/data_frame_analytics_crud/Test put outlier_detection given n_neighbors is zero', + 'ml/data_frame_analytics_crud/Test put outlier_detection given feature_influence_threshold is negative', + 'ml/data_frame_analytics_crud/Test put outlier_detection given feature_influence_threshold is greater than one', + 'ml/data_frame_analytics_crud/Test put outlier_detection given outlier_fraction is negative', + 'ml/data_frame_analytics_crud/Test put outlier_detection given outlier_fraction is greater than one', 'ml/data_frame_analytics_crud/Test put regression given dependent_variable is not defined', 'ml/data_frame_analytics_crud/Test put regression given negative lambda', 'ml/data_frame_analytics_crud/Test put regression given negative gamma', diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/OutlierDetectionWithMissingFieldsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/OutlierDetectionWithMissingFieldsIT.java index c1c2fec780b6..741d74f83745 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/OutlierDetectionWithMissingFieldsIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/OutlierDetectionWithMissingFieldsIT.java @@ -69,7 +69,8 @@ public class OutlierDetectionWithMissingFieldsIT extends MlNativeDataFrameAnalyt } String id = "test_outlier_detection_with_missing_fields"; - DataFrameAnalyticsConfig config = buildAnalytics(id, sourceIndex, sourceIndex + "-results", null, new OutlierDetection()); + DataFrameAnalyticsConfig config = buildAnalytics(id, sourceIndex, sourceIndex + "-results", null, + new OutlierDetection.Builder().build()); registerAnalytics(config); putAnalytics(config); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java index 145cdc97d245..9211e43f8d71 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java @@ -72,7 +72,8 @@ public class RunDataFrameAnalyticsIT extends 
MlNativeDataFrameAnalyticsIntegTest } String id = "test_outlier_detection_with_few_docs"; - DataFrameAnalyticsConfig config = buildAnalytics(id, sourceIndex, sourceIndex + "-results", null, new OutlierDetection()); + DataFrameAnalyticsConfig config = buildAnalytics(id, sourceIndex, sourceIndex + "-results", null, + new OutlierDetection.Builder().build()); registerAnalytics(config); putAnalytics(config); @@ -147,7 +148,8 @@ public class RunDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTest } String id = "test_outlier_detection_with_enough_docs_to_scroll"; - DataFrameAnalyticsConfig config = buildAnalytics(id, sourceIndex, sourceIndex + "-results", "custom_ml", new OutlierDetection()); + DataFrameAnalyticsConfig config = buildAnalytics(id, sourceIndex, sourceIndex + "-results", "custom_ml", + new OutlierDetection.Builder().build()); registerAnalytics(config); putAnalytics(config); @@ -216,7 +218,8 @@ public class RunDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTest } String id = "test_outlier_detection_with_more_fields_than_docvalue_limit"; - DataFrameAnalyticsConfig config = buildAnalytics(id, sourceIndex, sourceIndex + "-results", null, new OutlierDetection()); + DataFrameAnalyticsConfig config = buildAnalytics(id, sourceIndex, sourceIndex + "-results", null, + new OutlierDetection.Builder().build()); registerAnalytics(config); putAnalytics(config); @@ -279,7 +282,8 @@ public class RunDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTest } String id = "test_stop_outlier_detection_with_enough_docs_to_scroll"; - DataFrameAnalyticsConfig config = buildAnalytics(id, sourceIndex, sourceIndex + "-results", "custom_ml", new OutlierDetection()); + DataFrameAnalyticsConfig config = buildAnalytics(id, sourceIndex, sourceIndex + "-results", "custom_ml", + new OutlierDetection.Builder().build()); registerAnalytics(config); putAnalytics(config); @@ -347,7 +351,7 @@ public class RunDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTest .setId(id) .setSource(new DataFrameAnalyticsSource(sourceIndex, null)) .setDest(new DataFrameAnalyticsDest(destIndex, null)) - .setAnalysis(new OutlierDetection()) + .setAnalysis(new OutlierDetection.Builder().build()) .build(); registerAnalytics(config); putAnalytics(config); @@ -405,7 +409,7 @@ public class RunDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTest } String id = "test_outlier_detection_with_pre_existing_dest_index"; - DataFrameAnalyticsConfig config = buildAnalytics(id, sourceIndex, destIndex, null, new OutlierDetection()); + DataFrameAnalyticsConfig config = buildAnalytics(id, sourceIndex, destIndex, null, new OutlierDetection.Builder().build()); registerAnalytics(config); putAnalytics(config); @@ -461,7 +465,7 @@ public class RunDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTest .setId(id) .setSource(new DataFrameAnalyticsSource(new String[] { sourceIndex }, null)) .setDest(new DataFrameAnalyticsDest(sourceIndex + "-results", null)) - .setAnalysis(new OutlierDetection()) + .setAnalysis(new OutlierDetection.Builder().build()) .setModelMemoryLimit(modelMemoryLimit) .build(); @@ -503,7 +507,8 @@ public class RunDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTest } String id = "test_outlier_detection_stop_and_restart"; - DataFrameAnalyticsConfig config = buildAnalytics(id, sourceIndex, sourceIndex + "-results", "custom_ml", new OutlierDetection()); + DataFrameAnalyticsConfig config = buildAnalytics(id, sourceIndex, sourceIndex + "-results", "custom_ml", + new 
OutlierDetection.Builder().build()); registerAnalytics(config); putAnalytics(config); @@ -545,4 +550,92 @@ public class RunDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTest assertProgress(id, 100, 100, 100, 100); assertThat(searchStoredProgress(id).getHits().getTotalHits().value, equalTo(1L)); } + + public void testOutlierDetectionWithCustomParams() throws Exception { + String sourceIndex = "test-outlier-detection-with-custom-params"; + + client().admin().indices().prepareCreate(sourceIndex) + .addMapping("_doc", "numeric_1", "type=double", "numeric_2", "type=float", "categorical_1", "type=keyword") + .get(); + + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); + bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + + for (int i = 0; i < 5; i++) { + IndexRequest indexRequest = new IndexRequest(sourceIndex); + + // We insert one odd value out of 5 for one feature + String docId = i == 0 ? "outlier" : "normal" + i; + indexRequest.id(docId); + indexRequest.source("numeric_1", i == 0 ? 100.0 : 1.0, "numeric_2", 1.0, "categorical_1", "foo_" + i); + bulkRequestBuilder.add(indexRequest); + } + BulkResponse bulkResponse = bulkRequestBuilder.get(); + if (bulkResponse.hasFailures()) { + fail("Failed to index data: " + bulkResponse.buildFailureMessage()); + } + + String id = "test_outlier_detection_with_custom_params"; + DataFrameAnalyticsConfig config = buildAnalytics(id, sourceIndex, sourceIndex + "-results", null, + new OutlierDetection.Builder() + .setNNeighbors(3) + .setMethod(OutlierDetection.Method.DISTANCE_KNN) + .setFeatureInfluenceThreshold(0.01) + .setComputeFeatureInfluence(false) + .setOutlierFraction(0.04) + .setStandardizationEnabled(true) + .build()); + registerAnalytics(config); + putAnalytics(config); + + assertState(id, DataFrameAnalyticsState.STOPPED); + assertProgress(id, 0, 0, 0, 0); + + startAnalytics(id); + waitUntilAnalyticsIsStopped(id); + + SearchResponse sourceData = client().prepareSearch(sourceIndex).get(); + double scoreOfOutlier = 0.0; + double scoreOfNonOutlier = -1.0; + for (SearchHit hit : sourceData.getHits()) { + GetResponse destDocGetResponse = client().prepareGet().setIndex(config.getDest().getIndex()).setId(hit.getId()).get(); + assertThat(destDocGetResponse.isExists(), is(true)); + Map sourceDoc = hit.getSourceAsMap(); + Map destDoc = destDocGetResponse.getSource(); + for (String field : sourceDoc.keySet()) { + assertThat(destDoc.containsKey(field), is(true)); + assertThat(destDoc.get(field), equalTo(sourceDoc.get(field))); + } + assertThat(destDoc.containsKey("ml"), is(true)); + + @SuppressWarnings("unchecked") + Map resultsObject = (Map) destDoc.get("ml"); + + assertThat(resultsObject.containsKey("outlier_score"), is(true)); + assertThat(resultsObject.containsKey("feature_influence"), is(false)); + + double outlierScore = (double) resultsObject.get("outlier_score"); + assertThat(outlierScore, allOf(greaterThanOrEqualTo(0.0), lessThanOrEqualTo(1.0))); + if (hit.getId().equals("outlier")) { + scoreOfOutlier = outlierScore; + } else { + if (scoreOfNonOutlier < 0) { + scoreOfNonOutlier = outlierScore; + } else { + assertThat(outlierScore, equalTo(scoreOfNonOutlier)); + } + } + } + assertThat(scoreOfOutlier, is(greaterThan(scoreOfNonOutlier))); + + assertProgress(id, 100, 100, 100, 100); + assertThat(searchStoredProgress(id).getHits().getTotalHits().value, equalTo(1L)); + assertThatAuditMessagesMatch(id, + "Created analytics with analysis type [outlier_detection]", + "Estimated memory usage for this analytics 
to be", + "Started analytics", + "Creating destination index [test-outlier-detection-with-custom-params-results]", + "Finished reindexing to destination index [test-outlier-detection-with-custom-params-results]", + "Finished analysis"); + } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndexTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndexTests.java index 82b8b0a6c65b..40bba1503cdc 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndexTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsIndexTests.java @@ -60,7 +60,7 @@ public class DataFrameAnalyticsIndexTests extends ESTestCase { .setId(ANALYTICS_ID) .setSource(new DataFrameAnalyticsSource(SOURCE_INDEX, null)) .setDest(new DataFrameAnalyticsDest(DEST_INDEX, null)) - .setAnalysis(new OutlierDetection()) + .setAnalysis(new OutlierDetection.Builder().build()) .build(); private static final int CURRENT_TIME_MILLIS = 123456789; private static final String CREATED_BY = "data-frame-analytics"; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/SourceDestValidatorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/SourceDestValidatorTests.java index 7df1af62449e..c9423aadbe03 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/SourceDestValidatorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/SourceDestValidatorTests.java @@ -67,7 +67,7 @@ public class SourceDestValidatorTests extends ESTestCase { .setId("test") .setSource(createSource("source-1")) .setDest(new DataFrameAnalyticsDest("dest", null)) - .setAnalysis(new OutlierDetection()) + .setAnalysis(new OutlierDetection.Builder().build()) .build(); SourceDestValidator validator = new SourceDestValidator(CLUSTER_STATE, new IndexNameExpressionResolver()); @@ -79,7 +79,7 @@ public class SourceDestValidatorTests extends ESTestCase { .setId("test") .setSource(createSource("missing")) .setDest(new DataFrameAnalyticsDest("dest", null)) - .setAnalysis(new OutlierDetection()) + .setAnalysis(new OutlierDetection.Builder().build()) .build(); SourceDestValidator validator = new SourceDestValidator(CLUSTER_STATE, new IndexNameExpressionResolver()); @@ -94,7 +94,7 @@ public class SourceDestValidatorTests extends ESTestCase { .setId("test") .setSource(createSource("missing*")) .setDest(new DataFrameAnalyticsDest("dest", null)) - .setAnalysis(new OutlierDetection()) + .setAnalysis(new OutlierDetection.Builder().build()) .build(); SourceDestValidator validator = new SourceDestValidator(CLUSTER_STATE, new IndexNameExpressionResolver()); @@ -109,7 +109,7 @@ public class SourceDestValidatorTests extends ESTestCase { .setId("test") .setSource(createSource("source-1")) .setDest(new DataFrameAnalyticsDest("source-1", null)) - .setAnalysis(new OutlierDetection()) + .setAnalysis(new OutlierDetection.Builder().build()) .build(); SourceDestValidator validator = new SourceDestValidator(CLUSTER_STATE, new IndexNameExpressionResolver()); @@ -124,7 +124,7 @@ public class SourceDestValidatorTests extends ESTestCase { .setId("test") .setSource(createSource("source-*")) .setDest(new DataFrameAnalyticsDest(SOURCE_2, null)) - .setAnalysis(new OutlierDetection()) + .setAnalysis(new OutlierDetection.Builder().build()) .build(); SourceDestValidator validator = new SourceDestValidator(CLUSTER_STATE, new 
IndexNameExpressionResolver()); @@ -139,7 +139,7 @@ public class SourceDestValidatorTests extends ESTestCase { .setId("test") .setSource(createSource("source-1,source-*")) .setDest(new DataFrameAnalyticsDest(SOURCE_2, null)) - .setAnalysis(new OutlierDetection()) + .setAnalysis(new OutlierDetection.Builder().build()) .build(); SourceDestValidator validator = new SourceDestValidator(CLUSTER_STATE, new IndexNameExpressionResolver()); @@ -154,7 +154,7 @@ public class SourceDestValidatorTests extends ESTestCase { .setId("test") .setSource(createSource(SOURCE_1)) .setDest(new DataFrameAnalyticsDest("dest-alias", null)) - .setAnalysis(new OutlierDetection()) + .setAnalysis(new OutlierDetection.Builder().build()) .build(); SourceDestValidator validator = new SourceDestValidator(CLUSTER_STATE, new IndexNameExpressionResolver()); @@ -170,7 +170,7 @@ public class SourceDestValidatorTests extends ESTestCase { .setId("test") .setSource(createSource("source-1")) .setDest(new DataFrameAnalyticsDest("source-1-alias", null)) - .setAnalysis(new OutlierDetection()) + .setAnalysis(new OutlierDetection.Builder().build()) .build(); SourceDestValidator validator = new SourceDestValidator(CLUSTER_STATE, new IndexNameExpressionResolver()); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorTests.java index db381373709c..c31b82009f73 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorTests.java @@ -508,7 +508,7 @@ public class ExtractedFieldsDetectorTests extends ESTestCase { .setSource(new DataFrameAnalyticsSource(SOURCE_INDEX, null)) .setDest(new DataFrameAnalyticsDest(DEST_INDEX, RESULTS_FIELD)) .setAnalyzedFields(analyzedFields) - .setAnalysis(new OutlierDetection()) + .setAnalysis(new OutlierDetection.Builder().build()) .build(); } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_crud.yml index 939a8812d041..da00a12ceb91 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_crud.yml @@ -53,7 +53,13 @@ setup: - match: { source.index: ["index-source"] } - match: { source.query: {"term" : { "user" : "Kimchy"} } } - match: { dest.index: "index-dest" } - - match: { analysis: {"outlier_detection":{}} } + - match: { analysis: { + "outlier_detection":{ + "compute_feature_influence": true, + "outlier_fraction": 0.05, + "standardization_enabled": true + } + }} - match: { analyzed_fields: {"includes" : ["obj1.*", "obj2.*" ], "excludes": [] } } - is_true: create_time - is_true: version @@ -66,7 +72,13 @@ setup: - match: { data_frame_analytics.0.source.index: ["index-source"] } - match: { data_frame_analytics.0.source.query: {"term" : { "user" : "Kimchy"} } } - match: { data_frame_analytics.0.dest.index: "index-dest" } - - match: { data_frame_analytics.0.analysis: {"outlier_detection":{}} } + - match: { data_frame_analytics.0.analysis: { + "outlier_detection":{ + "compute_feature_influence": true, + "outlier_fraction": 0.05, + "standardization_enabled": true + } + }} - match: { data_frame_analytics.0.analyzed_fields: 
{"includes" : ["obj1.*", "obj2.*" ], "excludes": [] } } - is_true: data_frame_analytics.0.create_time - is_true: data_frame_analytics.0.version @@ -148,7 +160,13 @@ setup: - match: { source.index: ["index-source"] } - match: { source.query: {"match_all" : {} } } - match: { dest.index: "index-dest" } - - match: { analysis: {"outlier_detection":{}} } + - match: { analysis: { + "outlier_detection":{ + "compute_feature_influence": true, + "outlier_fraction": 0.05, + "standardization_enabled": true + } + }} - is_true: create_time - is_true: version @@ -170,7 +188,10 @@ setup: "outlier_detection":{ "n_neighbors": 5, "method": "lof", - "feature_influence_threshold": 0.0 + "feature_influence_threshold": 0.0, + "compute_feature_influence": false, + "outlier_fraction": 0.95, + "standardization_enabled": false } } } @@ -178,9 +199,16 @@ setup: - match: { source.index: ["index-source"] } - match: { source.query: {"match_all" : {} } } - match: { dest.index: "index-dest" } - - match: { analysis.outlier_detection.n_neighbors: 5 } - - match: { analysis.outlier_detection.method: "lof" } - - match: { analysis.outlier_detection.feature_influence_threshold: 0.0 } + - match: { analysis: { + "outlier_detection":{ + "n_neighbors": 5, + "method": "lof", + "feature_influence_threshold": 0.0, + "compute_feature_influence": false, + "outlier_fraction": 0.95, + "standardization_enabled": false + } + }} - is_true: create_time - is_true: version @@ -924,7 +952,13 @@ setup: - match: { source.index: ["index-source"] } - match: { source.query: {"term" : { "user" : "Kimchy"} } } - match: { dest.index: "index-dest" } - - match: { analysis: {"outlier_detection":{}} } + - match: { analysis: { + "outlier_detection":{ + "compute_feature_influence": true, + "outlier_fraction": 0.05, + "standardization_enabled": true + } + }} - match: { analyzed_fields: {"includes" : ["obj1.*", "obj2.*" ], "excludes": [] } } - match: { model_memory_limit: "20mb" } @@ -938,6 +972,138 @@ setup: xpack.ml.max_model_memory_limit: null - match: {transient: {}} +--- +"Test put outlier_detection given n_neighbors is negative": + + - do: + catch: /\[n_neighbors\] must be a positive integer/ + ml.put_data_frame_analytics: + id: "outlier_detection-with-negative-n_neighbors" + body: > + { + "source": { + "index": "index-source" + }, + "dest": { + "index": "index-dest" + }, + "analysis": { + "outlier_detection": { + "n_neighbors": -1 + } + } + } + +--- +"Test put outlier_detection given n_neighbors is zero": + + - do: + catch: /\[n_neighbors\] must be a positive integer/ + ml.put_data_frame_analytics: + id: "outlier_detection-with-zero-n_neighbors" + body: > + { + "source": { + "index": "index-source" + }, + "dest": { + "index": "index-dest" + }, + "analysis": { + "outlier_detection": { + "n_neighbors": 0 + } + } + } + +--- +"Test put outlier_detection given feature_influence_threshold is negative": + + - do: + catch: /\[feature_influence_threshold\] must be in \[0, 1\]/ + ml.put_data_frame_analytics: + id: "outlier_detection-with-negative-feature_influence_threshold" + body: > + { + "source": { + "index": "index-source" + }, + "dest": { + "index": "index-dest" + }, + "analysis": { + "outlier_detection": { + "feature_influence_threshold": -0.001 + } + } + } + +--- +"Test put outlier_detection given feature_influence_threshold is greater than one": + + - do: + catch: /\[feature_influence_threshold\] must be in \[0, 1\]/ + ml.put_data_frame_analytics: + id: "outlier_detection-with-negative-feature_influence_threshold" + body: > + { + "source": { + "index": 
"index-source" + }, + "dest": { + "index": "index-dest" + }, + "analysis": { + "outlier_detection": { + "feature_influence_threshold": 1.001 + } + } + } + +--- +"Test put outlier_detection given outlier_fraction is negative": + + - do: + catch: /\[outlier_fraction\] must be in \[0, 1\]/ + ml.put_data_frame_analytics: + id: "outlier_detection-with-negative-outlier_fraction" + body: > + { + "source": { + "index": "index-source" + }, + "dest": { + "index": "index-dest" + }, + "analysis": { + "outlier_detection": { + "outlier_fraction": -0.001 + } + } + } + +--- +"Test put outlier_detection given outlier_fraction is greater than one": + + - do: + catch: /\[outlier_fraction\] must be in \[0, 1\]/ + ml.put_data_frame_analytics: + id: "outlier_detection-with-negative-outlier_fraction" + body: > + { + "source": { + "index": "index-source" + }, + "dest": { + "index": "index-dest" + }, + "analysis": { + "outlier_detection": { + "outlier_fraction": 1.001 + } + } + } + --- "Test put regression given dependent_variable is not defined": From 7a622f024f555566ab06176f85b830baf47e61c0 Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Mon, 7 Oct 2019 13:29:12 +0100 Subject: [PATCH 21/55] Remove types from BulkRequest (#46983) This commit removes types entirely from BulkRequest, both as a global parameter and as individual entries on update/index/delete lines. Relates to #41059 --- .../elasticsearch/client/BulkProcessorIT.java | 98 ++------------- .../BulkRequestWithGlobalParametersIT.java | 17 +-- .../java/org/elasticsearch/client/CrudIT.java | 16 --- docs/build.gradle | 3 - .../bucket/rare-terms-aggregation.asciidoc | 2 +- docs/reference/sql/getting-started.asciidoc | 2 +- .../test/lang_mustache/60_typed_keys.yml | 10 +- .../reindex/AsyncBulkByScrollActionTests.java | 6 +- .../BulkIndexByScrollResponseTests.java | 2 +- .../test/delete_by_query/10_basic.yml | 2 - .../test/update_by_query/10_basic.yml | 2 - .../netty4/Netty4HttpRequestSizeLimitIT.java | 2 +- .../test/repository_azure/10_repository.yml | 7 -- .../test/repository_gcs/10_repository.yml | 7 -- .../20_repository_permanent_credentials.yml | 3 - .../test/multi_cluster/60_tophits.yml | 2 +- .../elasticsearch/upgrades/IndexingIT.java | 4 +- .../org/elasticsearch/upgrades/XPackIT.java | 6 +- .../ingest_mustache/10_ingest_disabled.yml | 5 - .../test/bulk/11_basic_with_types.yml | 119 ------------------ .../bulk/21_list_of_strings_with_types.yml | 17 --- .../test/bulk/31_big_string_with_types.yml | 17 --- .../test/bulk/41_source_with_types.yml | 76 ----------- .../test/bulk/51_refresh_with_types.yml | 48 ------- .../test/bulk/70_mix_typeless_typeful.yml | 31 ----- .../test/bulk/81_cas_with_types.yml | 41 ------ .../mget/14_alias_to_multiple_indices.yml | 6 +- .../elasticsearch/action/DocWriteRequest.java | 8 -- .../action/bulk/BulkItemRequest.java | 2 +- .../action/bulk/BulkItemResponse.java | 60 ++++----- .../bulk/BulkPrimaryExecutionContext.java | 4 +- .../action/bulk/BulkProcessor.java | 23 +--- .../action/bulk/BulkRequest.java | 86 +++---------- .../action/bulk/BulkRequestBuilder.java | 24 +--- .../action/bulk/BulkRequestParser.java | 65 ++++------ .../action/bulk/BulkResponse.java | 4 +- .../action/bulk/TransportBulkAction.java | 11 +- .../action/delete/DeleteRequest.java | 22 +--- .../action/index/IndexRequest.java | 15 +-- .../action/update/UpdateRequest.java | 21 +--- .../java/org/elasticsearch/client/Client.java | 4 +- .../client/support/AbstractClient.java | 4 +- .../index/reindex/BulkByScrollResponse.java | 6 +- 
.../rest/action/document/RestBulkAction.java | 25 +--- .../action/bulk/BulkIntegrationIT.java | 3 +- .../action/bulk/BulkItemResponseTests.java | 9 +- .../action/bulk/BulkProcessorIT.java | 56 ++++----- .../action/bulk/BulkRequestModifierTests.java | 1 - .../action/bulk/BulkRequestParserTests.java | 34 +++-- .../action/bulk/BulkRequestTests.java | 40 +----- .../action/bulk/BulkResponseTests.java | 5 +- .../action/bulk/BulkWithUpdatesIT.java | 3 - .../elasticsearch/action/bulk/RetryTests.java | 2 +- .../bulk/TransportBulkActionTookTests.java | 5 +- .../bulk/TransportShardBulkActionTests.java | 9 +- .../document/DocumentActionsIT.java | 6 - .../reindex/BulkByScrollResponseTests.java | 4 +- .../template/SimpleIndexTemplateIT.java | 1 - .../elasticsearch/search/geo/GeoFilterIT.java | 2 +- .../elasticsearch/action/bulk/bulk-log.json | 24 ++-- .../action/bulk/simple-bulk.json | 6 +- .../action/bulk/simple-bulk10.json | 18 +-- .../action/bulk/simple-bulk11.json | 6 +- .../action/bulk/simple-bulk4.json | 2 +- .../action/bulk/simple-bulk5.json | 6 +- .../action/bulk/simple-bulk6.json | 6 +- .../action/bulk/simple-bulk7.json | 6 +- .../action/bulk/simple-bulk8.json | 6 +- .../elasticsearch/search/geo/gzippedmap.gz | Bin 7686 -> 7710 bytes .../action/MonitoringBulkRequest.java | 6 +- .../mapper/FlatObjectSearchTests.java | 4 +- .../action/MonitoringBulkRequestTests.java | 10 +- .../exporter/http/HttpExporterIT.java | 2 +- .../security/authc/ExpiredApiKeysRemover.java | 4 +- .../security/authc/ExpiredTokenRemover.java | 4 +- .../integration/BulkUpdateTests.java | 2 +- .../test/rollup/rollup_search.yml | 6 - .../resources/rest-api-spec/test/sql/sql.yml | 3 - .../rest-api-spec/test/sql/translate.yml | 1 - .../actions/index/ExecutableIndexAction.java | 1 - .../actions/index/IndexActionTests.java | 2 +- .../elasticsearch/upgrades/IndexingIT.java | 4 +- .../test/old_cluster/50_token_auth.yml | 10 +- 83 files changed, 233 insertions(+), 1021 deletions(-) delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/bulk/11_basic_with_types.yml delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/bulk/21_list_of_strings_with_types.yml delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/bulk/31_big_string_with_types.yml delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/bulk/41_source_with_types.yml delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/bulk/51_refresh_with_types.yml delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/bulk/70_mix_typeless_typeful.yml delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/bulk/81_cas_with_types.yml diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java index 9a5eff72c54f..bfa2e20e2b7f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java @@ -36,8 +36,6 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.rest.action.document.RestBulkAction; import org.elasticsearch.search.SearchHit; import org.hamcrest.Matcher; @@ -74,12 +72,6 @@ public class 
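/*
 * With types removed, index, routing and pipeline are the only bulk-level
 * globals left. A minimal sketch, assuming the listener wiring used in this
 * test class and the stock RequestOptions.DEFAULT:
 *
 *   BulkProcessor processor = BulkProcessor.builder(
 *           (request, bulkListener) ->
 *               highLevelClient().bulkAsync(request, RequestOptions.DEFAULT, bulkListener),
 *           listener)
 *       .setGlobalIndex("tweets")
 *       .setGlobalRouting("routing")
 *       .setGlobalPipeline("pipeline_id")
 *       .build();
 */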
BulkProcessorIT extends ESRestHighLevelClientTestCase { bulkListener), listener); } - private static BulkProcessor.Builder initBulkProcessorBuilderUsingTypes(BulkProcessor.Listener listener) { - return BulkProcessor.builder( - (request, bulkListener) -> highLevelClient().bulkAsync(request, expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE), - bulkListener), listener); - } - public void testThatBulkProcessorCountIsCorrect() throws Exception { final CountDownLatch latch = new CountDownLatch(1); BulkProcessorTestListener listener = new BulkProcessorTestListener(latch); @@ -170,7 +162,6 @@ public class BulkProcessorIT extends ESRestHighLevelClientTestCase { for (BulkItemResponse bulkItemResponse : listener.bulkItems) { assertThat(bulkItemResponse.getFailureMessage(), bulkItemResponse.isFailed(), equalTo(false)); assertThat(bulkItemResponse.getIndex(), equalTo("test")); - assertThat(bulkItemResponse.getType(), equalTo("_doc")); //with concurrent requests > 1 we can't rely on the order of the bulk requests assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(numDocs))); //we do want to check that we don't get duplicate ids back @@ -269,7 +260,6 @@ public class BulkProcessorIT extends ESRestHighLevelClientTestCase { Set readOnlyIds = new HashSet<>(); for (BulkItemResponse bulkItemResponse : listener.bulkItems) { assertThat(bulkItemResponse.getIndex(), either(equalTo("test")).or(equalTo("test-ro"))); - assertThat(bulkItemResponse.getType(), equalTo("_doc")); if (bulkItemResponse.getIndex().equals("test")) { assertThat(bulkItemResponse.isFailed(), equalTo(false)); //with concurrent requests > 1 we can't rely on the order of the bulk requests @@ -298,7 +288,6 @@ public class BulkProcessorIT extends ESRestHighLevelClientTestCase { // tag::bulk-processor-mix-parameters try (BulkProcessor processor = initBulkProcessorBuilder(listener) .setGlobalIndex("tweets") - .setGlobalType("_doc") .setGlobalRouting("routing") .setGlobalPipeline("pipeline_id") .build()) { @@ -326,99 +315,36 @@ public class BulkProcessorIT extends ESRestHighLevelClientTestCase { createIndexWithMultipleShards("test"); createFieldAddingPipleine("pipeline_id", "fieldNameXYZ", "valueXYZ"); - final String customType = "testType"; - final String ignoredType = "ignoredType"; int numDocs = randomIntBetween(10, 10); { final CountDownLatch latch = new CountDownLatch(1); BulkProcessorTestListener listener = new BulkProcessorTestListener(latch); //Check that untyped document additions inherit the global type - String globalType = customType; String localType = null; - try (BulkProcessor processor = initBulkProcessorBuilderUsingTypes(listener) - //let's make sure that the bulk action limit trips, one single execution will index all the documents - .setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs) - .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) - .setGlobalIndex("test") - .setGlobalType(globalType) - .setGlobalRouting("routing") - .setGlobalPipeline("pipeline_id") - .build()) { - - indexDocs(processor, numDocs, null, localType, "test", globalType, "pipeline_id"); - latch.await(); - - assertThat(listener.beforeCounts.get(), equalTo(1)); - assertThat(listener.afterCounts.get(), equalTo(1)); - assertThat(listener.bulkFailures.size(), equalTo(0)); - assertResponseItems(listener.bulkItems, numDocs, globalType); - - Iterable hits = searchAll(new SearchRequest("test").routing("routing")); - - assertThat(hits, 
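/*
 * The blocks deleted in this region asserted how local document types
 * interacted with a global type; only the untyped variant below survives,
 * with the same routing, pipeline and id assertions.
 */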
everyItem(hasProperty(fieldFromSource("fieldNameXYZ"), equalTo("valueXYZ")))); - assertThat(hits, containsInAnyOrder(expectedIds(numDocs))); - } - - } - { - //Check that typed document additions don't inherit the global type - String globalType = ignoredType; - String localType = customType; - final CountDownLatch latch = new CountDownLatch(1); - BulkProcessorTestListener listener = new BulkProcessorTestListener(latch); - try (BulkProcessor processor = initBulkProcessorBuilderUsingTypes(listener) - //let's make sure that the bulk action limit trips, one single execution will index all the documents - .setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs) - .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) - .setGlobalIndex("test") - .setGlobalType(globalType) - .setGlobalRouting("routing") - .setGlobalPipeline("pipeline_id") - .build()) { - indexDocs(processor, numDocs, null, localType, "test", globalType, "pipeline_id"); - latch.await(); - - assertThat(listener.beforeCounts.get(), equalTo(1)); - assertThat(listener.afterCounts.get(), equalTo(1)); - assertThat(listener.bulkFailures.size(), equalTo(0)); - assertResponseItems(listener.bulkItems, numDocs, localType); - - Iterable hits = searchAll(new SearchRequest("test").routing("routing")); - - assertThat(hits, everyItem(hasProperty(fieldFromSource("fieldNameXYZ"), equalTo("valueXYZ")))); - assertThat(hits, containsInAnyOrder(expectedIds(numDocs))); - } - } - { - //Check that untyped document additions and untyped global inherit the established custom type - // (the custom document type introduced to the mapping by the earlier code in this test) - String globalType = null; - String localType = null; - final CountDownLatch latch = new CountDownLatch(1); - BulkProcessorTestListener listener = new BulkProcessorTestListener(latch); try (BulkProcessor processor = initBulkProcessorBuilder(listener) //let's make sure that the bulk action limit trips, one single execution will index all the documents .setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs) .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) .setGlobalIndex("test") - .setGlobalType(globalType) .setGlobalRouting("routing") .setGlobalPipeline("pipeline_id") .build()) { - indexDocs(processor, numDocs, null, localType, "test", globalType, "pipeline_id"); + + indexDocs(processor, numDocs, null, localType, "test", "pipeline_id"); latch.await(); assertThat(listener.beforeCounts.get(), equalTo(1)); assertThat(listener.afterCounts.get(), equalTo(1)); assertThat(listener.bulkFailures.size(), equalTo(0)); - assertResponseItems(listener.bulkItems, numDocs, MapperService.SINGLE_MAPPING_NAME); + assertResponseItems(listener.bulkItems, numDocs); Iterable hits = searchAll(new SearchRequest("test").routing("routing")); assertThat(hits, everyItem(hasProperty(fieldFromSource("fieldNameXYZ"), equalTo("valueXYZ")))); assertThat(hits, containsInAnyOrder(expectedIds(numDocs))); } + } } @@ -431,7 +357,7 @@ public class BulkProcessorIT extends ESRestHighLevelClientTestCase { } private MultiGetRequest indexDocs(BulkProcessor processor, int numDocs, String localIndex, String localType, - String globalIndex, String globalType, String globalPipeline) throws Exception { + String globalIndex, String globalPipeline) throws Exception { MultiGetRequest multiGetRequest = new MultiGetRequest(); for (int i = 1; i <= numDocs; i++) { if (randomBoolean()) { @@ -439,12 +365,7 @@ public class 
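/*
 * The indexDocs helper above loses its globalType parameter; callers in this
 * patch now pass, for example:
 *
 *   indexDocs(processor, numDocs, null, localType, "test", "pipeline_id");
 *   indexDocs(processor, numDocs, "test", null, null, null);
 */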
BulkProcessorIT extends ESRestHighLevelClientTestCase { .source(XContentType.JSON, "field", randomRealisticUnicodeOfLengthBetween(1, 30))); } else { BytesArray data = bytesBulkRequest(localIndex, localType, i); - processor.add(data, globalIndex, globalType, globalPipeline, XContentType.JSON); - - if (localType != null) { - // If the payload contains types, parsing it into a bulk request results in a warning. - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); - } + processor.add(data, globalIndex, globalPipeline, XContentType.JSON); } multiGetRequest.add(localIndex, Integer.toString(i)); } @@ -475,19 +396,14 @@ public class BulkProcessorIT extends ESRestHighLevelClientTestCase { } private MultiGetRequest indexDocs(BulkProcessor processor, int numDocs) throws Exception { - return indexDocs(processor, numDocs, "test", null, null, null, null); + return indexDocs(processor, numDocs, "test", null, null, null); } private static void assertResponseItems(List bulkItemResponses, int numDocs) { - assertResponseItems(bulkItemResponses, numDocs, MapperService.SINGLE_MAPPING_NAME); - } - - private static void assertResponseItems(List bulkItemResponses, int numDocs, String expectedType) { assertThat(bulkItemResponses.size(), is(numDocs)); int i = 1; for (BulkItemResponse bulkItemResponse : bulkItemResponses) { assertThat(bulkItemResponse.getIndex(), equalTo("test")); - assertThat(bulkItemResponse.getType(), equalTo(expectedType)); assertThat(bulkItemResponse.getId(), equalTo(Integer.toString(i++))); assertThat("item " + i + " failed with cause: " + bulkItemResponse.getFailureMessage(), bulkItemResponse.isFailed(), equalTo(false)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java index b8f557d20616..3b918ed3a111 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java @@ -106,7 +106,7 @@ public class BulkRequestWithGlobalParametersIT extends ESRestHighLevelClientTest } public void testGlobalIndex() throws IOException { - BulkRequest request = new BulkRequest("global_index", null); + BulkRequest request = new BulkRequest("global_index"); request.add(new IndexRequest().id("1") .source(XContentType.JSON, "field", "bulk1")); request.add(new IndexRequest().id("2") @@ -120,7 +120,7 @@ public class BulkRequestWithGlobalParametersIT extends ESRestHighLevelClientTest @SuppressWarnings("unchecked") public void testIndexGlobalAndPerRequest() throws IOException { - BulkRequest request = new BulkRequest("global_index", null); + BulkRequest request = new BulkRequest("global_index"); request.add(new IndexRequest("local_index").id("1") .source(XContentType.JSON, "field", "bulk1")); request.add(new IndexRequest().id("2") // will take global index @@ -168,19 +168,6 @@ public class BulkRequestWithGlobalParametersIT extends ESRestHighLevelClientTest assertThat(hits, containsInAnyOrder(hasId("1"), hasId("2"))); } - public void testGlobalIndexNoTypes() throws IOException { - BulkRequest request = new BulkRequest("global_index"); - request.add(new IndexRequest().id("1") - .source(XContentType.JSON, "field", "bulk1")); - request.add(new IndexRequest().id("2") - .source(XContentType.JSON, "field", "bulk2")); - - bulk(request); - - Iterable hits = searchAll("global_index"); - assertThat(hits, 
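/*
 * The typed two-argument constructor is gone, so a global index is set with
 * the single-argument form already used above, which is why the NoTypes
 * variant deleted here became redundant:
 *
 *   BulkRequest request = new BulkRequest("global_index");
 *   request.add(new IndexRequest().id("1").source(XContentType.JSON, "field", "bulk1"));
 */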
everyItem(hasIndex("global_index"))); - } - private BulkResponse bulk(BulkRequest request) throws IOException { BulkResponse bulkResponse = execute(request, highLevelClient()::bulk, highLevelClient()::bulkAsync, RequestOptions.DEFAULT); assertFalse(bulkResponse.hasFailures()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index 1f7928dc4e99..e2010aaece66 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -54,7 +54,6 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.rest.action.document.RestBulkAction; import org.elasticsearch.rest.action.document.RestDeleteAction; import org.elasticsearch.rest.action.document.RestIndexAction; import org.elasticsearch.rest.action.document.RestUpdateAction; @@ -401,20 +400,6 @@ public class CrudIT extends ESRestHighLevelClientTestCase { } } - public void testMultiGetWithTypes() throws IOException { - BulkRequest bulk = new BulkRequest(); - bulk.setRefreshPolicy(RefreshPolicy.IMMEDIATE); - bulk.add(new IndexRequest("index", "type", "id1") - .source("{\"field\":\"value1\"}", XContentType.JSON)); - bulk.add(new IndexRequest("index", "type", "id2") - .source("{\"field\":\"value2\"}", XContentType.JSON)); - - highLevelClient().bulk(bulk, expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE)); - MultiGetRequest multiGetRequest = new MultiGetRequest(); - multiGetRequest.add("index", "id1"); - multiGetRequest.add("index", "id2"); - } - public void testIndex() throws IOException { final XContentType xContentType = randomFrom(XContentType.values()); { @@ -897,7 +882,6 @@ public class CrudIT extends ESRestHighLevelClientTestCase { assertEquals(i, bulkItemResponse.getItemId()); assertEquals("index", bulkItemResponse.getIndex()); - assertEquals("_doc", bulkItemResponse.getType()); assertEquals(String.valueOf(i), bulkItemResponse.getId()); DocWriteRequest.OpType requestOpType = bulkRequest.requests().get(i).opType(); diff --git a/docs/build.gradle b/docs/build.gradle index b7e2f81e3d74..23308a225806 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -574,7 +574,6 @@ buildRestTests.setups['library'] = ''' - do: bulk: index: library - type: book refresh: true body: | {"index":{"_id": "Leviathan Wakes"}} @@ -923,7 +922,6 @@ buildRestTests.setups['farequote_data'] = buildRestTests.setups['farequote_index - do: bulk: index: farequote - type: metric refresh: true body: | {"index": {"_id":"1"}} @@ -983,7 +981,6 @@ buildRestTests.setups['server_metrics_data'] = buildRestTests.setups['server_met - do: bulk: index: server-metrics - type: metric refresh: true body: | {"index": {"_id":"1177"}} diff --git a/docs/reference/aggregations/bucket/rare-terms-aggregation.asciidoc b/docs/reference/aggregations/bucket/rare-terms-aggregation.asciidoc index 88500a4c887e..f3e906f7b53b 100644 --- a/docs/reference/aggregations/bucket/rare-terms-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/rare-terms-aggregation.asciidoc @@ -25,7 +25,7 @@ PUT /products } } -POST /products/_doc/_bulk?refresh +POST /products/_bulk?refresh {"index":{"_id":0}} {"genre": "rock", "product": "Product A"} {"index":{"_id":1}} diff --git a/docs/reference/sql/getting-started.asciidoc 
b/docs/reference/sql/getting-started.asciidoc index dbcdb68d5e6a..00ba8be6ba56 100644 --- a/docs/reference/sql/getting-started.asciidoc +++ b/docs/reference/sql/getting-started.asciidoc @@ -8,7 +8,7 @@ an index with some data to experiment with: [source,console] -------------------------------------------------- -PUT /library/book/_bulk?refresh +PUT /library/_bulk?refresh {"index":{"_id": "Leviathan Wakes"}} {"name": "Leviathan Wakes", "author": "James S.A. Corey", "release_date": "2011-06-02", "page_count": 561} {"index":{"_id": "Hyperion"}} diff --git a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/60_typed_keys.yml b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/60_typed_keys.yml index 0f97afbe5ab6..84a55f8b6f99 100644 --- a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/60_typed_keys.yml +++ b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/60_typed_keys.yml @@ -21,15 +21,15 @@ setup: bulk: refresh: true body: - - '{"index": {"_index": "test-0", "_type": "_doc"}}' + - '{"index": {"_index": "test-0"}}' - '{"ip": "10.0.0.1", "integer": 38, "float": 12.5713, "name": "Ruth", "bool": true}' - - '{"index": {"_index": "test-0", "_type": "_doc"}}' + - '{"index": {"_index": "test-0"}}' - '{"ip": "10.0.0.2", "integer": 42, "float": 15.3393, "name": "Jackie", "surname": "Bowling", "bool": false}' - - '{"index": {"_index": "test-1", "_type": "_doc"}}' + - '{"index": {"_index": "test-1"}}' - '{"ip": "10.0.0.3", "integer": 29, "float": 19.0517, "name": "Stephanie", "bool": true}' - - '{"index": {"_index": "test-1", "_type": "_doc"}}' + - '{"index": {"_index": "test-1"}}' - '{"ip": "10.0.0.4", "integer": 19, "float": 19.3717, "surname": "Hamilton", "bool": true}' - - '{"index": {"_index": "test-2", "_type": "_doc"}}' + - '{"index": {"_index": "test-2"}}' - '{"ip": "10.0.0.5", "integer": 0, "float": 17.3349, "name": "Natalie", "bool": false}' --- diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index d4b154fb97d3..3a91aac0c6a3 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -290,7 +290,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { if (rarely()) { versionConflicts++; responses[i] = new BulkItemResponse(i, randomFrom(DocWriteRequest.OpType.values()), - new Failure(shardId.getIndexName(), "type", "id" + i, + new Failure(shardId.getIndexName(), "id" + i, new VersionConflictEngineException(shardId, "id", "test"))); continue; } @@ -399,7 +399,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { * Mimicks bulk indexing failures. 
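 * (With types removed, the failure below is built with the three-argument
 * form, new Failure("index", "id", new RuntimeException("test")), instead of
 * the old four-argument typed form.)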
*/ public void testBulkFailuresAbortRequest() throws Exception { - Failure failure = new Failure("index", "type", "id", new RuntimeException("test")); + Failure failure = new Failure("index", "id", new RuntimeException("test")); DummyAsyncBulkByScrollAction action = new DummyAsyncBulkByScrollAction(); BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[] {new BulkItemResponse(0, DocWriteRequest.OpType.CREATE, failure)}, randomLong()); @@ -902,7 +902,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { } if (i == toReject) { responses[i] = new BulkItemResponse(i, item.opType(), - new Failure(response.getIndex(), response.getType(), response.getId(), new EsRejectedExecutionException())); + new Failure(response.getIndex(), response.getId(), new EsRejectedExecutionException())); } else { responses[i] = new BulkItemResponse(i, item.opType(), response); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseTests.java index 6809a02585e1..0fcc51ac7e8d 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseTests.java @@ -51,7 +51,7 @@ public class BulkIndexByScrollResponseTests extends ESTestCase { BulkByScrollTask.Status status = new BulkByScrollTask.Status(i, 0, 0, 0, 0, 0, 0, 0, 0, 0, timeValueMillis(0), 0f, thisReasonCancelled, timeValueMillis(0)); List bulkFailures = frequently() ? emptyList() - : IntStream.range(0, between(1, 3)).mapToObj(j -> new BulkItemResponse.Failure("idx", "type", "id", new Exception())) + : IntStream.range(0, between(1, 3)).mapToObj(j -> new BulkItemResponse.Failure("idx", "id", new Exception())) .collect(Collectors.toList()); allBulkFailures.addAll(bulkFailures); List searchFailures = frequently() ? 
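/*
 * BulkItemResponse.Failure now takes (index, id, cause), as on the replaced
 * line above:
 *
 *   new BulkItemResponse.Failure("idx", "id", new Exception())
 */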
emptyList() diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml index 1763baebe027..d36511f3a169 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml @@ -124,7 +124,6 @@ - match: {version_conflicts: 1} - match: {batches: 1} - match: {failures.0.index: test} - - match: {failures.0.type: _doc} - match: {failures.0.id: "1"} - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} @@ -177,7 +176,6 @@ - match: {version_conflicts: 1} - match: {batches: 1} - match: {failures.0.index: test} - - match: {failures.0.type: _doc} - match: {failures.0.id: "1"} - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml index 2a3696a4005c..1aaf66478210 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml @@ -104,7 +104,6 @@ - match: {version_conflicts: 1} - match: {batches: 1} - match: {failures.0.index: test} - - match: {failures.0.type: _doc} - match: {failures.0.id: "1"} - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} @@ -144,7 +143,6 @@ - match: {version_conflicts: 1} - match: {batches: 1} - match: {failures.0.index: test} - - match: {failures.0.type: _doc} - match: {failures.0.id: "1"} - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java index eaec286c7b23..76d7f58403cb 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java @@ -80,7 +80,7 @@ public class Netty4HttpRequestSizeLimitIT extends ESNetty4IntegTestCase { @SuppressWarnings("unchecked") Tuple[] requests = new Tuple[150]; for (int i = 0; i < requests.length; i++) { - requests[i] = Tuple.tuple("/index/type/_bulk", bulkRequest); + requests[i] = Tuple.tuple("/index/_bulk", bulkRequest); } HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); diff --git a/plugins/repository-azure/qa/microsoft-azure-storage/src/test/resources/rest-api-spec/test/repository_azure/10_repository.yml b/plugins/repository-azure/qa/microsoft-azure-storage/src/test/resources/rest-api-spec/test/repository_azure/10_repository.yml index fade1f9f1e67..088c4d28ef71 100644 --- a/plugins/repository-azure/qa/microsoft-azure-storage/src/test/resources/rest-api-spec/test/repository_azure/10_repository.yml +++ b/plugins/repository-azure/qa/microsoft-azure-storage/src/test/resources/rest-api-spec/test/repository_azure/10_repository.yml @@ -35,17 +35,14 @@ setup: body: - index: _index: docs - _type: doc _id: 1 - snapshot: one - index: _index: docs - _type: doc _id: 2 - snapshot: one - index: _index: docs - 
_type: doc _id: 3 - snapshot: one @@ -83,22 +80,18 @@ setup: body: - index: _index: docs - _type: doc _id: 4 - snapshot: two - index: _index: docs - _type: doc _id: 5 - snapshot: two - index: _index: docs - _type: doc _id: 6 - snapshot: two - index: _index: docs - _type: doc _id: 7 - snapshot: two diff --git a/plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml b/plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml index 553b6a3e14e5..154a6b52c156 100644 --- a/plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml +++ b/plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml @@ -48,17 +48,14 @@ setup: body: - index: _index: docs - _type: doc _id: 1 - snapshot: one - index: _index: docs - _type: doc _id: 2 - snapshot: one - index: _index: docs - _type: doc _id: 3 - snapshot: one @@ -96,22 +93,18 @@ setup: body: - index: _index: docs - _type: doc _id: 4 - snapshot: two - index: _index: docs - _type: doc _id: 5 - snapshot: two - index: _index: docs - _type: doc _id: 6 - snapshot: two - index: _index: docs - _type: doc _id: 7 - snapshot: two diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml index 57b2e42bb250..eaf0c766464d 100644 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml @@ -133,17 +133,14 @@ setup: body: - index: _index: docs - _type: doc _id: 1 - snapshot: one - index: _index: docs - _type: doc _id: 2 - snapshot: one - index: _index: docs - _type: doc _id: 3 - snapshot: one diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/60_tophits.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/60_tophits.yml index 9d94e7d5abb3..ae213bae2140 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/60_tophits.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/60_tophits.yml @@ -24,7 +24,7 @@ teardown: bulk: refresh: true body: - - '{"index": {"_index": "single_doc_index", "_type": "test_type"}}' + - '{"index": {"_index": "single_doc_index" }}' - '{"f1": "local_cluster", "sort_field": 0}' - do: search: diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java index 6edff83df122..cf4416a59db0 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java @@ -24,7 +24,6 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.Booleans; -import org.elasticsearch.rest.action.document.RestBulkAction; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -182,12 +181,11 @@ public class IndexingIT extends AbstractRollingTestCase { private void bulk(String index, String valueSuffix, int count) throws 
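/*
 * Action metadata lines in the generated bulk body no longer carry _type;
 * each document contributes just an action line plus its source, e.g.:
 *
 *   {"index": {"_index": "test"}}
 *   {"f1": "v0", "f2": 0}
 */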
IOException { StringBuilder b = new StringBuilder(); for (int i = 0; i < count; i++) { - b.append("{\"index\": {\"_index\": \"").append(index).append("\", \"_type\": \"_doc\"}}\n"); + b.append("{\"index\": {\"_index\": \"").append(index).append("\"}}\n"); b.append("{\"f1\": \"v").append(i).append(valueSuffix).append("\", \"f2\": ").append(i).append("}\n"); } Request bulk = new Request("POST", "/_bulk"); bulk.addParameter("refresh", "true"); - bulk.setOptions(expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE)); bulk.setJsonEntity(b.toString()); client().performRequest(bulk); } diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java index ba1cd80fc88a..0e6cd229084e 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java @@ -19,9 +19,8 @@ package org.elasticsearch.upgrades; import org.apache.http.util.EntityUtils; -import org.junit.Before; import org.elasticsearch.client.Request; -import org.elasticsearch.rest.action.document.RestBulkAction; +import org.junit.Before; import java.io.IOException; @@ -53,14 +52,13 @@ public class XPackIT extends AbstractRollingTestCase { * might have already installed a trial license. */ public void testBasicFeature() throws IOException { - Request bulk = new Request("POST", "/sql_test/doc/_bulk"); + Request bulk = new Request("POST", "/sql_test/_bulk"); bulk.setJsonEntity( "{\"index\":{}}\n" + "{\"f\": \"1\"}\n" + "{\"index\":{}}\n" + "{\"f\": \"2\"}\n"); bulk.addParameter("refresh", "true"); - bulk.setOptions(expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE)); client().performRequest(bulk); Request sql = new Request("POST", "/_sql"); diff --git a/qa/smoke-test-ingest-disabled/src/test/resources/rest-api-spec/test/ingest_mustache/10_ingest_disabled.yml b/qa/smoke-test-ingest-disabled/src/test/resources/rest-api-spec/test/ingest_mustache/10_ingest_disabled.yml index 5a3f64151f4e..fb29017c3605 100644 --- a/qa/smoke-test-ingest-disabled/src/test/resources/rest-api-spec/test/ingest_mustache/10_ingest_disabled.yml +++ b/qa/smoke-test-ingest-disabled/src/test/resources/rest-api-spec/test/ingest_mustache/10_ingest_disabled.yml @@ -54,7 +54,6 @@ "docs": [ { "_index": "index", - "_type": "type", "_id": "id", "_source": { "foo": "bar" @@ -92,12 +91,10 @@ body: - index: _index: test_index - _type: test_type _id: test_id - f1: v1 - index: _index: test_index - _type: test_type _id: test_id2 - f1: v2 @@ -109,12 +106,10 @@ body: - index: _index: test_index - _type: test_type _id: test_id - f1: v1 - index: _index: test_index - _type: test_type _id: test_id2 pipeline: my_pipeline_1 - f1: v2 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/11_basic_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/11_basic_with_types.yml deleted file mode 100644 index 99a2d00882a3..000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/11_basic_with_types.yml +++ /dev/null @@ -1,119 +0,0 @@ ---- -"Array of objects": - - do: - bulk: - refresh: true - body: - - index: - _index: test_index - _type: test_type - _id: test_id - - f1: v1 - f2: 42 - - index: - _index: test_index - _type: test_type - _id: test_id2 - - f1: v2 - f2: 47 - - - do: - count: - index: test_index - - - match: {count: 2} - ---- -"Empty _id": - - do: - bulk: - refresh: true - body: - - index: - _index: test - _type: type - _id: '' - - f: 1 - - 
index: - _index: test - _type: type - _id: id - - f: 2 - - index: - _index: test - _type: type - - f: 3 - - match: { errors: true } - - match: { items.0.index.status: 400 } - - match: { items.0.index.error.type: illegal_argument_exception } - - match: { items.0.index.error.reason: if _id is specified it must not be empty } - - match: { items.1.index.result: created } - - match: { items.2.index.result: created } - - - do: - count: - index: test - - - match: { count: 2 } - ---- -"Empty _id with op_type create": - - skip: - version: " - 7.4.99" - reason: "auto id + op type create only supported since 7.5" - - - do: - bulk: - refresh: true - body: - - index: - _index: test - _type: type - _id: '' - - f: 1 - - index: - _index: test - _type: type - _id: id - - f: 2 - - index: - _index: test - _type: type - - f: 3 - - create: - _index: test - _type: type - - f: 4 - - index: - _index: test - op_type: create - - f: 5 - - match: { errors: true } - - match: { items.0.index.status: 400 } - - match: { items.0.index.error.type: illegal_argument_exception } - - match: { items.0.index.error.reason: if _id is specified it must not be empty } - - match: { items.1.index.result: created } - - match: { items.2.index.result: created } - - match: { items.3.create.result: created } - - match: { items.4.create.result: created } - - - do: - count: - index: test - - - match: { count: 4 } - ---- -"empty action": - - skip: - features: headers - - - do: - catch: /Malformed action\/metadata line \[3\], expected FIELD_NAME but found \[END_OBJECT\]/ - headers: - Content-Type: application/json - bulk: - body: | - {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id"}} - {"f1": "v1", "f2": 42} - {} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/21_list_of_strings_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/21_list_of_strings_with_types.yml deleted file mode 100644 index def91f428072..000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/21_list_of_strings_with_types.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -"List of strings": - - do: - bulk: - refresh: true - body: - - '{"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id"}}' - - '{"f1": "v1", "f2": 42}' - - '{"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id2"}}' - - '{"f1": "v2", "f2": 47}' - - - do: - count: - index: test_index - - - match: {count: 2} - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/31_big_string_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/31_big_string_with_types.yml deleted file mode 100644 index 1d117253c9b0..000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/31_big_string_with_types.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -"One big string": - - do: - bulk: - refresh: true - body: | - {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id"}} - {"f1": "v1", "f2": 42} - {"index": {"_index": "test_index", "_type": "test_type", "_id": "test_id2"}} - {"f1": "v2", "f2": 47} - - - do: - count: - index: test_index - - - match: {count: 2} - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/41_source_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/41_source_with_types.yml deleted file mode 100644 index 3c8a86c13bda..000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/41_source_with_types.yml +++ /dev/null @@ -1,76 +0,0 @@ ---- -"Source filtering": - - do: - index: 
- refresh: true - index: test_index - type: test_type - id: test_id_1 - body: { "foo": "bar", "bar": "foo" } - - - do: - index: - refresh: true - index: test_index - type: test_type - id: test_id_2 - body: { "foo": "qux", "bar": "pux" } - - - do: - index: - refresh: true - index: test_index - type: test_type - id: test_id_3 - body: { "foo": "corge", "bar": "forge" } - - - - do: - bulk: - refresh: true - body: | - { "update": { "_index": "test_index", "_type": "test_type", "_id": "test_id_1", "_source": true } } - { "doc": { "foo": "baz" } } - { "update": { "_index": "test_index", "_type": "test_type", "_id": "test_id_2" } } - { "_source": true, "doc": { "foo": "quux" } } - - - match: { items.0.update.get._source.foo: baz } - - match: { items.1.update.get._source.foo: quux } - - - do: - bulk: - index: test_index - type: test_type - _source: true - body: | - { "update": { "_id": "test_id_3" } } - { "doc": { "foo": "garply" } } - - - match: { items.0.update.get._source.foo: garply } - - - do: - bulk: - refresh: true - body: | - { "update": { "_index": "test_index", "_type": "test_type", "_id": "test_id_1", "_source": {"includes": "bar"} } } - { "doc": { "foo": "baz" } } - { "update": { "_index": "test_index", "_type": "test_type", "_id": "test_id_2" } } - { "_source": {"includes": "foo"}, "doc": { "foo": "quux" } } - - - match: { items.0.update.get._source.bar: foo } - - is_false: items.0.update.get._source.foo - - match: { items.1.update.get._source.foo: quux } - - is_false: items.1.update.get._source.bar - - - do: - bulk: - index: test_index - type: test_type - _source_includes: foo - body: | - { "update": { "_id": "test_id_3" } } - { "doc": { "foo": "garply" } } - - - match: { items.0.update.get._source.foo: garply } - - is_false: items.0.update.get._source.bar - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/51_refresh_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/51_refresh_with_types.yml deleted file mode 100644 index 6326b9464caa..000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/51_refresh_with_types.yml +++ /dev/null @@ -1,48 +0,0 @@ ---- -"refresh=true immediately makes changes are visible in search": - - do: - bulk: - refresh: true - body: | - {"index": {"_index": "bulk_50_refresh_1", "_type": "test_type", "_id": "bulk_50_refresh_id1"}} - {"f1": "v1", "f2": 42} - {"index": {"_index": "bulk_50_refresh_1", "_type": "test_type", "_id": "bulk_50_refresh_id2"}} - {"f1": "v2", "f2": 47} - - - do: - count: - index: bulk_50_refresh_1 - - match: {count: 2} - ---- -"refresh=empty string immediately makes changes are visible in search": - - do: - bulk: - refresh: "" - body: | - {"index": {"_index": "bulk_50_refresh_2", "_type": "test_type", "_id": "bulk_50_refresh_id3"}} - {"f1": "v1", "f2": 42} - {"index": {"_index": "bulk_50_refresh_2", "_type": "test_type", "_id": "bulk_50_refresh_id4"}} - {"f1": "v2", "f2": 47} - - - do: - count: - index: bulk_50_refresh_2 - - match: {count: 2} - - ---- -"refresh=wait_for waits until changes are visible in search": - - do: - bulk: - refresh: wait_for - body: | - {"index": {"_index": "bulk_50_refresh_3", "_type": "test_type", "_id": "bulk_50_refresh_id5"}} - {"f1": "v1", "f2": 42} - {"index": {"_index": "bulk_50_refresh_3", "_type": "test_type", "_id": "bulk_50_refresh_id6"}} - {"f1": "v2", "f2": 47} - - - do: - count: - index: bulk_50_refresh_3 - - match: {count: 2} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/70_mix_typeless_typeful.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/70_mix_typeless_typeful.yml deleted file mode 100644 index 50bf6ac5bcf3..000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/70_mix_typeless_typeful.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- -"bulk without types on an index that has types": - - - do: - indices.create: # not using include_type_name: false on purpose - include_type_name: true - index: index - body: - mappings: - not_doc: - properties: - foo: - type: "keyword" - - do: - bulk: - refresh: true - body: - - index: - _index: index - _id: 0 - - foo: bar - - index: - _index: index - _id: 1 - - foo: bar - - - do: - count: - index: index - - - match: {count: 2} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/81_cas_with_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/81_cas_with_types.yml deleted file mode 100644 index 7de82e4fb23e..000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/81_cas_with_types.yml +++ /dev/null @@ -1,41 +0,0 @@ ---- -"Compare And Swap Sequence Numbers": - - - do: - index: - index: test_1 - type: _doc - id: 1 - body: { foo: bar } - - match: { _version: 1} - - set: { _seq_no: seqno } - - set: { _primary_term: primary_term } - - - do: - bulk: - body: - - index: - _index: test_1 - _type: _doc - _id: 1 - if_seq_no: 10000 - if_primary_term: $primary_term - - foo: bar2 - - - match: { errors: true } - - match: { items.0.index.status: 409 } - - match: { items.0.index.error.type: version_conflict_engine_exception } - - - do: - bulk: - body: - - index: - _index: test_1 - _type: _doc - _id: 1 - if_seq_no: $seqno - if_primary_term: $primary_term - - foo: bar2 - - - match: { errors: false} - - match: { items.0.index.status: 200 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/14_alias_to_multiple_indices.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/14_alias_to_multiple_indices.yml index 8d18ff768511..87a40732be88 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/14_alias_to_multiple_indices.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/14_alias_to_multiple_indices.yml @@ -6,11 +6,11 @@ bulk: refresh: true body: | - {"index": {"_index": "test_1", "_type": "_doc", "_id": 1}} + {"index": {"_index": "test_1", "_id": 1}} { "foo": "bar" } - {"index": {"_index": "test_2", "_type": "_doc", "_id": 2}} + {"index": {"_index": "test_2", "_id": 2}} { "foo": "bar" } - {"index": {"_index": "test_3", "_type": "_doc", "_id": 3}} + {"index": {"_index": "test_3", "_id": 3}} { "foo": "bar" } - do: diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java index c078dac1bb58..fc8816460c96 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java @@ -65,14 +65,6 @@ public interface DocWriteRequest extends IndicesRequest { */ String type(); - /** - * Set the default type supplied to a bulk - * request if this individual request's type is null - * or empty - * @return the Request - */ - T defaultTypeIfNull(String defaultType); - /** * Get the id of the document for this request * @return the id diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java index e1306a437ca9..eaacf67c587a 100644 --- 
a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java @@ -78,7 +78,7 @@ public class BulkItemRequest implements Writeable { */ public void abort(String index, Exception cause) { if (primaryResponse == null) { - final BulkItemResponse.Failure failure = new BulkItemResponse.Failure(index, request.type(), request.id(), + final BulkItemResponse.Failure failure = new BulkItemResponse.Failure(index, request.id(), Objects.requireNonNull(cause), true); setPrimaryResponse(new BulkItemResponse(id, request.opType(), failure)); } else { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index 396d59c71c3a..2243a95dc7a9 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -21,6 +21,7 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteRequest.OpType; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteResponse; @@ -37,6 +38,7 @@ import org.elasticsearch.common.xcontent.StatusToXContentObject; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.rest.RestStatus; @@ -54,7 +56,6 @@ import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknown public class BulkItemResponse implements Writeable, StatusToXContentObject { private static final String _INDEX = "_index"; - private static final String _TYPE = "_type"; private static final String _ID = "_id"; private static final String STATUS = "status"; private static final String ERROR = "error"; @@ -73,7 +74,6 @@ public class BulkItemResponse implements Writeable, StatusToXContentObject { builder.field(STATUS, response.status().getStatus()); } else { builder.field(_INDEX, failure.getIndex()); - builder.field(_TYPE, failure.getType()); builder.field(_ID, failure.getId()); builder.field(STATUS, failure.getStatus().getStatus()); builder.startObject(ERROR); @@ -151,7 +151,7 @@ public class BulkItemResponse implements Writeable, StatusToXContentObject { BulkItemResponse bulkItemResponse; if (exception != null) { - Failure failure = new Failure(builder.getShardId().getIndexName(), builder.getType(), builder.getId(), exception, status); + Failure failure = new Failure(builder.getShardId().getIndexName(), builder.getId(), exception, status); bulkItemResponse = new BulkItemResponse(id, opType, failure); } else { bulkItemResponse = new BulkItemResponse(id, opType, builder.build()); @@ -164,13 +164,11 @@ public class BulkItemResponse implements Writeable, StatusToXContentObject { */ public static class Failure implements Writeable, ToXContentFragment { public static final String INDEX_FIELD = "index"; - public static final String TYPE_FIELD = "type"; public static final String ID_FIELD = "id"; public static final String CAUSE_FIELD = "cause"; public static final String STATUS_FIELD = "status"; private final String index; - private final String type; private final String id; private final 
Exception cause; private final RestStatus status; @@ -183,12 +181,11 @@ public class BulkItemResponse implements Writeable, StatusToXContentObject { true, a -> new Failure( - (String)a[0], (String)a[1], (String)a[2], (Exception)a[3], RestStatus.fromCode((int)a[4]) + (String)a[0], (String)a[1], (Exception)a[2], RestStatus.fromCode((int)a[3]) ) ); static { PARSER.declareString(constructorArg(), new ParseField(INDEX_FIELD)); - PARSER.declareString(constructorArg(), new ParseField(TYPE_FIELD)); PARSER.declareString(optionalConstructorArg(), new ParseField(ID_FIELD)); PARSER.declareObject(constructorArg(), (p, c) -> ElasticsearchException.fromXContent(p), new ParseField(CAUSE_FIELD)); PARSER.declareInt(constructorArg(), new ParseField(STATUS_FIELD)); @@ -197,29 +194,28 @@ public class BulkItemResponse implements Writeable, StatusToXContentObject { /** * For write failures before operation was assigned a sequence number. * - * use @{link {@link #Failure(String, String, String, Exception, long)}} + * use {@link #Failure(String, String, Exception, long)} * to record operation sequence no with failure */ - public Failure(String index, String type, String id, Exception cause) { - this(index, type, id, cause, ExceptionsHelper.status(cause), SequenceNumbers.UNASSIGNED_SEQ_NO, false); + public Failure(String index, String id, Exception cause) { + this(index, id, cause, ExceptionsHelper.status(cause), SequenceNumbers.UNASSIGNED_SEQ_NO, false); } - public Failure(String index, String type, String id, Exception cause, boolean aborted) { - this(index, type, id, cause, ExceptionsHelper.status(cause), SequenceNumbers.UNASSIGNED_SEQ_NO, aborted); + public Failure(String index, String id, Exception cause, boolean aborted) { + this(index, id, cause, ExceptionsHelper.status(cause), SequenceNumbers.UNASSIGNED_SEQ_NO, aborted); } - public Failure(String index, String type, String id, Exception cause, RestStatus status) { - this(index, type, id, cause, status, SequenceNumbers.UNASSIGNED_SEQ_NO, false); + public Failure(String index, String id, Exception cause, RestStatus status) { + this(index, id, cause, status, SequenceNumbers.UNASSIGNED_SEQ_NO, false); } /** For write failures after operation was assigned a sequence number.
*/ - public Failure(String index, String type, String id, Exception cause, long seqNo) { - this(index, type, id, cause, ExceptionsHelper.status(cause), seqNo, false); + public Failure(String index, String id, Exception cause, long seqNo) { + this(index, id, cause, ExceptionsHelper.status(cause), seqNo, false); } - public Failure(String index, String type, String id, Exception cause, RestStatus status, long seqNo, boolean aborted) { + public Failure(String index, String id, Exception cause, RestStatus status, long seqNo, boolean aborted) { this.index = index; - this.type = type; this.id = id; this.cause = cause; this.status = status; @@ -232,7 +228,11 @@ public class BulkItemResponse implements Writeable, StatusToXContentObject { */ public Failure(StreamInput in) throws IOException { index = in.readString(); - type = in.readString(); + if (in.getVersion().before(Version.V_8_0_0)) { + in.readString(); + // can't make an assertion about type names here because too many tests still set their own + // types bypassing various checks + } id = in.readOptionalString(); cause = in.readException(); status = ExceptionsHelper.status(cause); @@ -243,7 +243,9 @@ public class BulkItemResponse implements Writeable, StatusToXContentObject { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(getIndex()); - out.writeString(getType()); + if (out.getVersion().before(Version.V_8_0_0)) { + out.writeString(MapperService.SINGLE_MAPPING_NAME); + } out.writeOptionalString(getId()); out.writeException(getCause()); out.writeZLong(getSeqNo()); @@ -257,13 +259,6 @@ public class BulkItemResponse implements Writeable, StatusToXContentObject { return this.index; } - /** - * The type of the action. - */ - public String getType() { - return type; - } - /** * The id of the action. */ @@ -313,7 +308,6 @@ public class BulkItemResponse implements Writeable, StatusToXContentObject { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field(INDEX_FIELD, index); - builder.field(TYPE_FIELD, type); if (id != null) { builder.field(ID_FIELD, id); } @@ -398,16 +392,6 @@ public class BulkItemResponse implements Writeable, StatusToXContentObject { return response.getIndex(); } - /** - * The type of the action. - */ - public String getType() { - if (failure != null) { - return failure.getType(); - } - return response.getType(); - } - /** * The id of the action. */ diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java index 65452f9a75db..8967ba4f41b2 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java @@ -239,7 +239,7 @@ class BulkPrimaryExecutionContext { executionResult = new BulkItemResponse(getCurrentItem().id(), docWriteRequest.opType(), // Make sure to use getCurrentItem().index() here, if you use docWriteRequest.index() it will use the // concrete index instead of an alias if used! 
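// ---------------------------------------------------------------------------
// Editor's note: a minimal sketch, not part of this patch, of the wire-format
// compatibility pattern the Failure class adopts above. The type field is gone
// from the 8.0 stream, but pre-8.0 peers still send and expect one, so the
// reader discards the extra string and the writer emits the constant "_doc".
// The class and field names below are hypothetical.
import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.mapper.MapperService;

final class RemovedFieldBwcSketch {
    private final String index;
    private final String id;

    RemovedFieldBwcSketch(StreamInput in) throws IOException {
        index = in.readString();
        if (in.getVersion().before(Version.V_8_0_0)) {
            in.readString(); // a pre-8.0 sender still wrote a type here; skip it
        }
        id = in.readOptionalString();
    }

    void writeTo(StreamOutput out) throws IOException {
        out.writeString(index);
        if (out.getVersion().before(Version.V_8_0_0)) {
            out.writeString(MapperService.SINGLE_MAPPING_NAME); // "_doc" for old readers
        }
        out.writeOptionalString(id);
    }
}
// ---------------------------------------------------------------------------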
- new BulkItemResponse.Failure(getCurrentItem().index(), docWriteRequest.type(), docWriteRequest.id(), cause)); + new BulkItemResponse.Failure(getCurrentItem().index(), docWriteRequest.id(), cause)); markAsCompleted(executionResult); } @@ -273,7 +273,7 @@ class BulkPrimaryExecutionContext { // Make sure to use request.index() here, if you // use docWriteRequest.index() it will use the // concrete index instead of an alias if used! - new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), + new BulkItemResponse.Failure(request.index(), docWriteRequest.id(), result.getFailure(), result.getSeqNo())); break; default: diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index 08c42c5ea40d..a44d47859d9c 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -92,7 +92,6 @@ public class BulkProcessor implements Closeable { private TimeValue flushInterval = null; private BackoffPolicy backoffPolicy = BackoffPolicy.exponentialBackoff(); private String globalIndex; - private String globalType; private String globalRouting; private String globalPipeline; @@ -148,11 +147,6 @@ public class BulkProcessor implements Closeable { return this; } - public Builder setGlobalType(String globalType) { - this.globalType = globalType; - return this; - } - public Builder setGlobalRouting(String globalRouting) { this.globalRouting = globalRouting; return this; @@ -188,7 +182,7 @@ public class BulkProcessor implements Closeable { } private Supplier createBulkRequestWithGlobalDefaults() { - return () -> new BulkRequest(globalIndex, globalType) + return () -> new BulkRequest(globalIndex) .pipeline(globalPipeline) .routing(globalRouting); } @@ -344,22 +338,13 @@ public class BulkProcessor implements Closeable { /** * Adds the data from the bytes to be processed by the bulk processor */ - public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, - XContentType xContentType) throws Exception { - return add(data, defaultIndex, defaultType, null, xContentType); - } - - /** - * Adds the data from the bytes to be processed by the bulk processor - */ - public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, - @Nullable String defaultPipeline, - XContentType xContentType) throws Exception { + public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, + @Nullable String defaultPipeline, XContentType xContentType) throws Exception { Tuple bulkRequestToExecute = null; lock.lock(); try { ensureOpen(); - bulkRequest.add(data, defaultIndex, defaultType, null, null, defaultPipeline, + bulkRequest.add(data, defaultIndex, null, null, defaultPipeline, true, xContentType); bulkRequestToExecute = newBulkRequestIfNeeded(); } finally { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index c1a4014b94b9..9484e1582d93 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -37,7 +37,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; import 
org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import java.io.IOException; @@ -74,7 +73,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques private String globalPipeline; private String globalRouting; private String globalIndex; - private String globalType; private long sizeInBytes = 0; @@ -93,15 +91,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques public BulkRequest(@Nullable String globalIndex) { this.globalIndex = globalIndex; - } - - /** - * @deprecated Types are in the process of being removed. Use {@link #BulkRequest(String)} instead - */ - @Deprecated - public BulkRequest(@Nullable String globalIndex, @Nullable String globalType) { - this.globalIndex = globalIndex; - this.globalType = globalType; } /** @@ -225,84 +214,42 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques * Adds a framed data in binary format */ public BulkRequest add(byte[] data, int from, int length, XContentType xContentType) throws IOException { - return add(data, from, length, null, null, xContentType); + return add(data, from, length, null, xContentType); } /** * Adds a framed data in binary format - * @deprecated use {@link #add(byte[], int, int, String, XContentType)} instead */ - @Deprecated - public BulkRequest add(byte[] data, int from, int length, @Nullable String defaultIndex, @Nullable String defaultType, + public BulkRequest add(byte[] data, int from, int length, @Nullable String defaultIndex, XContentType xContentType) throws IOException { - return add(new BytesArray(data, from, length), defaultIndex, defaultType, xContentType); + return add(new BytesArray(data, from, length), defaultIndex, xContentType); } + /** + * Adds a framed data in binary format + */ + public BulkRequest add(BytesReference data, @Nullable String defaultIndex, + XContentType xContentType) throws IOException { + return add(data, defaultIndex, null, null, null, true, xContentType); + } - /** - * Adds a framed data in binary format - */ - public BulkRequest add(byte[] data, int from, int length, @Nullable String defaultIndex, - XContentType xContentType) throws IOException { - return add(new BytesArray(data, from, length), defaultIndex, MapperService.SINGLE_MAPPING_NAME, xContentType); - } - - /** - * Adds a framed data in binary format - * @deprecated use {@link #add(BytesReference, String, XContentType)} instead - */ - @Deprecated - public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, - XContentType xContentType) throws IOException { - return add(data, defaultIndex, defaultType, null, null, null, true, xContentType); - } - - /** - * Adds a framed data in binary format - */ - public BulkRequest add(BytesReference data, @Nullable String defaultIndex, - XContentType xContentType) throws IOException { - return add(data, defaultIndex, MapperService.SINGLE_MAPPING_NAME, null, null, null, true, xContentType); - } - - /** - * Adds a framed data in binary format - * @deprecated use {@link #add(BytesReference, String, boolean, XContentType)} instead - */ - @Deprecated - public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, boolean allowExplicitIndex, - XContentType xContentType) throws IOException { - return add(data, defaultIndex, defaultType, null, null, null, allowExplicitIndex, xContentType); - } - /** * Adds a 
framed data in binary format */ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, boolean allowExplicitIndex, XContentType xContentType) throws IOException { - return add(data, defaultIndex, MapperService.SINGLE_MAPPING_NAME, null, null, null, allowExplicitIndex, xContentType); - } - - public BulkRequest add(BytesReference data, @Nullable String defaultIndex, - @Nullable String defaultRouting, @Nullable FetchSourceContext defaultFetchSourceContext, - @Nullable String defaultPipeline, boolean allowExplicitIndex, - XContentType xContentType) throws IOException { - return add(data, defaultIndex, MapperService.SINGLE_MAPPING_NAME, defaultRouting, defaultFetchSourceContext, - defaultPipeline, allowExplicitIndex, xContentType); + return add(data, defaultIndex, null, null, null, allowExplicitIndex, xContentType); + } - /** - * @deprecated use {@link #add(BytesReference, String, String, FetchSourceContext, String, boolean, XContentType)} instead - */ - @Deprecated - public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, + public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultRouting, @Nullable FetchSourceContext defaultFetchSourceContext, @Nullable String defaultPipeline, boolean allowExplicitIndex, XContentType xContentType) throws IOException { String routing = valueOrDefault(defaultRouting, globalRouting); String pipeline = valueOrDefault(defaultPipeline, globalPipeline); - new BulkRequestParser(true).parse(data, defaultIndex, defaultType, routing, defaultFetchSourceContext, pipeline, - allowExplicitIndex, xContentType, this::internalAdd, this::internalAdd, this::add); + new BulkRequestParser(true).parse(data, defaultIndex, routing, defaultFetchSourceContext, pipeline, + allowExplicitIndex, xContentType, (indexRequest, type) -> internalAdd(indexRequest), this::internalAdd, this::add); return this; } @@ -418,9 +365,6 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques private void applyGlobalMandatoryParameters(DocWriteRequest request) { request.index(valueOrDefault(request.index(), globalIndex)); - if (Strings.isNullOrEmpty(globalType) == false && MapperService.SINGLE_MAPPING_NAME.equals(globalType) == false) { - request.defaultTypeIfNull(globalType); - } } private static String valueOrDefault(String value, String globalDefault) { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java index 34837d0e696d..edb3c60feaa8 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java @@ -33,7 +33,6 @@ import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.mapper.MapperService; /** * A bulk request holds an ordered {@link IndexRequest}s and {@link DeleteRequest}s and allows to executes @@ -42,14 +41,6 @@ import org.elasticsearch.index.mapper.MapperService; public class BulkRequestBuilder extends ActionRequestBuilder implements WriteRequestBuilder { - /** - * @deprecated use {@link #BulkRequestBuilder(ElasticsearchClient, BulkAction, String)} instead - */ - @Deprecated - public BulkRequestBuilder(ElasticsearchClient client, BulkAction action, @Nullable String 
globalIndex, @Nullable String globalType) { - super(client, action, new BulkRequest(globalIndex, globalType)); - } - public BulkRequestBuilder(ElasticsearchClient client, BulkAction action, @Nullable String globalIndex) { super(client, action, new BulkRequest(globalIndex)); } @@ -117,25 +108,14 @@ public class BulkRequestBuilder extends ActionRequestBuilder indexRequestConsumer, - Consumer updateRequestConsumer, - Consumer deleteRequestConsumer) throws IOException { - parse(data, defaultIndex, null, defaultRouting, defaultFetchSourceContext, defaultPipeline, allowExplicitIndex, xContentType, - indexRequestConsumer, updateRequestConsumer, deleteRequestConsumer); - } - - /** - * Parse the provided {@code data} assuming the provided default values. Index requests - * will be passed to the {@code indexRequestConsumer}, update requests to the - * {@code updateRequestConsumer} and delete requests to the {@code deleteRequestConsumer}. - * @deprecated Use {@link #parse(BytesReference, String, String, FetchSourceContext, String, boolean, XContentType, - * Consumer, Consumer, Consumer)} instead. - */ - @Deprecated - public void parse( - BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, - @Nullable String defaultRouting, @Nullable FetchSourceContext defaultFetchSourceContext, - @Nullable String defaultPipeline, boolean allowExplicitIndex, - XContentType xContentType, - Consumer indexRequestConsumer, + BiConsumer indexRequestConsumer, Consumer updateRequestConsumer, Consumer deleteRequestConsumer) throws IOException { XContent xContent = xContentType.xContent(); int line = 0; int from = 0; byte marker = xContent.streamSeparator(); - boolean typesDeprecationLogged = false; while (true) { int nextMarker = findNextMarker(marker, from, data); if (nextMarker == -1) { @@ -172,7 +148,7 @@ public final class BulkRequestParser { String action = parser.currentName(); String index = defaultIndex; - String type = defaultType; + String type = null; String id = null; String routing = defaultRouting; FetchSourceContext fetchSourceContext = defaultFetchSourceContext; @@ -199,10 +175,10 @@ public final class BulkRequestParser { throw new IllegalArgumentException("explicit index in bulk is not allowed"); } index = parser.text(); - } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { - if (warnOnTypeUsage && typesDeprecationLogged == false) { - deprecationLogger.deprecatedAndMaybeLog("bulk_with_types", RestBulkAction.TYPES_DEPRECATION_MESSAGE); - typesDeprecationLogged = true; + } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { + if (errorOnType) { + throw new IllegalArgumentException("Action/metadata line [" + line + "] contains an unknown parameter [" + + currentFieldName + "]"); } type = parser.text(); } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) { @@ -246,7 +222,7 @@ public final class BulkRequestParser { } if ("delete".equals(action)) { - deleteRequestConsumer.accept(new DeleteRequest(index, type, id).routing(routing) + deleteRequestConsumer.accept(new DeleteRequest(index).id(id).routing(routing) .version(version).versionType(versionType).setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm)); } else { nextMarker = findNextMarker(marker, from, data); @@ -259,28 +235,29 @@ public final class BulkRequestParser { // of index request. 
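// ---------------------------------------------------------------------------
// Editor's note: a minimal usage sketch, not part of this patch, of the typeless
// parse(...) signature introduced above. The index-request consumer is now a
// BiConsumer so internal callers can still observe a `_type` parsed from the
// body, while errorOnType (the REST path, via `new BulkRequestParser(true)`)
// rejects an explicit `_type` outright. Index name and document are made up.
import java.io.IOException;

import org.elasticsearch.action.bulk.BulkRequestParser;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.XContentType;

final class BulkParseSketch {
    static void parseTypelessBody() throws IOException {
        BytesArray body = new BytesArray("{ \"index\": { \"_id\": \"1\" } }\n{ \"field1\": \"value1\" }\n");
        new BulkRequestParser(true).parse(body, "some-index", null, null, null, true, XContentType.JSON,
            (indexRequest, type) -> { /* index/create actions; type is null for typeless bodies */ },
            updateRequest -> { /* update actions */ },
            deleteRequest -> { /* delete actions */ });
    }
}
// ---------------------------------------------------------------------------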
if ("index".equals(action)) { if (opType == null) { - indexRequestConsumer.accept(new IndexRequest(index, type, id).routing(routing) + indexRequestConsumer.accept(new IndexRequest(index).id(id).routing(routing) .version(version).versionType(versionType) .setPipeline(pipeline).setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm) - .source(sliceTrimmingCarriageReturn(data, from, nextMarker,xContentType), xContentType)); + .source(sliceTrimmingCarriageReturn(data, from, nextMarker,xContentType), xContentType), type); } else { - indexRequestConsumer.accept(new IndexRequest(index, type, id).routing(routing) + indexRequestConsumer.accept(new IndexRequest(index).id(id).routing(routing) .version(version).versionType(versionType) .create("create".equals(opType)).setPipeline(pipeline) .setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm) - .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType)); + .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType), type); } } else if ("create".equals(action)) { - indexRequestConsumer.accept(new IndexRequest(index, type, id).routing(routing) + indexRequestConsumer.accept(new IndexRequest(index).id(id).routing(routing) .version(version).versionType(versionType) .create(true).setPipeline(pipeline).setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm) - .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType)); + .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType), type); } else if ("update".equals(action)) { if (version != Versions.MATCH_ANY || versionType != VersionType.INTERNAL) { throw new IllegalArgumentException("Update requests do not support versioning. " + "Please use `if_seq_no` and `if_primary_term` instead"); } - UpdateRequest updateRequest = new UpdateRequest(index, type, id).routing(routing).retryOnConflict(retryOnConflict) + UpdateRequest updateRequest = new UpdateRequest().index(index).id(id).routing(routing) + .retryOnConflict(retryOnConflict) .setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm) .routing(routing); // EMPTY is safe here because we never call namedObject diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java index c021825bc7a8..7d59eec63ca7 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java @@ -118,8 +118,8 @@ public class BulkResponse extends ActionResponse implements Iterable responses, int idx, DocWriteRequest request, String index, Exception e) { if (index.equals(request.index())) { - responses.set(idx, new BulkItemResponse(idx, request.opType(), new BulkItemResponse.Failure(request.index(), request.type(), + responses.set(idx, new BulkItemResponse(idx, request.opType(), new BulkItemResponse.Failure(request.index(), request.id(), e))); return true; } @@ -471,7 +471,7 @@ public class TransportBulkAction extends HandledTransportAction docWriteRequest = request.request(); responses.set(request.id(), new BulkItemResponse(request.id(), docWriteRequest.opType(), - new BulkItemResponse.Failure(indexName, docWriteRequest.type(), docWriteRequest.id(), e))); + new BulkItemResponse.Failure(indexName, docWriteRequest.id(), e))); } if (counter.decrementAndGet() == 0) { finishHim(); @@ -614,7 +614,7 @@ public class TransportBulkAction extends HandledTransportAction request, int idx, Exception unavailableException) 
{ - BulkItemResponse.Failure failure = new BulkItemResponse.Failure(request.index(), request.type(), request.id(), + BulkItemResponse.Failure failure = new BulkItemResponse.Failure(request.index(), request.id(), unavailableException); BulkItemResponse bulkItemResponse = new BulkItemResponse(idx, request.opType(), failure); responses.set(idx, bulkItemResponse); @@ -804,8 +804,7 @@ public class TransportBulkAction extends HandledTransportAction @Override public String type() { if (type == null) { - return MapperService.SINGLE_MAPPING_NAME; + return MapperService.SINGLE_MAPPING_NAME; } return type; } @@ -157,22 +157,6 @@ public class DeleteRequest extends ReplicatedWriteRequest this.type = type; return this; } - - /** - * Set the default type supplied to a bulk - * request if this individual request's type is null - * or empty - * - * @deprecated Types are in the process of being removed. - */ - @Deprecated - @Override - public DeleteRequest defaultTypeIfNull(String defaultType) { - if (Strings.isNullOrEmpty(type)) { - type = defaultType; - } - return this; - } /** * The id of the document to delete. @@ -292,8 +276,8 @@ public class DeleteRequest extends ReplicatedWriteRequest @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - // A 7.x request allows null types but if deserialized in a 6.x node will cause nullpointer exceptions. - // So we use the type accessor method here to make the type non-null (will default it to "_doc"). + // A 7.x request allows null types but if deserialized in a 6.x node will cause nullpointer exceptions. + // So we use the type accessor method here to make the type non-null (will default it to "_doc"). out.writeString(type()); out.writeString(id); out.writeOptionalString(routing()); diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index ed04bddd7f53..7d2718aef5a3 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -270,20 +270,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement return this; } - /** - * Set the default type supplied to a bulk - * request if this individual request's type is null - * or empty - * @deprecated Types are in the process of being removed. - */ - @Deprecated - @Override - public IndexRequest defaultTypeIfNull(String defaultType) { - if (Strings.isNullOrEmpty(type)) { - type = defaultType; - } - return this; - } + /** * The id of the indexed document. If not set, will be automatically generated. */ diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index 97057eedd60d..00b850109e49 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -217,7 +217,7 @@ public class UpdateRequest extends InstanceShardOperationRequest @Override public String type() { if (type == null) { - return MapperService.SINGLE_MAPPING_NAME; + return MapperService.SINGLE_MAPPING_NAME; } return type; } @@ -233,21 +233,6 @@ public class UpdateRequest extends InstanceShardOperationRequest return this; } - /** - * Set the default type supplied to a bulk - * request if this individual request's type is null - * or empty - * @deprecated Types are in the process of being removed. 
- */ - @Deprecated - @Override - public UpdateRequest defaultTypeIfNull(String defaultType) { - if (Strings.isNullOrEmpty(type)) { - type = defaultType; - } - return this; - } - /** * The id of the indexed document. */ @@ -855,8 +840,8 @@ public class UpdateRequest extends InstanceShardOperationRequest public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); waitForActiveShards.writeTo(out); - // A 7.x request allows null types but if deserialized in a 6.x node will cause nullpointer exceptions. - // So we use the type accessor method here to make the type non-null (will default it to "_doc"). + // A 7.x request allows null types but if deserialized in a 6.x node will cause nullpointer exceptions. + // So we use the type accessor method here to make the type non-null (will default it to "_doc"). out.writeString(type()); out.writeString(id); out.writeOptionalString(routing); diff --git a/server/src/main/java/org/elasticsearch/client/Client.java b/server/src/main/java/org/elasticsearch/client/Client.java index 7e7cbf1c18be..336875a602e9 100644 --- a/server/src/main/java/org/elasticsearch/client/Client.java +++ b/server/src/main/java/org/elasticsearch/client/Client.java @@ -231,9 +231,9 @@ public interface Client extends ElasticsearchClient, Releasable { BulkRequestBuilder prepareBulk(); /** - * Executes a bulk of index / delete operations with default index and/or type + * Executes a bulk of index / delete operations with default index */ - BulkRequestBuilder prepareBulk(@Nullable String globalIndex, @Nullable String globalType); + BulkRequestBuilder prepareBulk(@Nullable String globalIndex); /** * Gets the document that was indexed from an index with a type and id. diff --git a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java index 1180298f386e..dbfcfc309fa1 100644 --- a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -465,8 +465,8 @@ public abstract class AbstractClient implements Client { } @Override - public BulkRequestBuilder prepareBulk(@Nullable String globalIndex, @Nullable String globalType) { - return new BulkRequestBuilder(this, BulkAction.INSTANCE, globalIndex, globalType); + public BulkRequestBuilder prepareBulk(@Nullable String globalIndex) { + return new BulkRequestBuilder(this, BulkAction.INSTANCE, globalIndex); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponse.java b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponse.java index 59ac32c66610..44bcba686f60 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponse.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponse.java @@ -225,7 +225,6 @@ public class BulkByScrollResponse extends ActionResponse implements ToXContentFr ensureExpectedToken(Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation); Token token; String index = null; - String type = null; String id = null; Integer status = null; Integer shardId = null; @@ -255,9 +254,6 @@ public class BulkByScrollResponse extends ActionResponse implements ToXContentFr case Failure.INDEX_FIELD: index = parser.text(); break; - case Failure.TYPE_FIELD: - type = parser.text(); - break; case Failure.ID_FIELD: id = parser.text(); break; @@ -283,7 +279,7 @@ public class BulkByScrollResponse extends ActionResponse 
implements ToXContentFr } } if (bulkExc != null) { - return new Failure(index, type, id, bulkExc, RestStatus.fromCode(status)); + return new Failure(index, id, bulkExc, RestStatus.fromCode(status)); } else if (searchExc != null) { if (status == null) { return new SearchFailure(searchExc, index, shardId, nodeId); diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java index 33fd1d46b3ec..43cd684bd47a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java @@ -19,20 +19,16 @@ package org.elasticsearch.rest.action.document; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkShardRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.Requests; import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestStatusToXContentListener; -import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import java.io.IOException; @@ -42,19 +38,16 @@ import static org.elasticsearch.rest.RestRequest.Method.PUT; /** * <pre>
- * { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" }
+ * { "index" : { "_index" : "test", "_id" : "1" }
 * { "type1" : { "field1" : "value1" } }
- * { "delete" : { "_index" : "test", "_type" : "type1", "_id" : "2" } }
- * { "create" : { "_index" : "test", "_type" : "type1", "_id" : "1" }
+ * { "delete" : { "_index" : "test", "_id" : "2" } }
+ * { "create" : { "_index" : "test", "_id" : "1" }
 * { "type1" : { "field1" : "value1" } }
 * </pre>
*/ public class RestBulkAction extends BaseRestHandler { private final boolean allowExplicitIndex; - private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestSearchAction.class)); - public static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + - " Specifying types in bulk requests is deprecated."; public RestBulkAction(Settings settings, RestController controller) { controller.registerHandler(POST, "/_bulk", this); @@ -62,10 +55,6 @@ public class RestBulkAction extends BaseRestHandler { controller.registerHandler(POST, "/{index}/_bulk", this); controller.registerHandler(PUT, "/{index}/_bulk", this); - // Deprecated typed endpoints. - controller.registerHandler(POST, "/{index}/{type}/_bulk", this); - controller.registerHandler(PUT, "/{index}/{type}/_bulk", this); - this.allowExplicitIndex = MULTI_ALLOW_EXPLICIT_INDEX.get(settings); } @@ -78,12 +67,6 @@ public class RestBulkAction extends BaseRestHandler { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { BulkRequest bulkRequest = Requests.bulkRequest(); String defaultIndex = request.param("index"); - String defaultType = request.param("type"); - if (defaultType == null) { - defaultType = MapperService.SINGLE_MAPPING_NAME; - } else { - deprecationLogger.deprecatedAndMaybeLog("bulk_with_types", RestBulkAction.TYPES_DEPRECATION_MESSAGE); - } String defaultRouting = request.param("routing"); FetchSourceContext defaultFetchSourceContext = FetchSourceContext.parseFromRestRequest(request); String defaultPipeline = request.param("pipeline"); @@ -93,7 +76,7 @@ public class RestBulkAction extends BaseRestHandler { } bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT)); bulkRequest.setRefreshPolicy(request.param("refresh")); - bulkRequest.add(request.requiredContent(), defaultIndex, defaultType, defaultRouting, + bulkRequest.add(request.requiredContent(), defaultIndex, defaultRouting, defaultFetchSourceContext, defaultPipeline, allowExplicitIndex, request.getXContentType()); return channel -> client.bulk(bulkRequest, new RestStatusToXContentListener<>(channel)); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java index d053f93546e1..5116abaf02a5 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java @@ -64,7 +64,6 @@ public class BulkIntegrationIT extends ESIntegTestCase { assertBusy(() -> { GetMappingsResponse mappingsResponse = client().admin().indices().prepareGetMappings().get(); assertTrue(mappingsResponse.getMappings().containsKey("logstash-2014.03.30")); - assertTrue(mappingsResponse.getMappings().get("logstash-2014.03.30").containsKey("logs")); }); } @@ -129,7 +128,7 @@ public class BulkIntegrationIT extends ESIntegTestCase { { createSamplePipeline("pipeline"); - BulkRequestBuilder bulkBuilder = client().prepareBulk("test","type1") + BulkRequestBuilder bulkBuilder = client().prepareBulk("test") .routing("routing") .pipeline("pipeline"); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java index 20a42407720f..f6a366237040 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java @@ -44,7 +44,7 @@ import static org.hamcrest.Matchers.containsString; public class BulkItemResponseTests extends ESTestCase { public void testFailureToString() { - Failure failure = new Failure("index", "type", "id", new RuntimeException("test")); + Failure failure = new Failure("index", "id", new RuntimeException("test")); String toString = failure.toString(); assertThat(toString, containsString("\"type\":\"runtime_exception\"")); assertThat(toString, containsString("\"reason\":\"test\"")); @@ -88,16 +88,15 @@ public class BulkItemResponseTests extends ESTestCase { int itemId = randomIntBetween(0, 100); String index = randomAlphaOfLength(5); - String type = randomAlphaOfLength(5); String id = randomAlphaOfLength(5); DocWriteRequest.OpType opType = randomFrom(DocWriteRequest.OpType.values()); final Tuple exceptions = randomExceptions(); Exception bulkItemCause = (Exception) exceptions.v1(); - Failure bulkItemFailure = new Failure(index, type, id, bulkItemCause); + Failure bulkItemFailure = new Failure(index, id, bulkItemCause); BulkItemResponse bulkItemResponse = new BulkItemResponse(itemId, opType, bulkItemFailure); - Failure expectedBulkItemFailure = new Failure(index, type, id, exceptions.v2(), ExceptionsHelper.status(bulkItemCause)); + Failure expectedBulkItemFailure = new Failure(index, id, exceptions.v2(), ExceptionsHelper.status(bulkItemCause)); BulkItemResponse expectedBulkItemResponse = new BulkItemResponse(itemId, opType, expectedBulkItemFailure); BytesReference originalBytes = toShuffledXContent(bulkItemResponse, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); @@ -120,7 +119,6 @@ public class BulkItemResponseTests extends ESTestCase { public static void assertBulkItemResponse(BulkItemResponse expected, BulkItemResponse actual) { assertEquals(expected.getItemId(), actual.getItemId()); assertEquals(expected.getIndex(), actual.getIndex()); - assertEquals(expected.getType(), actual.getType()); assertEquals(expected.getId(), actual.getId()); assertEquals(expected.getOpType(), actual.getOpType()); assertEquals(expected.getVersion(), actual.getVersion()); @@ -131,7 +129,6 @@ public class BulkItemResponseTests extends ESTestCase { BulkItemResponse.Failure actualFailure = actual.getFailure(); assertEquals(expectedFailure.getIndex(), actualFailure.getIndex()); - assertEquals(expectedFailure.getType(), actualFailure.getType()); assertEquals(expectedFailure.getId(), actualFailure.getId()); assertEquals(expectedFailure.getMessage(), actualFailure.getMessage()); assertEquals(expectedFailure.getStatus(), actualFailure.getStatus()); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorIT.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorIT.java index 6e2f78f45614..0c080c193f4b 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorIT.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorIT.java @@ -27,14 +27,10 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; -import 
org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ESIntegTestCase; import java.util.Arrays; @@ -62,10 +58,10 @@ public class BulkProcessorIT extends ESIntegTestCase { int numDocs = randomIntBetween(10, 100); try (BulkProcessor processor = BulkProcessor.builder(client(), listener) - //let's make sure that the bulk action limit trips, one single execution will index all the documents - .setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs) - .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) - .build()) { + //let's make sure that the bulk action limit trips, one single execution will index all the documents + .setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs) + .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) + .build()) { MultiGetRequestBuilder multiGetRequestBuilder = indexDocs(client(), processor, numDocs); @@ -86,9 +82,9 @@ public class BulkProcessorIT extends ESIntegTestCase { int numDocs = randomIntBetween(10, 100); try (BulkProcessor processor = BulkProcessor.builder(client(), listener) - //let's make sure that this bulk won't be automatically flushed - .setConcurrentRequests(randomIntBetween(0, 10)).setBulkActions(numDocs + randomIntBetween(1, 100)) - .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) { + //let's make sure that this bulk won't be automatically flushed + .setConcurrentRequests(randomIntBetween(0, 10)).setBulkActions(numDocs + randomIntBetween(1, 100)) + .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) { MultiGetRequestBuilder multiGetRequestBuilder = indexDocs(client(), processor, numDocs); @@ -121,9 +117,9 @@ public class BulkProcessorIT extends ESIntegTestCase { MultiGetRequestBuilder multiGetRequestBuilder; try (BulkProcessor processor = BulkProcessor.builder(client(), listener) - .setConcurrentRequests(concurrentRequests).setBulkActions(bulkActions) - //set interval and size to high values - .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) { + .setConcurrentRequests(concurrentRequests).setBulkActions(bulkActions) + //set interval and size to high values + .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) { multiGetRequestBuilder = indexDocs(client(), processor, numDocs); @@ -146,7 +142,6 @@ public class BulkProcessorIT extends ESIntegTestCase { for (BulkItemResponse bulkItemResponse : listener.bulkItems) { assertThat(bulkItemResponse.getFailureMessage(), bulkItemResponse.isFailed(), equalTo(false)); assertThat(bulkItemResponse.getIndex(), equalTo("test")); - assertThat(bulkItemResponse.getType(), equalTo("test")); //with concurrent requests > 1 we can't rely on the order of the bulk requests assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(numDocs))); //we do want to check that we don't get duplicate ids back @@ -161,11 +156,11 @@ public class BulkProcessorIT extends ESIntegTestCase { int numDocs = randomIntBetween(10, 100); BulkProcessor processor = BulkProcessor.builder(client(), listener) - //let's make sure that the bulk action limit trips, one single execution will index all the documents - .setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs) - 
.setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(randomIntBetween(1, 10), - RandomPicks.randomFrom(random(), ByteSizeUnit.values()))) - .build(); + //let's make sure that the bulk action limit trips, one single execution will index all the documents + .setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs) + .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(randomIntBetween(1, 10), + RandomPicks.randomFrom(random(), ByteSizeUnit.values()))) + .build(); MultiGetRequestBuilder multiGetRequestBuilder = indexDocs(client(), processor, numDocs); assertThat(processor.isOpen(), is(true)); @@ -189,7 +184,7 @@ public class BulkProcessorIT extends ESIntegTestCase { public void testBulkProcessorConcurrentRequestsReadOnlyIndex() throws Exception { createIndex("test-ro"); assertAcked(client().admin().indices().prepareUpdateSettings("test-ro") - .setSettings(Settings.builder().put(IndexMetaData.SETTING_BLOCKS_WRITE, true))); + .setSettings(Settings.builder().put(IndexMetaData.SETTING_BLOCKS_WRITE, true))); ensureGreen(); int bulkActions = randomIntBetween(10, 100); @@ -208,9 +203,9 @@ public class BulkProcessorIT extends ESIntegTestCase { BulkProcessorTestListener listener = new BulkProcessorTestListener(latch, closeLatch); try (BulkProcessor processor = BulkProcessor.builder(client(), listener) - .setConcurrentRequests(concurrentRequests).setBulkActions(bulkActions) - //set interval and size to high values - .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) { + .setConcurrentRequests(concurrentRequests).setBulkActions(bulkActions) + //set interval and size to high values + .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) { for (int i = 1; i <= numDocs; i++) { if (randomBoolean()) { @@ -237,7 +232,6 @@ public class BulkProcessorIT extends ESIntegTestCase { Set readOnlyIds = new HashSet<>(); for (BulkItemResponse bulkItemResponse : listener.bulkItems) { assertThat(bulkItemResponse.getIndex(), either(equalTo("test")).or(equalTo("test-ro"))); - assertThat(bulkItemResponse.getType(), equalTo("test")); if (bulkItemResponse.getIndex().equals("test")) { assertThat(bulkItemResponse.isFailed(), equalTo(false)); //with concurrent requests > 1 we can't rely on the order of the bulk requests @@ -259,15 +253,8 @@ public class BulkProcessorIT extends ESIntegTestCase { private static MultiGetRequestBuilder indexDocs(Client client, BulkProcessor processor, int numDocs) throws Exception { MultiGetRequestBuilder multiGetRequestBuilder = client.prepareMultiGet(); for (int i = 1; i <= numDocs; i++) { - if (randomBoolean()) { - processor.add(new IndexRequest("test", "test", Integer.toString(i)) - .source(Requests.INDEX_CONTENT_TYPE, "field", randomRealisticUnicodeOfLengthBetween(1, 30))); - } else { - final String source = "{ \"index\":{\"_index\":\"test\",\"_type\":\"test\",\"_id\":\"" + Integer.toString(i) + "\"} }\n" - + Strings.toString(JsonXContent.contentBuilder() - .startObject().field("field", randomRealisticUnicodeOfLengthBetween(1, 30)).endObject()) + "\n"; - processor.add(new BytesArray(source), null, null, XContentType.JSON); - } + processor.add(new IndexRequest("test", "test", Integer.toString(i)) + .source(Requests.INDEX_CONTENT_TYPE, "field", randomRealisticUnicodeOfLengthBetween(1, 30))); multiGetRequestBuilder.add("test", Integer.toString(i)); } return multiGetRequestBuilder; @@ -278,7 +265,6 @@ public class 
BulkProcessorIT extends ESIntegTestCase { int i = 1; for (BulkItemResponse bulkItemResponse : bulkItemResponses) { assertThat(bulkItemResponse.getIndex(), equalTo("test")); - assertThat(bulkItemResponse.getType(), equalTo("test")); assertThat(bulkItemResponse.getId(), equalTo(Integer.toString(i++))); assertThat("item " + i + " failed with cause: " + bulkItemResponse.getFailureMessage(), bulkItemResponse.isFailed(), equalTo(false)); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestModifierTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestModifierTests.java index 5a168264a740..9bb7732f4da4 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestModifierTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestModifierTests.java @@ -74,7 +74,6 @@ public class BulkRequestModifierTests extends ESTestCase { BulkItemResponse item = bulkResponse.getItems()[j]; assertThat(item.isFailed(), is(true)); assertThat(item.getFailure().getIndex(), equalTo("_index")); - assertThat(item.getFailure().getType(), equalTo("_type")); assertThat(item.getFailure().getId(), equalTo(String.valueOf(j))); assertThat(item.getFailure().getMessage(), equalTo("java.lang.RuntimeException")); } else { diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestParserTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestParserTests.java index fbcd5c46e2f9..1ddeecef7154 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestParserTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestParserTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.rest.action.document.RestBulkAction; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -33,8 +32,8 @@ public class BulkRequestParserTests extends ESTestCase { BytesArray request = new BytesArray("{ \"index\":{ \"_id\": \"bar\" } }\n{}\n"); BulkRequestParser parser = new BulkRequestParser(randomBoolean()); final AtomicBoolean parsed = new AtomicBoolean(); - parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, - indexRequest -> { + parser.parse(request, "foo", null, null, null, false, XContentType.JSON, + (indexRequest, type) -> { assertFalse(parsed.get()); assertEquals("foo", indexRequest.index()); assertEquals("bar", indexRequest.id()); @@ -48,8 +47,8 @@ public class BulkRequestParserTests extends ESTestCase { BytesArray request = new BytesArray("{ \"delete\":{ \"_id\": \"bar\" } }\n"); BulkRequestParser parser = new BulkRequestParser(randomBoolean()); final AtomicBoolean parsed = new AtomicBoolean(); - parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, - req -> fail(), req -> fail(), + parser.parse(request, "foo", null, null, null, false, XContentType.JSON, + (req, type) -> fail(), req -> fail(), deleteRequest -> { assertFalse(parsed.get()); assertEquals("foo", deleteRequest.index()); @@ -63,8 +62,8 @@ public class BulkRequestParserTests extends ESTestCase { BytesArray request = new BytesArray("{ \"update\":{ \"_id\": \"bar\" } }\n{}\n"); BulkRequestParser parser = new BulkRequestParser(randomBoolean()); final AtomicBoolean parsed = new AtomicBoolean(); - parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, - req -> fail(), + parser.parse(request, "foo", null, null, null, 
false, XContentType.JSON, + (req, type) -> fail(), updateRequest -> { assertFalse(parsed.get()); assertEquals("foo", updateRequest.index()); @@ -79,36 +78,35 @@ public class BulkRequestParserTests extends ESTestCase { BytesArray request = new BytesArray("{ \"index\":{ \"_id\": \"bar\" } }\n{}"); BulkRequestParser parser = new BulkRequestParser(randomBoolean()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, - indexRequest -> fail(), req -> fail(), req -> fail())); + () -> parser.parse(request, "foo", null, null, null, false, XContentType.JSON, + (req, type) -> fail(), req -> fail(), req -> fail())); assertEquals("The bulk request must be terminated by a newline [\\n]", e.getMessage()); } public void testFailOnExplicitIndex() throws IOException { BytesArray request = new BytesArray("{ \"index\":{ \"_index\": \"foo\", \"_id\": \"bar\" } }\n{}\n"); BulkRequestParser parser = new BulkRequestParser(randomBoolean()); - + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, - () -> parser.parse(request, null, null, null, null, null, false, XContentType.JSON, - req -> fail(), req -> fail(), req -> fail())); + () -> parser.parse(request, null, null, null, null, false, XContentType.JSON, + (req, type) -> fail(), req -> fail(), req -> fail())); assertEquals("explicit index in bulk is not allowed", ex.getMessage()); } - public void testTypeWarning() throws IOException { + public void testTypesStillParsedForBulkMonitoring() throws IOException { BytesArray request = new BytesArray("{ \"index\":{ \"_type\": \"quux\", \"_id\": \"bar\" } }\n{}\n"); - BulkRequestParser parser = new BulkRequestParser(true); + BulkRequestParser parser = new BulkRequestParser(false); final AtomicBoolean parsed = new AtomicBoolean(); - parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, - indexRequest -> { + parser.parse(request, "foo", null, null, null, false, XContentType.JSON, + (indexRequest, type) -> { assertFalse(parsed.get()); assertEquals("foo", indexRequest.index()); assertEquals("bar", indexRequest.id()); + assertEquals("quux", type); parsed.set(true); }, req -> fail(), req -> fail()); assertTrue(parsed.get()); - - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java index ebd6590a80cc..22a50231fdfc 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java @@ -34,7 +34,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.rest.action.document.RestBulkAction; import org.elasticsearch.script.Script; import org.elasticsearch.test.ESTestCase; @@ -63,12 +62,10 @@ public class BulkRequestTests extends ESTestCase { assertThat(((IndexRequest) bulkRequest.requests().get(0)).source(), equalTo(new BytesArray("{ \"field1\" : \"value1\" }"))); assertThat(bulkRequest.requests().get(1), instanceOf(DeleteRequest.class)); assertThat(((IndexRequest) bulkRequest.requests().get(2)).source(), equalTo(new BytesArray("{ \"field1\" : \"value3\" }"))); - //This test's JSON contains outdated references to types 
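// ---------------------------------------------------------------------------
// Editor's note: a minimal sketch, not part of this patch, of composing a bulk
// request with the typeless APIs these tests now exercise: every item is
// addressed by index and id alone. Index name, ids and field values are made up.
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.xcontent.XContentType;

final class TypelessBulkSketch {
    static BulkRequest build() {
        return new BulkRequest("some-index") // global default index; the global type default is gone
            .add(new IndexRequest("some-index").id("1")
                .source(XContentType.JSON, "field1", "value1"))
            .add(new UpdateRequest().index("some-index").id("1")
                .doc(XContentType.JSON, "field1", "value2"))
            .add(new DeleteRequest("some-index").id("2"));
    }
}
// ---------------------------------------------------------------------------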
- assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testSimpleBulkWithCarriageReturn() throws Exception { - String bulkAction = "{ \"index\":{\"_index\":\"test\",\"_type\":\"type1\",\"_id\":\"1\"} }\r\n{ \"field1\" : \"value1\" }\r\n"; + String bulkAction = "{ \"index\":{\"_index\":\"test\",\"_id\":\"1\"} }\r\n{ \"field1\" : \"value1\" }\r\n"; BulkRequest bulkRequest = new BulkRequest(); bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON); assertThat(bulkRequest.numberOfActions(), equalTo(1)); @@ -76,8 +73,6 @@ public class BulkRequestTests extends ESTestCase { Map sourceMap = XContentHelper.convertToMap(((IndexRequest) bulkRequest.requests().get(0)).source(), false, XContentType.JSON).v2(); assertEquals("value1", sourceMap.get("field1")); - //This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testSimpleBulk2() throws Exception { @@ -103,7 +98,6 @@ public class BulkRequestTests extends ESTestCase { assertThat(((UpdateRequest) bulkRequest.requests().get(0)).retryOnConflict(), equalTo(2)); assertThat(((UpdateRequest) bulkRequest.requests().get(0)).doc().source().utf8ToString(), equalTo("{\"field\":\"value\"}")); assertThat(bulkRequest.requests().get(1).id(), equalTo("0")); - assertThat(bulkRequest.requests().get(1).type(), equalTo("type1")); assertThat(bulkRequest.requests().get(1).index(), equalTo("index1")); Script script = ((UpdateRequest) bulkRequest.requests().get(1)).script(); assertThat(script, notNullValue()); @@ -114,21 +108,17 @@ public class BulkRequestTests extends ESTestCase { assertThat(scriptParams.size(), equalTo(1)); assertThat(scriptParams.get("param1"), equalTo(1)); assertThat(((UpdateRequest) bulkRequest.requests().get(1)).upsertRequest().source().utf8ToString(), equalTo("{\"counter\":1}")); - //This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testBulkAllowExplicitIndex() throws Exception { String bulkAction1 = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk.json"); Exception ex = expectThrows(Exception.class, () -> new BulkRequest().add( - new BytesArray(bulkAction1.getBytes(StandardCharsets.UTF_8)), null, null, false, XContentType.JSON)); + new BytesArray(bulkAction1.getBytes(StandardCharsets.UTF_8)), null, false, XContentType.JSON)); assertEquals("explicit index in bulk is not allowed", ex.getMessage()); String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk5.json"); - new BulkRequest().add(new BytesArray(bulkAction.getBytes(StandardCharsets.UTF_8)), "test", null, false, XContentType.JSON); - //This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); + new BulkRequest().add(new BytesArray(bulkAction.getBytes(StandardCharsets.UTF_8)), "test", false, XContentType.JSON); } public void testBulkAddIterable() { @@ -150,8 +140,6 @@ public class BulkRequestTests extends ESTestCase { ParsingException exc = expectThrows(ParsingException.class, () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON)); assertThat(exc.getMessage(), containsString("Unknown key for a VALUE_STRING in [hello]")); - //This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testSimpleBulk7() throws Exception { @@ -161,8 +149,6 
@@ public class BulkRequestTests extends ESTestCase { () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON)); assertThat(exc.getMessage(), containsString("Malformed action/metadata line [5], expected a simple value for field [_unknown] but found [START_ARRAY]")); - //This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testSimpleBulk8() throws Exception { @@ -171,8 +157,6 @@ public class BulkRequestTests extends ESTestCase { IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON)); assertThat(exc.getMessage(), containsString("Action/metadata line [3] contains an unknown parameter [_foo]")); - //This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testSimpleBulk9() throws Exception { @@ -189,12 +173,10 @@ public class BulkRequestTests extends ESTestCase { BulkRequest bulkRequest = new BulkRequest(); bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON); assertThat(bulkRequest.numberOfActions(), equalTo(9)); - //This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testBulkActionShouldNotContainArray() throws Exception { - String bulkAction = "{ \"index\":{\"_index\":[\"index1\", \"index2\"],\"_type\":\"type1\",\"_id\":\"1\"} }\r\n" + String bulkAction = "{ \"index\":{\"_index\":[\"index1\", \"index2\"],\"_id\":\"1\"} }\r\n" + "{ \"field1\" : \"value1\" }\r\n"; BulkRequest bulkRequest = new BulkRequest(); IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, @@ -280,7 +262,6 @@ public class BulkRequestTests extends ESTestCase { builder.startObject(); builder.startObject("index"); builder.field("_index", "index"); - builder.field("_type", "type"); builder.field("_id", "test"); builder.endObject(); builder.endObject(); @@ -296,19 +277,16 @@ public class BulkRequestTests extends ESTestCase { } BulkRequest bulkRequest = new BulkRequest(); - bulkRequest.add(data, null, null, xContentType); + bulkRequest.add(data, null, xContentType); assertEquals(1, bulkRequest.requests().size()); DocWriteRequest docWriteRequest = bulkRequest.requests().get(0); assertEquals(DocWriteRequest.OpType.INDEX, docWriteRequest.opType()); assertEquals("index", docWriteRequest.index()); - assertEquals("type", docWriteRequest.type()); assertEquals("test", docWriteRequest.id()); assertThat(docWriteRequest, instanceOf(IndexRequest.class)); IndexRequest request = (IndexRequest) docWriteRequest; assertEquals(1, request.sourceAsMap().size()); assertEquals("value", request.sourceAsMap().get("field")); - //This test's content contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testToValidateUpsertRequestAndCASInBulkRequest() throws IOException { @@ -319,7 +297,6 @@ public class BulkRequestTests extends ESTestCase { builder.startObject(); builder.startObject("update"); builder.field("_index", "index"); - builder.field("_type", "type"); builder.field("_id", "id"); builder.field("if_seq_no", 1L); builder.field("if_primary_term", 100L); @@ -334,7 +311,6 @@ public class BulkRequestTests extends ESTestCase { values.put("if_seq_no", 1L); values.put("if_primary_term", 100L); 
values.put("_index", "index"); - values.put("_type", "type"); builder.field("upsert", values); builder.endObject(); } @@ -342,10 +318,8 @@ public class BulkRequestTests extends ESTestCase { data = out.bytes(); } BulkRequest bulkRequest = new BulkRequest(); - bulkRequest.add(data, null, null, xContentType); + bulkRequest.add(data, null, xContentType); assertThat(bulkRequest.validate().validationErrors(), contains("upsert requests don't support `if_seq_no` and `if_primary_term`")); - //This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } public void testBulkTerminatedByNewline() throws Exception { @@ -359,7 +333,5 @@ public class BulkRequestTests extends ESTestCase { bulkRequestWithNewLine.add(bulkActionWithNewLine.getBytes(StandardCharsets.UTF_8), 0, bulkActionWithNewLine.length(), null, XContentType.JSON); assertEquals(3, bulkRequestWithNewLine.numberOfActions()); - //This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkResponseTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkResponseTests.java index 5da1451a1384..49b3c6aca5b6 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkResponseTests.java @@ -73,16 +73,15 @@ public class BulkResponseTests extends ESTestCase { expectedBulkItems[i] = new BulkItemResponse(i, opType, randomDocWriteResponses.v2()); } else { String index = randomAlphaOfLength(5); - String type = randomAlphaOfLength(5); String id = randomAlphaOfLength(5); Tuple failures = randomExceptions(); Exception bulkItemCause = (Exception) failures.v1(); bulkItems[i] = new BulkItemResponse(i, opType, - new BulkItemResponse.Failure(index, type, id, bulkItemCause)); + new BulkItemResponse.Failure(index, id, bulkItemCause)); expectedBulkItems[i] = new BulkItemResponse(i, opType, - new BulkItemResponse.Failure(index, type, id, failures.v2(), ExceptionsHelper.status(bulkItemCause))); + new BulkItemResponse.Failure(index, id, failures.v2(), ExceptionsHelper.status(bulkItemCause))); } } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java index 2d3ecff5fd06..1bf3ea429e79 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java @@ -321,7 +321,6 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getVersion(), equalTo(1L)); assertThat(response.getItems()[i].getIndex(), equalTo("test")); - assertThat(response.getItems()[i].getType(), equalTo("type1")); assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); assertThat(response.getItems()[i].getResponse().getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getResponse().getVersion(), equalTo(1L)); @@ -359,7 +358,6 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getVersion(), equalTo(2L)); assertThat(response.getItems()[i].getIndex(), equalTo("test")); - assertThat(response.getItems()[i].getType(), equalTo("type1")); 
assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); assertThat(response.getItems()[i].getResponse().getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getResponse().getVersion(), equalTo(2L)); @@ -383,7 +381,6 @@ public class BulkWithUpdatesIT extends ESIntegTestCase { assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(id))); assertThat(response.getItems()[i].getVersion(), equalTo(3L)); assertThat(response.getItems()[i].getIndex(), equalTo("test")); - assertThat(response.getItems()[i].getType(), equalTo("type1")); assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); } } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/RetryTests.java b/server/src/test/java/org/elasticsearch/action/bulk/RetryTests.java index bfbccf0b1426..81da3d98b084 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/RetryTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/RetryTests.java @@ -232,7 +232,7 @@ public class RetryTests extends ESTestCase { } private BulkItemResponse failedResponse() { - return new BulkItemResponse(1, OpType.INDEX, new BulkItemResponse.Failure("test", "test", "1", + return new BulkItemResponse(1, OpType.INDEX, new BulkItemResponse.Failure("test", "1", new EsRejectedExecutionException("pool full"))); } } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java index d8fe82bf88e7..2b2c22cd9be5 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java @@ -21,10 +21,10 @@ package org.elasticsearch.action.bulk; import org.apache.lucene.util.Constants; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.support.ActionFilters; @@ -38,7 +38,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.rest.action.document.RestBulkAction; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.CapturingTransport; @@ -199,8 +198,6 @@ public class TransportBulkActionTookTests extends ESTestCase { } }); - //This test's JSON contains outdated references to types - assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE); } static class Resolver extends IndexNameExpressionResolver { diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index 8acb3e8cc93a..fab305327027 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -153,7 +153,6 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { BulkItemResponse.Failure failure = primaryResponse.getFailure(); 
assertThat(failure.getIndex(), equalTo("index")); - assertThat(failure.getType(), equalTo("_doc")); assertThat(failure.getId(), equalTo("id")); assertThat(failure.getCause().getClass(), equalTo(VersionConflictEngineException.class)); assertThat(failure.getCause().getMessage(), @@ -201,7 +200,6 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { BulkItemResponse response = result.finalResponseIfSuccessful.getResponses()[i]; assertThat(response.getItemId(), equalTo(i)); assertThat(response.getIndex(), equalTo("index")); - assertThat(response.getType(), equalTo("_doc")); assertThat(response.getId(), equalTo("id_" + i)); assertThat(response.getOpType(), equalTo(DocWriteRequest.OpType.INDEX)); if (response.getItemId() == rejectItem.id()) { @@ -336,7 +334,6 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { assertThat(primaryResponse.getFailureMessage(), containsString("some kind of exception")); BulkItemResponse.Failure failure = primaryResponse.getFailure(); assertThat(failure.getIndex(), equalTo("index")); - assertThat(failure.getType(), equalTo("_doc")); assertThat(failure.getId(), equalTo("id")); assertThat(failure.getCause(), equalTo(err)); @@ -515,7 +512,6 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { assertThat(primaryResponse.getFailureMessage(), containsString("I'm dead <(x.x)>")); BulkItemResponse.Failure failure = primaryResponse.getFailure(); assertThat(failure.getIndex(), equalTo("index")); - assertThat(failure.getType(), equalTo("_doc")); assertThat(failure.getId(), equalTo("id")); assertThat(failure.getCause(), equalTo(err)); assertThat(failure.getStatus(), equalTo(RestStatus.INTERNAL_SERVER_ERROR)); @@ -563,7 +559,6 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { assertThat(primaryResponse.getFailureMessage(), containsString("I'm conflicted <(;_;)>")); BulkItemResponse.Failure failure = primaryResponse.getFailure(); assertThat(failure.getIndex(), equalTo("index")); - assertThat(failure.getType(), equalTo("_doc")); assertThat(failure.getId(), equalTo("id")); assertThat(failure.getCause(), equalTo(err)); assertThat(failure.getStatus(), equalTo(RestStatus.CONFLICT)); @@ -692,7 +687,6 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { assertThat(primaryResponse.getFailureMessage(), containsString("oops")); BulkItemResponse.Failure failure = primaryResponse.getFailure(); assertThat(failure.getIndex(), equalTo("index")); - assertThat(failure.getType(), equalTo("_doc")); assertThat(failure.getId(), equalTo("id")); assertThat(failure.getCause(), equalTo(err)); assertThat(failure.getStatus(), equalTo(RestStatus.INTERNAL_SERVER_ERROR)); @@ -744,8 +738,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { DocWriteRequest.OpType.DELETE, DocWriteRequest.OpType.INDEX ), - new BulkItemResponse.Failure("index", "_doc", "1", - exception, 1L) + new BulkItemResponse.Failure("index", "1", exception, 1L) )); BulkItemRequest[] itemRequests = new BulkItemRequest[1]; itemRequests[0] = itemRequest; diff --git a/server/src/test/java/org/elasticsearch/document/DocumentActionsIT.java b/server/src/test/java/org/elasticsearch/document/DocumentActionsIT.java index 03a503ecf593..b17f36adb5eb 100644 --- a/server/src/test/java/org/elasticsearch/document/DocumentActionsIT.java +++ b/server/src/test/java/org/elasticsearch/document/DocumentActionsIT.java @@ -200,37 +200,31 @@ public class DocumentActionsIT extends ESIntegTestCase { 
assertThat(bulkResponse.getItems()[0].isFailed(), equalTo(false)); assertThat(bulkResponse.getItems()[0].getOpType(), equalTo(OpType.INDEX)); assertThat(bulkResponse.getItems()[0].getIndex(), equalTo(getConcreteIndexName())); - assertThat(bulkResponse.getItems()[0].getType(), equalTo("type1")); assertThat(bulkResponse.getItems()[0].getId(), equalTo("1")); assertThat(bulkResponse.getItems()[1].isFailed(), equalTo(false)); assertThat(bulkResponse.getItems()[1].getOpType(), equalTo(OpType.CREATE)); assertThat(bulkResponse.getItems()[1].getIndex(), equalTo(getConcreteIndexName())); - assertThat(bulkResponse.getItems()[1].getType(), equalTo("type1")); assertThat(bulkResponse.getItems()[1].getId(), equalTo("2")); assertThat(bulkResponse.getItems()[2].isFailed(), equalTo(false)); assertThat(bulkResponse.getItems()[2].getOpType(), equalTo(OpType.INDEX)); assertThat(bulkResponse.getItems()[2].getIndex(), equalTo(getConcreteIndexName())); - assertThat(bulkResponse.getItems()[2].getType(), equalTo("type1")); String generatedId3 = bulkResponse.getItems()[2].getId(); assertThat(bulkResponse.getItems()[3].isFailed(), equalTo(false)); assertThat(bulkResponse.getItems()[3].getOpType(), equalTo(OpType.CREATE)); assertThat(bulkResponse.getItems()[3].getIndex(), equalTo(getConcreteIndexName())); - assertThat(bulkResponse.getItems()[3].getType(), equalTo("type1")); String generatedId4 = bulkResponse.getItems()[3].getId(); assertThat(bulkResponse.getItems()[4].isFailed(), equalTo(false)); assertThat(bulkResponse.getItems()[4].getOpType(), equalTo(OpType.DELETE)); assertThat(bulkResponse.getItems()[4].getIndex(), equalTo(getConcreteIndexName())); - assertThat(bulkResponse.getItems()[4].getType(), equalTo("type1")); assertThat(bulkResponse.getItems()[4].getId(), equalTo("1")); assertThat(bulkResponse.getItems()[5].isFailed(), equalTo(true)); assertThat(bulkResponse.getItems()[5].getOpType(), equalTo(OpType.INDEX)); assertThat(bulkResponse.getItems()[5].getIndex(), equalTo(getConcreteIndexName())); - assertThat(bulkResponse.getItems()[5].getType(), equalTo("type1")); waitForRelocation(ClusterHealthStatus.GREEN); RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().actionGet(); diff --git a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java index 8791c11bf274..726e15af4e8b 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java @@ -62,7 +62,7 @@ public class BulkByScrollResponseTests extends AbstractXContentTestCase randomIndexingFailures() { return usually() ? 
emptyList() - : singletonList(new Failure(randomSimpleString(random()), randomSimpleString(random()), + : singletonList(new Failure(randomSimpleString(random()), new IllegalArgumentException("test"))); } @@ -91,7 +91,6 @@ public class BulkByScrollResponseTests extends AbstractXContentTestCase
[GIT binary patch (literal 7686) omitted: base85-encoded binary data, not recoverable as text]
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java index 5c1d700343fc..9645a1817f40 100644 ---
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java @@ -80,7 +80,7 @@ public class MonitoringBulkRequest extends ActionRequest { // MonitoringBulkRequest accepts a body request that has the same format as the BulkRequest new BulkRequestParser(false).parse(content, null, null, null, null, true, xContentType, - indexRequest -> { + (indexRequest, type) -> { // we no longer accept non-timestamped indexes from Kibana, LS, or Beats because we do not use the data // and it was duplicated anyway; by simply dropping it, we allow BWC for older clients that still send it if (MonitoringIndex.from(indexRequest.index()) != MonitoringIndex.TIMESTAMPED) { @@ -89,11 +89,11 @@ public class MonitoringBulkRequest extends ActionRequest { final BytesReference source = indexRequest.source(); if (source.length() == 0) { throw new IllegalArgumentException("source is missing for monitoring document [" - + indexRequest.index() + "][" + indexRequest.type() + "][" + indexRequest.id() + "]"); + + indexRequest.index() + "][" + type + "][" + indexRequest.id() + "]"); } // builds a new monitoring document based on the index request - add(new MonitoringBulkDoc(system, indexRequest.type(), indexRequest.id(), timestamp, intervalMillis, source, + add(new MonitoringBulkDoc(system, type, indexRequest.id(), timestamp, intervalMillis, source, xContentType)); }, updateRequest -> { throw new IllegalArgumentException("monitoring bulk requests should only contain index requests"); }, diff --git a/x-pack/plugin/mapper-flattened/src/test/java/org/elasticsearch/xpack/flattened/mapper/FlatObjectSearchTests.java b/x-pack/plugin/mapper-flattened/src/test/java/org/elasticsearch/xpack/flattened/mapper/FlatObjectSearchTests.java index 816ae889125b..1e0d88efe978 100644 --- a/x-pack/plugin/mapper-flattened/src/test/java/org/elasticsearch/xpack/flattened/mapper/FlatObjectSearchTests.java +++ b/x-pack/plugin/mapper-flattened/src/test/java/org/elasticsearch/xpack/flattened/mapper/FlatObjectSearchTests.java @@ -233,7 +233,7 @@ public class FlatObjectSearchTests extends ESSingleNodeTestCase { int numDocs = randomIntBetween(2, 100); int precisionThreshold = randomIntBetween(0, 1 << randomInt(20)); - BulkRequestBuilder bulkRequest = client().prepareBulk("test", "_doc") + BulkRequestBuilder bulkRequest = client().prepareBulk("test") .setRefreshPolicy(RefreshPolicy.IMMEDIATE); // Add a random number of documents containing a flat object field, plus @@ -300,7 +300,7 @@ public class FlatObjectSearchTests extends ESSingleNodeTestCase { } public void testTermsAggregation() throws IOException { - BulkRequestBuilder bulkRequest = client().prepareBulk("test", "_doc") + BulkRequestBuilder bulkRequest = client().prepareBulk("test") .setRefreshPolicy(RefreshPolicy.IMMEDIATE); for (int i = 0; i < 5; i++) { bulkRequest.add(client().prepareIndex() diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java index f1d03bf5ff76..e14ef1715375 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkRequestTests.java @@ -78,7 +78,6 @@ public class 
MonitoringBulkRequestTests extends ESTestCase { final XContentType xContentType = XContentType.JSON; final int nbDocs = randomIntBetween(1, 20); - final String[] types = new String[nbDocs]; final String[] ids = new String[nbDocs]; final BytesReference[] sources = new BytesReference[nbDocs]; @@ -93,8 +92,7 @@ public class MonitoringBulkRequestTests extends ESTestCase { builder.field("_index", ""); } - types[i] = randomAlphaOfLength(5); - builder.field("_type", types[i]); + builder.field("_type", "_doc"); if (randomBoolean()) { ids[i] = randomAlphaOfLength(10); @@ -132,7 +130,6 @@ public class MonitoringBulkRequestTests extends ESTestCase { int count = 0; for (final MonitoringBulkDoc bulkDoc : bulkDocs) { assertThat(bulkDoc.getSystem(), equalTo(system)); - assertThat(bulkDoc.getType(), equalTo(types[count])); assertThat(bulkDoc.getId(), equalTo(ids[count])); assertThat(bulkDoc.getTimestamp(), equalTo(timestamp)); assertThat(bulkDoc.getIntervalMillis(), equalTo(interval)); @@ -158,7 +155,7 @@ public class MonitoringBulkRequestTests extends ESTestCase { builder.startObject("index"); { builder.field("_index", ""); - builder.field("_type", "doc"); + builder.field("_type", "_doc"); builder.field("_id", String.valueOf(i)); } builder.endObject(); @@ -186,7 +183,7 @@ public class MonitoringBulkRequestTests extends ESTestCase { bulkRequest.add(randomFrom(MonitoredSystem.values()), content.bytes(), xContentType, 0L, 0L) ); - assertThat(e.getMessage(), containsString("source is missing for monitoring document [][doc][" + nbDocs + "]")); + assertThat(e.getMessage(), containsString("source is missing for monitoring document [][_doc][" + nbDocs + "]")); } public void testAddRequestContentWithUnrecognizedIndexName() throws IOException { @@ -202,7 +199,6 @@ public class MonitoringBulkRequestTests extends ESTestCase { builder.startObject("index"); { builder.field("_index", indexName); - builder.field("_type", "doc"); } builder.endObject(); } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java index 250a69a81163..1bed5f1c7fff 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java @@ -856,7 +856,7 @@ public class HttpExporterIT extends MonitoringIntegTestCase { private void assertBulkRequest(String requestBody, int numberOfActions) throws Exception { BulkRequest bulkRequest = Requests.bulkRequest() - .add(new BytesArray(requestBody.getBytes(StandardCharsets.UTF_8)), null, null, XContentType.JSON); + .add(new BytesArray(requestBody.getBytes(StandardCharsets.UTF_8)), null, XContentType.JSON); assertThat(bulkRequest.numberOfActions(), equalTo(numberOfActions)); for (DocWriteRequest actionRequest : bulkRequest.requests()) { assertThat(actionRequest, instanceOf(IndexRequest.class)); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredApiKeysRemover.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredApiKeysRemover.java index 755224671e1e..fc92430f56df 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredApiKeysRemover.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredApiKeysRemover.java @@ -83,8 
+83,8 @@ public final class ExpiredApiKeysRemover extends AbstractRunnable { logger.debug("delete by query of api keys finished with [{}] deletions, [{}] bulk failures, [{}] search failures", response.getDeleted(), response.getBulkFailures().size(), response.getSearchFailures().size()); for (BulkItemResponse.Failure failure : response.getBulkFailures()) { - logger.debug(new ParameterizedMessage("deletion failed for index [{}], type [{}], id [{}]", - failure.getIndex(), failure.getType(), failure.getId()), failure.getCause()); + logger.debug(new ParameterizedMessage("deletion failed for index [{}], id [{}]", + failure.getIndex(), failure.getId()), failure.getCause()); } for (ScrollableHitSource.SearchFailure failure : response.getSearchFailures()) { logger.debug(new ParameterizedMessage("search failed for index [{}], shard [{}] on node [{}]", diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredTokenRemover.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredTokenRemover.java index 23e7bb2fe0fe..78ac4e374530 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredTokenRemover.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredTokenRemover.java @@ -109,8 +109,8 @@ final class ExpiredTokenRemover extends AbstractRunnable { logger.debug("delete by query of tokens finished with [{}] deletions, [{}] bulk failures, [{}] search failures", response.getDeleted(), response.getBulkFailures().size(), response.getSearchFailures().size()); for (BulkItemResponse.Failure failure : response.getBulkFailures()) { - logger.debug(new ParameterizedMessage("deletion failed for index [{}], type [{}], id [{}]", - failure.getIndex(), failure.getType(), failure.getId()), failure.getCause()); + logger.debug(new ParameterizedMessage("deletion failed for index [{}], id [{}]", + failure.getIndex(), failure.getId()), failure.getCause()); } for (ScrollableHitSource.SearchFailure failure : response.getSearchFailures()) { logger.debug(new ParameterizedMessage("search failed for index [{}], shard [{}] on node [{}]", diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/BulkUpdateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/BulkUpdateTests.java index 88a72072c952..644c1c4137e4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/BulkUpdateTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/BulkUpdateTests.java @@ -107,7 +107,7 @@ public class BulkUpdateTests extends SecurityIntegTestCase { Request bulkRequest = new Request("POST", "/_bulk"); bulkRequest.setOptions(options); bulkRequest.setJsonEntity( - "{\"update\": {\"_index\": \"index1\", \"_type\": \"_doc\", \"_id\": \"1\"}}\n" + + "{\"update\": {\"_index\": \"index1\", \"_id\": \"1\"}}\n" + "{\"doc\": {\"bulk updated\":\"bulk updated\"}}\n"); getRestClient().performRequest(bulkRequest); diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml index ca04327eab72..8da1c1800a7e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml @@ -965,7 +965,6 @@ setup: body: - index: _index: "tz_rollup" - _type: "_doc" - timestamp.date_histogram.timestamp: 1531221000000 
timestamp.date_histogram.interval: "5m" timestamp.date_histogram.time_zone: "America/Edmonton" @@ -978,7 +977,6 @@ setup: - index: _index: "tz_rollup" - _type: "_doc" - timestamp.date_histogram.timestamp: 1531221300000 timestamp.date_histogram.interval: "5m" timestamp.date_histogram.time_zone: "America/Edmonton" @@ -991,7 +989,6 @@ setup: - index: _index: "tz_rollup" - _type: "_doc" - timestamp.date_histogram.timestamp: 1531221600000 timestamp.date_histogram.interval: "5m" timestamp.date_histogram.time_zone: "America/Edmonton" @@ -1118,7 +1115,6 @@ setup: body: - index: _index: "tz_rollup" - _type: "_doc" - timestamp.date_histogram.timestamp: 1531221000000 timestamp.date_histogram.interval: "5m" timestamp.date_histogram.time_zone: "Canada/Mountain" @@ -1131,7 +1127,6 @@ setup: - index: _index: "tz_rollup" - _type: "_doc" - timestamp.date_histogram.timestamp: 1531221300000 timestamp.date_histogram.interval: "5m" timestamp.date_histogram.time_zone: "Canada/Mountain" @@ -1144,7 +1139,6 @@ setup: - index: _index: "tz_rollup" - _type: "_doc" - timestamp.date_histogram.timestamp: 1531221600000 timestamp.date_histogram.interval: "5m" timestamp.date_histogram.time_zone: "Canada/Mountain" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/sql.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/sql.yml index 9ac15b309b16..af45542eefb1 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/sql.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/sql.yml @@ -6,19 +6,16 @@ setup: body: - index: _index: test - _type: doc _id: 1 - str: test1 int: 1 - index: _index: test - _type: doc _id: 2 - str: test2 int: 2 - index: _index: test - _type: doc _id: 3 - str: test3 int: 3 diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml index 02926f469ff7..3e61e2ed0e9e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml @@ -6,7 +6,6 @@ body: - index: _index: test - _type: doc _id: 1 - str: test1 int: 1 diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java index 82ac6f8d6d7f..ee299d05b09b 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java @@ -201,7 +201,6 @@ public class ExecutableIndexAction extends ExecutableAction { .field("failed", item.isFailed()) .field("message", item.getFailureMessage()) .field("id", item.getId()) - .field("type", item.getType()) .field("index", item.getIndex()) .endObject(); } else { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java index 4e7fb4347433..6edfdf221499 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java @@ -352,7 +352,7 @@ public class IndexActionTests extends ESTestCase { ArgumentCaptor captor = 
ArgumentCaptor.forClass(BulkRequest.class); PlainActionFuture listener = PlainActionFuture.newFuture(); - BulkItemResponse.Failure failure = new BulkItemResponse.Failure("test-index", "test-type", "anything", + BulkItemResponse.Failure failure = new BulkItemResponse.Failure("test-index", "anything", new ElasticsearchException("anything")); BulkItemResponse firstResponse = new BulkItemResponse(0, DocWriteRequest.OpType.INDEX, failure); BulkItemResponse secondResponse; diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java index a392fea9beb3..ad5a8b68a5ea 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java @@ -9,7 +9,6 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Booleans; -import org.elasticsearch.rest.action.document.RestBulkAction; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -109,12 +108,11 @@ public class IndexingIT extends AbstractUpgradeTestCase { private void bulk(String index, String valueSuffix, int count) throws IOException { StringBuilder b = new StringBuilder(); for (int i = 0; i < count; i++) { - b.append("{\"index\": {\"_index\": \"").append(index).append("\", \"_type\": \"_doc\"}}\n"); + b.append("{\"index\": {\"_index\": \"").append(index).append("\"}}\n"); b.append("{\"f1\": \"v").append(i).append(valueSuffix).append("\", \"f2\": ").append(i).append("}\n"); } Request bulk = new Request("POST", "/_bulk"); bulk.addParameter("refresh", "true"); - bulk.setOptions(expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE)); bulk.setJsonEntity(b.toString()); client().performRequest(bulk); } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/50_token_auth.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/50_token_auth.yml index e4d0eb8757f8..2f44c37a37f9 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/50_token_auth.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/50_token_auth.yml @@ -56,15 +56,15 @@ bulk: refresh: true body: - - '{"index": {"_index": "token_index", "_type": "_doc", "_id" : "1"}}' + - '{"index": {"_index": "token_index", "_id" : "1"}}' - '{"f1": "v1_old", "f2": 0}' - - '{"index": {"_index": "token_index", "_type": "_doc", "_id" : "2"}}' + - '{"index": {"_index": "token_index", "_id" : "2"}}' - '{"f1": "v2_old", "f2": 1}' - - '{"index": {"_index": "token_index", "_type": "_doc", "_id" : "3"}}' + - '{"index": {"_index": "token_index", "_id" : "3"}}' - '{"f1": "v3_old", "f2": 2}' - - '{"index": {"_index": "token_index", "_type": "_doc", "_id" : "4"}}' + - '{"index": {"_index": "token_index", "_id" : "4"}}' - '{"f1": "v4_old", "f2": 3}' - - '{"index": {"_index": "token_index", "_type": "_doc", "_id" : "5"}}' + - '{"index": {"_index": "token_index", "_id" : "5"}}' - '{"f1": "v5_old", "f2": 4}' - do: From 73cd9e0c1bcc3ee4869f2ed9f79816055e694396 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Mon, 7 Oct 2019 15:33:25 +0300 Subject: [PATCH 22/55] Fix the windows packaging tests exit code

$? is a boolean and was used incorrectly
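For context (an editorial sketch, not part of the patch): in PowerShell, `$?` is a Boolean success flag, while `$LastExitCode` holds the numeric exit code of the last native program that ran. Passing the Boolean to `exit` inverts the status, because `[int]$true` is 1 and `[int]$false` is 0:

    & cmd /c "exit 2"   # hypothetical failing native command, for illustration only
    # $? is now $false, so `exit $?` would exit with 0 and report success to CI
    # $LastExitCode is 2, so `exit $LastExitCode` propagates the real failure

Hence the one-line change below swaps `exit $?` for `exit $LastExitCode`.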
--- .ci/os.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/os.ps1 b/.ci/os.ps1 index 25c8886a0a4b..028bb460a71d 100644 --- a/.ci/os.ps1 +++ b/.ci/os.ps1 @@ -33,4 +33,4 @@ $ErrorActionPreference="Continue" -x :distribution:packages:buildOssRpm ` -x :distribution:packages:buildRpm ` -exit $? +exit $LastExitCode From 08a22d0b393f4a76c52dabc5e7b9cafcc19c30ca Mon Sep 17 00:00:00 2001 From: Marios Trivyzas Date: Mon, 7 Oct 2019 14:37:20 +0200 Subject: [PATCH 23/55] SQL: Allow whitespaces in escape patterns (#47577) * SQL: Allow whitespaces in escape patterns Previously, we supported only the format `{fn ()}` but other DBs like MSSQL, DB2, MariaDB/MySQL also allow whitespaces between `{` and `fn`. Furthermore, some applications - like PowerBI - generate escape sequences with spaces: `select { fn name(params) }` etc. Add support for whitespaces between `{` and the escape pattern definition, like `fn`, `ts`, `d`, `guid`, etc. Closes: #47401 * Fix imports --- x-pack/plugin/sql/src/main/antlr/SqlBase.g4 | 19 +- .../plugin/sql/src/main/antlr/SqlBase.tokens | 100 +-- .../sql/src/main/antlr/SqlBaseLexer.tokens | 98 +-- .../xpack/sql/parser/SqlBaseLexer.java | 816 +++++++++--------- .../xpack/sql/parser/SqlBaseParser.java | 171 ++-- .../sql/parser/EscapedFunctionsTests.java | 52 +- 6 files changed, 638 insertions(+), 618 deletions(-) diff --git a/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 b/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 index 86c119524989..fa8f868b71e3 100644 --- a/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 +++ b/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 @@ -217,7 +217,7 @@ pattern patternEscape : ESCAPE escape=string - | ESCAPE_ESC escape=string '}' + | ESCAPE_ESC escape=string ESC_END ; valueExpression @@ -274,7 +274,7 @@ extractTemplate functionExpression : functionTemplate - | FUNCTION_ESC functionTemplate '}' + | FUNCTION_ESC functionTemplate ESC_END ; functionTemplate @@ -480,15 +480,16 @@ YEAR: 'YEAR'; YEARS: 'YEARS'; // Escaped Sequence -ESCAPE_ESC: '{ESCAPE'; -FUNCTION_ESC: '{FN'; -LIMIT_ESC:'{LIMIT'; -DATE_ESC: '{D'; -TIME_ESC: '{T'; -TIMESTAMP_ESC: '{TS'; // mapped to string literal -GUID_ESC: '{GUID'; +ESCAPE_ESC: ESC_START 'ESCAPE'; +FUNCTION_ESC: ESC_START 'FN'; +LIMIT_ESC: ESC_START 'LIMIT'; +DATE_ESC: ESC_START 'D'; +TIME_ESC: ESC_START 'T'; +TIMESTAMP_ESC: ESC_START 'TS'; // mapped to string literal +GUID_ESC: ESC_START 'GUID'; +ESC_START: '{' (WS)*; ESC_END: '}'; // Operators diff --git a/x-pack/plugin/sql/src/main/antlr/SqlBase.tokens b/x-pack/plugin/sql/src/main/antlr/SqlBase.tokens index 9771af465bb4..4fc458a7984a 100644 --- a/x-pack/plugin/sql/src/main/antlr/SqlBase.tokens +++ b/x-pack/plugin/sql/src/main/antlr/SqlBase.tokens @@ -106,36 +106,37 @@ DATE_ESC=105 TIME_ESC=106 TIMESTAMP_ESC=107 GUID_ESC=108 -ESC_END=109 -EQ=110 -NULLEQ=111 -NEQ=112 -LT=113 -LTE=114 -GT=115 -GTE=116 -PLUS=117 -MINUS=118 -ASTERISK=119 -SLASH=120 -PERCENT=121 -CAST_OP=122 -CONCAT=123 -DOT=124 -PARAM=125 -STRING=126 -INTEGER_VALUE=127 -DECIMAL_VALUE=128 -IDENTIFIER=129 -DIGIT_IDENTIFIER=130 -TABLE_IDENTIFIER=131 -QUOTED_IDENTIFIER=132 -BACKQUOTED_IDENTIFIER=133 -SIMPLE_COMMENT=134 -BRACKETED_COMMENT=135 -WS=136 -UNRECOGNIZED=137 -DELIMITER=138 +ESC_START=109 +ESC_END=110 +EQ=111 +NULLEQ=112 +NEQ=113 +LT=114 +LTE=115 +GT=116 +GTE=117 +PLUS=118 +MINUS=119 +ASTERISK=120 +SLASH=121 +PERCENT=122 +CAST_OP=123 +CONCAT=124 +DOT=125 +PARAM=126 +STRING=127 +INTEGER_VALUE=128 +DECIMAL_VALUE=129 +IDENTIFIER=130 +DIGIT_IDENTIFIER=131 +TABLE_IDENTIFIER=132
+QUOTED_IDENTIFIER=133 +BACKQUOTED_IDENTIFIER=134 +SIMPLE_COMMENT=135 +BRACKETED_COMMENT=136 +WS=137 +UNRECOGNIZED=138 +DELIMITER=139 '('=1 ')'=2 ','=3 @@ -237,26 +238,19 @@ DELIMITER=138 'WITH'=99 'YEAR'=100 'YEARS'=101 -'{ESCAPE'=102 -'{FN'=103 -'{LIMIT'=104 -'{D'=105 -'{T'=106 -'{TS'=107 -'{GUID'=108 -'}'=109 -'='=110 -'<=>'=111 -'<'=113 -'<='=114 -'>'=115 -'>='=116 -'+'=117 -'-'=118 -'*'=119 -'/'=120 -'%'=121 -'::'=122 -'||'=123 -'.'=124 -'?'=125 +'}'=110 +'='=111 +'<=>'=112 +'<'=114 +'<='=115 +'>'=116 +'>='=117 +'+'=118 +'-'=119 +'*'=120 +'/'=121 +'%'=122 +'::'=123 +'||'=124 +'.'=125 +'?'=126 diff --git a/x-pack/plugin/sql/src/main/antlr/SqlBaseLexer.tokens b/x-pack/plugin/sql/src/main/antlr/SqlBaseLexer.tokens index adb6142e8653..8773710b0137 100644 --- a/x-pack/plugin/sql/src/main/antlr/SqlBaseLexer.tokens +++ b/x-pack/plugin/sql/src/main/antlr/SqlBaseLexer.tokens @@ -106,35 +106,36 @@ DATE_ESC=105 TIME_ESC=106 TIMESTAMP_ESC=107 GUID_ESC=108 -ESC_END=109 -EQ=110 -NULLEQ=111 -NEQ=112 -LT=113 -LTE=114 -GT=115 -GTE=116 -PLUS=117 -MINUS=118 -ASTERISK=119 -SLASH=120 -PERCENT=121 -CAST_OP=122 -CONCAT=123 -DOT=124 -PARAM=125 -STRING=126 -INTEGER_VALUE=127 -DECIMAL_VALUE=128 -IDENTIFIER=129 -DIGIT_IDENTIFIER=130 -TABLE_IDENTIFIER=131 -QUOTED_IDENTIFIER=132 -BACKQUOTED_IDENTIFIER=133 -SIMPLE_COMMENT=134 -BRACKETED_COMMENT=135 -WS=136 -UNRECOGNIZED=137 +ESC_START=109 +ESC_END=110 +EQ=111 +NULLEQ=112 +NEQ=113 +LT=114 +LTE=115 +GT=116 +GTE=117 +PLUS=118 +MINUS=119 +ASTERISK=120 +SLASH=121 +PERCENT=122 +CAST_OP=123 +CONCAT=124 +DOT=125 +PARAM=126 +STRING=127 +INTEGER_VALUE=128 +DECIMAL_VALUE=129 +IDENTIFIER=130 +DIGIT_IDENTIFIER=131 +TABLE_IDENTIFIER=132 +QUOTED_IDENTIFIER=133 +BACKQUOTED_IDENTIFIER=134 +SIMPLE_COMMENT=135 +BRACKETED_COMMENT=136 +WS=137 +UNRECOGNIZED=138 '('=1 ')'=2 ','=3 @@ -236,26 +237,19 @@ UNRECOGNIZED=137 'WITH'=99 'YEAR'=100 'YEARS'=101 -'{ESCAPE'=102 -'{FN'=103 -'{LIMIT'=104 -'{D'=105 -'{T'=106 -'{TS'=107 -'{GUID'=108 -'}'=109 -'='=110 -'<=>'=111 -'<'=113 -'<='=114 -'>'=115 -'>='=116 -'+'=117 -'-'=118 -'*'=119 -'/'=120 -'%'=121 -'::'=122 -'||'=123 -'.'=124 -'?'=125 +'}'=110 +'='=111 +'<=>'=112 +'<'=114 +'<='=115 +'>'=116 +'>='=117 +'+'=118 +'-'=119 +'*'=120 +'/'=121 +'%'=122 +'::'=123 +'||'=124 +'.'=125 +'?'=126 diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseLexer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseLexer.java index cba3c1ee9a39..a8bbcf7a92de 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseLexer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseLexer.java @@ -32,12 +32,12 @@ class SqlBaseLexer extends Lexer { TABLES=88, TEXT=89, THEN=90, TRUE=91, TO=92, TYPE=93, TYPES=94, USING=95, VERIFY=96, WHEN=97, WHERE=98, WITH=99, YEAR=100, YEARS=101, ESCAPE_ESC=102, FUNCTION_ESC=103, LIMIT_ESC=104, DATE_ESC=105, TIME_ESC=106, TIMESTAMP_ESC=107, - GUID_ESC=108, ESC_END=109, EQ=110, NULLEQ=111, NEQ=112, LT=113, LTE=114, - GT=115, GTE=116, PLUS=117, MINUS=118, ASTERISK=119, SLASH=120, PERCENT=121, - CAST_OP=122, CONCAT=123, DOT=124, PARAM=125, STRING=126, INTEGER_VALUE=127, - DECIMAL_VALUE=128, IDENTIFIER=129, DIGIT_IDENTIFIER=130, TABLE_IDENTIFIER=131, - QUOTED_IDENTIFIER=132, BACKQUOTED_IDENTIFIER=133, SIMPLE_COMMENT=134, - BRACKETED_COMMENT=135, WS=136, UNRECOGNIZED=137; + GUID_ESC=108, ESC_START=109, ESC_END=110, EQ=111, NULLEQ=112, NEQ=113, + LT=114, LTE=115, GT=116, GTE=117, PLUS=118, MINUS=119, ASTERISK=120, SLASH=121, + 
PERCENT=122, CAST_OP=123, CONCAT=124, DOT=125, PARAM=126, STRING=127, + INTEGER_VALUE=128, DECIMAL_VALUE=129, IDENTIFIER=130, DIGIT_IDENTIFIER=131, + TABLE_IDENTIFIER=132, QUOTED_IDENTIFIER=133, BACKQUOTED_IDENTIFIER=134, + SIMPLE_COMMENT=135, BRACKETED_COMMENT=136, WS=137, UNRECOGNIZED=138; public static String[] modeNames = { "DEFAULT_MODE" }; @@ -57,12 +57,12 @@ class SqlBaseLexer extends Lexer { "SYS", "TABLE", "TABLES", "TEXT", "THEN", "TRUE", "TO", "TYPE", "TYPES", "USING", "VERIFY", "WHEN", "WHERE", "WITH", "YEAR", "YEARS", "ESCAPE_ESC", "FUNCTION_ESC", "LIMIT_ESC", "DATE_ESC", "TIME_ESC", "TIMESTAMP_ESC", - "GUID_ESC", "ESC_END", "EQ", "NULLEQ", "NEQ", "LT", "LTE", "GT", "GTE", - "PLUS", "MINUS", "ASTERISK", "SLASH", "PERCENT", "CAST_OP", "CONCAT", - "DOT", "PARAM", "STRING", "INTEGER_VALUE", "DECIMAL_VALUE", "IDENTIFIER", - "DIGIT_IDENTIFIER", "TABLE_IDENTIFIER", "QUOTED_IDENTIFIER", "BACKQUOTED_IDENTIFIER", - "EXPONENT", "DIGIT", "LETTER", "SIMPLE_COMMENT", "BRACKETED_COMMENT", - "WS", "UNRECOGNIZED" + "GUID_ESC", "ESC_START", "ESC_END", "EQ", "NULLEQ", "NEQ", "LT", "LTE", + "GT", "GTE", "PLUS", "MINUS", "ASTERISK", "SLASH", "PERCENT", "CAST_OP", + "CONCAT", "DOT", "PARAM", "STRING", "INTEGER_VALUE", "DECIMAL_VALUE", + "IDENTIFIER", "DIGIT_IDENTIFIER", "TABLE_IDENTIFIER", "QUOTED_IDENTIFIER", + "BACKQUOTED_IDENTIFIER", "EXPONENT", "DIGIT", "LETTER", "SIMPLE_COMMENT", + "BRACKETED_COMMENT", "WS", "UNRECOGNIZED" }; private static final String[] _LITERAL_NAMES = { @@ -81,9 +81,9 @@ class SqlBaseLexer extends Lexer { "'QUERY'", "'SCHEMAS'", "'SECOND'", "'SECONDS'", "'SELECT'", "'SHOW'", "'SYS'", "'TABLE'", "'TABLES'", "'TEXT'", "'THEN'", "'TRUE'", "'TO'", "'TYPE'", "'TYPES'", "'USING'", "'VERIFY'", "'WHEN'", "'WHERE'", "'WITH'", - "'YEAR'", "'YEARS'", "'{ESCAPE'", "'{FN'", "'{LIMIT'", "'{D'", "'{T'", - "'{TS'", "'{GUID'", "'}'", "'='", "'<=>'", null, "'<'", "'<='", "'>'", - "'>='", "'+'", "'-'", "'*'", "'/'", "'%'", "'::'", "'||'", "'.'", "'?'" + "'YEAR'", "'YEARS'", null, null, null, null, null, null, null, null, "'}'", + "'='", "'<=>'", null, "'<'", "'<='", "'>'", "'>='", "'+'", "'-'", "'*'", + "'/'", "'%'", "'::'", "'||'", "'.'", "'?'" }; private static final String[] _SYMBOLIC_NAMES = { null, null, null, null, null, "ALL", "ANALYZE", "ANALYZED", "AND", "ANY", @@ -100,11 +100,12 @@ class SqlBaseLexer extends Lexer { "SYS", "TABLE", "TABLES", "TEXT", "THEN", "TRUE", "TO", "TYPE", "TYPES", "USING", "VERIFY", "WHEN", "WHERE", "WITH", "YEAR", "YEARS", "ESCAPE_ESC", "FUNCTION_ESC", "LIMIT_ESC", "DATE_ESC", "TIME_ESC", "TIMESTAMP_ESC", - "GUID_ESC", "ESC_END", "EQ", "NULLEQ", "NEQ", "LT", "LTE", "GT", "GTE", - "PLUS", "MINUS", "ASTERISK", "SLASH", "PERCENT", "CAST_OP", "CONCAT", - "DOT", "PARAM", "STRING", "INTEGER_VALUE", "DECIMAL_VALUE", "IDENTIFIER", - "DIGIT_IDENTIFIER", "TABLE_IDENTIFIER", "QUOTED_IDENTIFIER", "BACKQUOTED_IDENTIFIER", - "SIMPLE_COMMENT", "BRACKETED_COMMENT", "WS", "UNRECOGNIZED" + "GUID_ESC", "ESC_START", "ESC_END", "EQ", "NULLEQ", "NEQ", "LT", "LTE", + "GT", "GTE", "PLUS", "MINUS", "ASTERISK", "SLASH", "PERCENT", "CAST_OP", + "CONCAT", "DOT", "PARAM", "STRING", "INTEGER_VALUE", "DECIMAL_VALUE", + "IDENTIFIER", "DIGIT_IDENTIFIER", "TABLE_IDENTIFIER", "QUOTED_IDENTIFIER", + "BACKQUOTED_IDENTIFIER", "SIMPLE_COMMENT", "BRACKETED_COMMENT", "WS", + "UNRECOGNIZED" }; public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES); @@ -161,7 +162,7 @@ class SqlBaseLexer extends Lexer { public ATN getATN() { return _ATN; } public static 
final String _serializedATN = - "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2\u008b\u047f\b\1\4"+ + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2\u008c\u0488\b\1\4"+ "\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n"+ "\4\13\t\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22"+ "\t\22\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31"+ @@ -178,390 +179,395 @@ class SqlBaseLexer extends Lexer { "\u0080\4\u0081\t\u0081\4\u0082\t\u0082\4\u0083\t\u0083\4\u0084\t\u0084"+ "\4\u0085\t\u0085\4\u0086\t\u0086\4\u0087\t\u0087\4\u0088\t\u0088\4\u0089"+ "\t\u0089\4\u008a\t\u008a\4\u008b\t\u008b\4\u008c\t\u008c\4\u008d\t\u008d"+ - "\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6\3\6\3\6\3\7\3\7\3\7\3\7\3\7\3"+ - "\7\3\7\3\7\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\t\3\t\3\t\3\t\3\n\3\n"+ - "\3\n\3\n\3\13\3\13\3\13\3\f\3\f\3\f\3\f\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3"+ - "\r\3\16\3\16\3\16\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\20\3\20\3\20\3"+ - "\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22\3\22\3"+ - "\22\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\24\3\24\3\24\3"+ - "\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3"+ - "\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3"+ - "\26\3\26\3\26\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3"+ - "\27\3\27\3\27\3\27\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\31\3\31\3\31\3"+ - "\31\3\31\3\32\3\32\3\32\3\32\3\32\3\32\3\33\3\33\3\33\3\33\3\33\3\34\3"+ - "\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3\35\3\35\3\35\3"+ - "\35\3\35\3\35\3\36\3\36\3\36\3\36\3\36\3\37\3\37\3\37\3\37\3 \3 \3 \3"+ - " \3 \3 \3 \3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3\"\3\"\3\"\3\"\3\"\3\"\3"+ - "\"\3#\3#\3#\3#\3#\3#\3#\3#\3$\3$\3$\3$\3$\3$\3$\3$\3%\3%\3%\3%\3%\3%\3"+ - "&\3&\3&\3&\3&\3&\3\'\3\'\3\'\3\'\3(\3(\3(\3(\3(\3(\3(\3)\3)\3)\3)\3)\3"+ - "*\3*\3*\3*\3*\3*\3*\3+\3+\3+\3+\3+\3,\3,\3,\3,\3,\3,\3,\3,\3,\3,\3-\3"+ - "-\3-\3-\3-\3-\3-\3-\3-\3.\3.\3.\3.\3.\3.\3/\3/\3/\3/\3/\3/\3/\3\60\3\60"+ - "\3\60\3\60\3\60\3\61\3\61\3\61\3\61\3\61\3\61\3\62\3\62\3\62\3\63\3\63"+ - "\3\63\3\63\3\63\3\63\3\63\3\63\3\64\3\64\3\64\3\64\3\64\3\64\3\65\3\65"+ - "\3\65\3\65\3\65\3\65\3\65\3\65\3\65\3\66\3\66\3\66\3\67\3\67\3\67\3\67"+ - "\3\67\38\38\38\38\38\39\39\39\39\39\3:\3:\3:\3:\3:\3;\3;\3;\3;\3;\3;\3"+ - "<\3<\3<\3<\3<\3<\3<\3=\3=\3=\3=\3=\3=\3>\3>\3>\3>\3>\3>\3>\3?\3?\3?\3"+ - "?\3?\3?\3?\3?\3@\3@\3@\3@\3@\3@\3A\3A\3A\3A\3A\3A\3A\3B\3B\3B\3B\3B\3"+ - "B\3B\3B\3C\3C\3C\3C\3D\3D\3D\3D\3D\3E\3E\3E\3E\3E\3E\3F\3F\3F\3G\3G\3"+ - "G\3G\3G\3G\3G\3G\3G\3G\3H\3H\3H\3I\3I\3I\3I\3I\3I\3J\3J\3J\3J\3J\3J\3"+ - "K\3K\3K\3K\3K\3K\3K\3L\3L\3L\3L\3L\3L\3L\3L\3L\3M\3M\3M\3M\3M\3M\3N\3"+ - "N\3N\3N\3N\3O\3O\3O\3O\3O\3O\3P\3P\3P\3P\3P\3P\3Q\3Q\3Q\3Q\3Q\3Q\3R\3"+ - "R\3R\3R\3R\3R\3R\3R\3S\3S\3S\3S\3S\3S\3S\3T\3T\3T\3T\3T\3T\3T\3T\3U\3"+ - "U\3U\3U\3U\3U\3U\3V\3V\3V\3V\3V\3W\3W\3W\3W\3X\3X\3X\3X\3X\3X\3Y\3Y\3"+ - "Y\3Y\3Y\3Y\3Y\3Z\3Z\3Z\3Z\3Z\3[\3[\3[\3[\3[\3\\\3\\\3\\\3\\\3\\\3]\3]"+ - "\3]\3^\3^\3^\3^\3^\3_\3_\3_\3_\3_\3_\3`\3`\3`\3`\3`\3`\3a\3a\3a\3a\3a"+ - "\3a\3a\3b\3b\3b\3b\3b\3c\3c\3c\3c\3c\3c\3d\3d\3d\3d\3d\3e\3e\3e\3e\3e"+ - "\3f\3f\3f\3f\3f\3f\3g\3g\3g\3g\3g\3g\3g\3g\3h\3h\3h\3h\3i\3i\3i\3i\3i"+ - "\3i\3i\3j\3j\3j\3k\3k\3k\3l\3l\3l\3l\3m\3m\3m\3m\3m\3m\3n\3n\3o\3o\3p"+ - "\3p\3p\3p\3q\3q\3q\3q\5q\u03bd\nq\3r\3r\3s\3s\3s\3t\3t\3u\3u\3u\3v\3v"+ - "\3w\3w\3x\3x\3y\3y\3z\3z\3{\3{\3{\3|\3|\3|\3}\3}\3~\3~\3\177\3\177\3\177"+ - 
"\3\177\7\177\u03e1\n\177\f\177\16\177\u03e4\13\177\3\177\3\177\3\u0080"+ - "\6\u0080\u03e9\n\u0080\r\u0080\16\u0080\u03ea\3\u0081\6\u0081\u03ee\n"+ - "\u0081\r\u0081\16\u0081\u03ef\3\u0081\3\u0081\7\u0081\u03f4\n\u0081\f"+ - "\u0081\16\u0081\u03f7\13\u0081\3\u0081\3\u0081\6\u0081\u03fb\n\u0081\r"+ - "\u0081\16\u0081\u03fc\3\u0081\6\u0081\u0400\n\u0081\r\u0081\16\u0081\u0401"+ - "\3\u0081\3\u0081\7\u0081\u0406\n\u0081\f\u0081\16\u0081\u0409\13\u0081"+ - "\5\u0081\u040b\n\u0081\3\u0081\3\u0081\3\u0081\3\u0081\6\u0081\u0411\n"+ - "\u0081\r\u0081\16\u0081\u0412\3\u0081\3\u0081\5\u0081\u0417\n\u0081\3"+ - "\u0082\3\u0082\5\u0082\u041b\n\u0082\3\u0082\3\u0082\3\u0082\7\u0082\u0420"+ - "\n\u0082\f\u0082\16\u0082\u0423\13\u0082\3\u0083\3\u0083\3\u0083\3\u0083"+ - "\6\u0083\u0429\n\u0083\r\u0083\16\u0083\u042a\3\u0084\3\u0084\3\u0084"+ - "\6\u0084\u0430\n\u0084\r\u0084\16\u0084\u0431\3\u0085\3\u0085\3\u0085"+ - "\3\u0085\7\u0085\u0438\n\u0085\f\u0085\16\u0085\u043b\13\u0085\3\u0085"+ - "\3\u0085\3\u0086\3\u0086\3\u0086\3\u0086\7\u0086\u0443\n\u0086\f\u0086"+ - "\16\u0086\u0446\13\u0086\3\u0086\3\u0086\3\u0087\3\u0087\5\u0087\u044c"+ - "\n\u0087\3\u0087\6\u0087\u044f\n\u0087\r\u0087\16\u0087\u0450\3\u0088"+ - "\3\u0088\3\u0089\3\u0089\3\u008a\3\u008a\3\u008a\3\u008a\7\u008a\u045b"+ - "\n\u008a\f\u008a\16\u008a\u045e\13\u008a\3\u008a\5\u008a\u0461\n\u008a"+ - "\3\u008a\5\u008a\u0464\n\u008a\3\u008a\3\u008a\3\u008b\3\u008b\3\u008b"+ - "\3\u008b\3\u008b\7\u008b\u046d\n\u008b\f\u008b\16\u008b\u0470\13\u008b"+ - "\3\u008b\3\u008b\3\u008b\3\u008b\3\u008b\3\u008c\6\u008c\u0478\n\u008c"+ - "\r\u008c\16\u008c\u0479\3\u008c\3\u008c\3\u008d\3\u008d\3\u046e\2\u008e"+ - "\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35\20"+ - "\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33\65\34\67\359\36;\37"+ - "= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\64g\65i\66k\67m8o"+ - "9q:s;u{?}@\177A\u0081B\u0083C\u0085D\u0087E\u0089F\u008bG\u008dH"+ - "\u008fI\u0091J\u0093K\u0095L\u0097M\u0099N\u009bO\u009dP\u009fQ\u00a1"+ - "R\u00a3S\u00a5T\u00a7U\u00a9V\u00abW\u00adX\u00afY\u00b1Z\u00b3[\u00b5"+ - "\\\u00b7]\u00b9^\u00bb_\u00bd`\u00bfa\u00c1b\u00c3c\u00c5d\u00c7e\u00c9"+ - "f\u00cbg\u00cdh\u00cfi\u00d1j\u00d3k\u00d5l\u00d7m\u00d9n\u00dbo\u00dd"+ - "p\u00dfq\u00e1r\u00e3s\u00e5t\u00e7u\u00e9v\u00ebw\u00edx\u00efy\u00f1"+ - "z\u00f3{\u00f5|\u00f7}\u00f9~\u00fb\177\u00fd\u0080\u00ff\u0081\u0101"+ - "\u0082\u0103\u0083\u0105\u0084\u0107\u0085\u0109\u0086\u010b\u0087\u010d"+ - "\2\u010f\2\u0111\2\u0113\u0088\u0115\u0089\u0117\u008a\u0119\u008b\3\2"+ - "\13\3\2))\4\2BBaa\3\2$$\3\2bb\4\2--//\3\2\62;\3\2C\\\4\2\f\f\17\17\5\2"+ - "\13\f\17\17\"\"\u049f\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2"+ - "\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25"+ - "\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2"+ - "\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2"+ - "\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67\3"+ - "\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2"+ - "\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2"+ - "Q\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3"+ - "\2\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3\2\2\2\2i\3\2\2"+ - "\2\2k\3\2\2\2\2m\3\2\2\2\2o\3\2\2\2\2q\3\2\2\2\2s\3\2\2\2\2u\3\2\2\2\2"+ - "w\3\2\2\2\2y\3\2\2\2\2{\3\2\2\2\2}\3\2\2\2\2\177\3\2\2\2\2\u0081\3\2\2"+ - 
"\2\2\u0083\3\2\2\2\2\u0085\3\2\2\2\2\u0087\3\2\2\2\2\u0089\3\2\2\2\2\u008b"+ - "\3\2\2\2\2\u008d\3\2\2\2\2\u008f\3\2\2\2\2\u0091\3\2\2\2\2\u0093\3\2\2"+ - "\2\2\u0095\3\2\2\2\2\u0097\3\2\2\2\2\u0099\3\2\2\2\2\u009b\3\2\2\2\2\u009d"+ - "\3\2\2\2\2\u009f\3\2\2\2\2\u00a1\3\2\2\2\2\u00a3\3\2\2\2\2\u00a5\3\2\2"+ - "\2\2\u00a7\3\2\2\2\2\u00a9\3\2\2\2\2\u00ab\3\2\2\2\2\u00ad\3\2\2\2\2\u00af"+ - "\3\2\2\2\2\u00b1\3\2\2\2\2\u00b3\3\2\2\2\2\u00b5\3\2\2\2\2\u00b7\3\2\2"+ - "\2\2\u00b9\3\2\2\2\2\u00bb\3\2\2\2\2\u00bd\3\2\2\2\2\u00bf\3\2\2\2\2\u00c1"+ - "\3\2\2\2\2\u00c3\3\2\2\2\2\u00c5\3\2\2\2\2\u00c7\3\2\2\2\2\u00c9\3\2\2"+ - "\2\2\u00cb\3\2\2\2\2\u00cd\3\2\2\2\2\u00cf\3\2\2\2\2\u00d1\3\2\2\2\2\u00d3"+ - "\3\2\2\2\2\u00d5\3\2\2\2\2\u00d7\3\2\2\2\2\u00d9\3\2\2\2\2\u00db\3\2\2"+ - "\2\2\u00dd\3\2\2\2\2\u00df\3\2\2\2\2\u00e1\3\2\2\2\2\u00e3\3\2\2\2\2\u00e5"+ - "\3\2\2\2\2\u00e7\3\2\2\2\2\u00e9\3\2\2\2\2\u00eb\3\2\2\2\2\u00ed\3\2\2"+ - "\2\2\u00ef\3\2\2\2\2\u00f1\3\2\2\2\2\u00f3\3\2\2\2\2\u00f5\3\2\2\2\2\u00f7"+ - "\3\2\2\2\2\u00f9\3\2\2\2\2\u00fb\3\2\2\2\2\u00fd\3\2\2\2\2\u00ff\3\2\2"+ - "\2\2\u0101\3\2\2\2\2\u0103\3\2\2\2\2\u0105\3\2\2\2\2\u0107\3\2\2\2\2\u0109"+ - "\3\2\2\2\2\u010b\3\2\2\2\2\u0113\3\2\2\2\2\u0115\3\2\2\2\2\u0117\3\2\2"+ - "\2\2\u0119\3\2\2\2\3\u011b\3\2\2\2\5\u011d\3\2\2\2\7\u011f\3\2\2\2\t\u0121"+ - "\3\2\2\2\13\u0123\3\2\2\2\r\u0127\3\2\2\2\17\u012f\3\2\2\2\21\u0138\3"+ - "\2\2\2\23\u013c\3\2\2\2\25\u0140\3\2\2\2\27\u0143\3\2\2\2\31\u0147\3\2"+ - "\2\2\33\u014f\3\2\2\2\35\u0152\3\2\2\2\37\u0157\3\2\2\2!\u015c\3\2\2\2"+ - "#\u0164\3\2\2\2%\u016d\3\2\2\2\'\u0175\3\2\2\2)\u017d\3\2\2\2+\u018a\3"+ - "\2\2\2-\u0197\3\2\2\2/\u01a9\3\2\2\2\61\u01ad\3\2\2\2\63\u01b2\3\2\2\2"+ - "\65\u01b8\3\2\2\2\67\u01bd\3\2\2\29\u01c6\3\2\2\2;\u01cf\3\2\2\2=\u01d4"+ - "\3\2\2\2?\u01d8\3\2\2\2A\u01df\3\2\2\2C\u01ea\3\2\2\2E\u01f1\3\2\2\2G"+ - "\u01f9\3\2\2\2I\u0201\3\2\2\2K\u0207\3\2\2\2M\u020d\3\2\2\2O\u0211\3\2"+ - "\2\2Q\u0218\3\2\2\2S\u021d\3\2\2\2U\u0224\3\2\2\2W\u0229\3\2\2\2Y\u0233"+ - "\3\2\2\2[\u023c\3\2\2\2]\u0242\3\2\2\2_\u0249\3\2\2\2a\u024e\3\2\2\2c"+ - "\u0254\3\2\2\2e\u0257\3\2\2\2g\u025f\3\2\2\2i\u0265\3\2\2\2k\u026e\3\2"+ - "\2\2m\u0271\3\2\2\2o\u0276\3\2\2\2q\u027b\3\2\2\2s\u0280\3\2\2\2u\u0285"+ - "\3\2\2\2w\u028b\3\2\2\2y\u0292\3\2\2\2{\u0298\3\2\2\2}\u029f\3\2\2\2\177"+ - "\u02a7\3\2\2\2\u0081\u02ad\3\2\2\2\u0083\u02b4\3\2\2\2\u0085\u02bc\3\2"+ - "\2\2\u0087\u02c0\3\2\2\2\u0089\u02c5\3\2\2\2\u008b\u02cb\3\2\2\2\u008d"+ - "\u02ce\3\2\2\2\u008f\u02d8\3\2\2\2\u0091\u02db\3\2\2\2\u0093\u02e1\3\2"+ - "\2\2\u0095\u02e7\3\2\2\2\u0097\u02ee\3\2\2\2\u0099\u02f7\3\2\2\2\u009b"+ - "\u02fd\3\2\2\2\u009d\u0302\3\2\2\2\u009f\u0308\3\2\2\2\u00a1\u030e\3\2"+ - "\2\2\u00a3\u0314\3\2\2\2\u00a5\u031c\3\2\2\2\u00a7\u0323\3\2\2\2\u00a9"+ - "\u032b\3\2\2\2\u00ab\u0332\3\2\2\2\u00ad\u0337\3\2\2\2\u00af\u033b\3\2"+ - "\2\2\u00b1\u0341\3\2\2\2\u00b3\u0348\3\2\2\2\u00b5\u034d\3\2\2\2\u00b7"+ - "\u0352\3\2\2\2\u00b9\u0357\3\2\2\2\u00bb\u035a\3\2\2\2\u00bd\u035f\3\2"+ - "\2\2\u00bf\u0365\3\2\2\2\u00c1\u036b\3\2\2\2\u00c3\u0372\3\2\2\2\u00c5"+ - "\u0377\3\2\2\2\u00c7\u037d\3\2\2\2\u00c9\u0382\3\2\2\2\u00cb\u0387\3\2"+ - "\2\2\u00cd\u038d\3\2\2\2\u00cf\u0395\3\2\2\2\u00d1\u0399\3\2\2\2\u00d3"+ - "\u03a0\3\2\2\2\u00d5\u03a3\3\2\2\2\u00d7\u03a6\3\2\2\2\u00d9\u03aa\3\2"+ - "\2\2\u00db\u03b0\3\2\2\2\u00dd\u03b2\3\2\2\2\u00df\u03b4\3\2\2\2\u00e1"+ - "\u03bc\3\2\2\2\u00e3\u03be\3\2\2\2\u00e5\u03c0\3\2\2\2\u00e7\u03c3\3\2"+ - "\2\2\u00e9\u03c5\3\2\2\2\u00eb\u03c8\3\2\2\2\u00ed\u03ca\3\2\2\2\u00ef"+ - 
"\u03cc\3\2\2\2\u00f1\u03ce\3\2\2\2\u00f3\u03d0\3\2\2\2\u00f5\u03d2\3\2"+ - "\2\2\u00f7\u03d5\3\2\2\2\u00f9\u03d8\3\2\2\2\u00fb\u03da\3\2\2\2\u00fd"+ - "\u03dc\3\2\2\2\u00ff\u03e8\3\2\2\2\u0101\u0416\3\2\2\2\u0103\u041a\3\2"+ - "\2\2\u0105\u0424\3\2\2\2\u0107\u042f\3\2\2\2\u0109\u0433\3\2\2\2\u010b"+ - "\u043e\3\2\2\2\u010d\u0449\3\2\2\2\u010f\u0452\3\2\2\2\u0111\u0454\3\2"+ - "\2\2\u0113\u0456\3\2\2\2\u0115\u0467\3\2\2\2\u0117\u0477\3\2\2\2\u0119"+ - "\u047d\3\2\2\2\u011b\u011c\7*\2\2\u011c\4\3\2\2\2\u011d\u011e\7+\2\2\u011e"+ - "\6\3\2\2\2\u011f\u0120\7.\2\2\u0120\b\3\2\2\2\u0121\u0122\7<\2\2\u0122"+ - "\n\3\2\2\2\u0123\u0124\7C\2\2\u0124\u0125\7N\2\2\u0125\u0126\7N\2\2\u0126"+ - "\f\3\2\2\2\u0127\u0128\7C\2\2\u0128\u0129\7P\2\2\u0129\u012a\7C\2\2\u012a"+ - "\u012b\7N\2\2\u012b\u012c\7[\2\2\u012c\u012d\7\\\2\2\u012d\u012e\7G\2"+ - "\2\u012e\16\3\2\2\2\u012f\u0130\7C\2\2\u0130\u0131\7P\2\2\u0131\u0132"+ - "\7C\2\2\u0132\u0133\7N\2\2\u0133\u0134\7[\2\2\u0134\u0135\7\\\2\2\u0135"+ - "\u0136\7G\2\2\u0136\u0137\7F\2\2\u0137\20\3\2\2\2\u0138\u0139\7C\2\2\u0139"+ - "\u013a\7P\2\2\u013a\u013b\7F\2\2\u013b\22\3\2\2\2\u013c\u013d\7C\2\2\u013d"+ - "\u013e\7P\2\2\u013e\u013f\7[\2\2\u013f\24\3\2\2\2\u0140\u0141\7C\2\2\u0141"+ - "\u0142\7U\2\2\u0142\26\3\2\2\2\u0143\u0144\7C\2\2\u0144\u0145\7U\2\2\u0145"+ - "\u0146\7E\2\2\u0146\30\3\2\2\2\u0147\u0148\7D\2\2\u0148\u0149\7G\2\2\u0149"+ - "\u014a\7V\2\2\u014a\u014b\7Y\2\2\u014b\u014c\7G\2\2\u014c\u014d\7G\2\2"+ - "\u014d\u014e\7P\2\2\u014e\32\3\2\2\2\u014f\u0150\7D\2\2\u0150\u0151\7"+ - "[\2\2\u0151\34\3\2\2\2\u0152\u0153\7E\2\2\u0153\u0154\7C\2\2\u0154\u0155"+ - "\7U\2\2\u0155\u0156\7G\2\2\u0156\36\3\2\2\2\u0157\u0158\7E\2\2\u0158\u0159"+ - "\7C\2\2\u0159\u015a\7U\2\2\u015a\u015b\7V\2\2\u015b \3\2\2\2\u015c\u015d"+ - "\7E\2\2\u015d\u015e\7C\2\2\u015e\u015f\7V\2\2\u015f\u0160\7C\2\2\u0160"+ - "\u0161\7N\2\2\u0161\u0162\7Q\2\2\u0162\u0163\7I\2\2\u0163\"\3\2\2\2\u0164"+ - "\u0165\7E\2\2\u0165\u0166\7C\2\2\u0166\u0167\7V\2\2\u0167\u0168\7C\2\2"+ - "\u0168\u0169\7N\2\2\u0169\u016a\7Q\2\2\u016a\u016b\7I\2\2\u016b\u016c"+ - "\7U\2\2\u016c$\3\2\2\2\u016d\u016e\7E\2\2\u016e\u016f\7Q\2\2\u016f\u0170"+ - "\7N\2\2\u0170\u0171\7W\2\2\u0171\u0172\7O\2\2\u0172\u0173\7P\2\2\u0173"+ - "\u0174\7U\2\2\u0174&\3\2\2\2\u0175\u0176\7E\2\2\u0176\u0177\7Q\2\2\u0177"+ - "\u0178\7P\2\2\u0178\u0179\7X\2\2\u0179\u017a\7G\2\2\u017a\u017b\7T\2\2"+ - "\u017b\u017c\7V\2\2\u017c(\3\2\2\2\u017d\u017e\7E\2\2\u017e\u017f\7W\2"+ - "\2\u017f\u0180\7T\2\2\u0180\u0181\7T\2\2\u0181\u0182\7G\2\2\u0182\u0183"+ - "\7P\2\2\u0183\u0184\7V\2\2\u0184\u0185\7a\2\2\u0185\u0186\7F\2\2\u0186"+ - "\u0187\7C\2\2\u0187\u0188\7V\2\2\u0188\u0189\7G\2\2\u0189*\3\2\2\2\u018a"+ - "\u018b\7E\2\2\u018b\u018c\7W\2\2\u018c\u018d\7T\2\2\u018d\u018e\7T\2\2"+ - "\u018e\u018f\7G\2\2\u018f\u0190\7P\2\2\u0190\u0191\7V\2\2\u0191\u0192"+ - "\7a\2\2\u0192\u0193\7V\2\2\u0193\u0194\7K\2\2\u0194\u0195\7O\2\2\u0195"+ - "\u0196\7G\2\2\u0196,\3\2\2\2\u0197\u0198\7E\2\2\u0198\u0199\7W\2\2\u0199"+ - "\u019a\7T\2\2\u019a\u019b\7T\2\2\u019b\u019c\7G\2\2\u019c\u019d\7P\2\2"+ - "\u019d\u019e\7V\2\2\u019e\u019f\7a\2\2\u019f\u01a0\7V\2\2\u01a0\u01a1"+ - "\7K\2\2\u01a1\u01a2\7O\2\2\u01a2\u01a3\7G\2\2\u01a3\u01a4\7U\2\2\u01a4"+ - "\u01a5\7V\2\2\u01a5\u01a6\7C\2\2\u01a6\u01a7\7O\2\2\u01a7\u01a8\7R\2\2"+ - "\u01a8.\3\2\2\2\u01a9\u01aa\7F\2\2\u01aa\u01ab\7C\2\2\u01ab\u01ac\7[\2"+ - "\2\u01ac\60\3\2\2\2\u01ad\u01ae\7F\2\2\u01ae\u01af\7C\2\2\u01af\u01b0"+ - "\7[\2\2\u01b0\u01b1\7U\2\2\u01b1\62\3\2\2\2\u01b2\u01b3\7F\2\2\u01b3\u01b4"+ - 
"\7G\2\2\u01b4\u01b5\7D\2\2\u01b5\u01b6\7W\2\2\u01b6\u01b7\7I\2\2\u01b7"+ - "\64\3\2\2\2\u01b8\u01b9\7F\2\2\u01b9\u01ba\7G\2\2\u01ba\u01bb\7U\2\2\u01bb"+ - "\u01bc\7E\2\2\u01bc\66\3\2\2\2\u01bd\u01be\7F\2\2\u01be\u01bf\7G\2\2\u01bf"+ - "\u01c0\7U\2\2\u01c0\u01c1\7E\2\2\u01c1\u01c2\7T\2\2\u01c2\u01c3\7K\2\2"+ - "\u01c3\u01c4\7D\2\2\u01c4\u01c5\7G\2\2\u01c58\3\2\2\2\u01c6\u01c7\7F\2"+ - "\2\u01c7\u01c8\7K\2\2\u01c8\u01c9\7U\2\2\u01c9\u01ca\7V\2\2\u01ca\u01cb"+ - "\7K\2\2\u01cb\u01cc\7P\2\2\u01cc\u01cd\7E\2\2\u01cd\u01ce\7V\2\2\u01ce"+ - ":\3\2\2\2\u01cf\u01d0\7G\2\2\u01d0\u01d1\7N\2\2\u01d1\u01d2\7U\2\2\u01d2"+ - "\u01d3\7G\2\2\u01d3<\3\2\2\2\u01d4\u01d5\7G\2\2\u01d5\u01d6\7P\2\2\u01d6"+ - "\u01d7\7F\2\2\u01d7>\3\2\2\2\u01d8\u01d9\7G\2\2\u01d9\u01da\7U\2\2\u01da"+ - "\u01db\7E\2\2\u01db\u01dc\7C\2\2\u01dc\u01dd\7R\2\2\u01dd\u01de\7G\2\2"+ - "\u01de@\3\2\2\2\u01df\u01e0\7G\2\2\u01e0\u01e1\7Z\2\2\u01e1\u01e2\7G\2"+ - "\2\u01e2\u01e3\7E\2\2\u01e3\u01e4\7W\2\2\u01e4\u01e5\7V\2\2\u01e5\u01e6"+ - "\7C\2\2\u01e6\u01e7\7D\2\2\u01e7\u01e8\7N\2\2\u01e8\u01e9\7G\2\2\u01e9"+ - "B\3\2\2\2\u01ea\u01eb\7G\2\2\u01eb\u01ec\7Z\2\2\u01ec\u01ed\7K\2\2\u01ed"+ - "\u01ee\7U\2\2\u01ee\u01ef\7V\2\2\u01ef\u01f0\7U\2\2\u01f0D\3\2\2\2\u01f1"+ - "\u01f2\7G\2\2\u01f2\u01f3\7Z\2\2\u01f3\u01f4\7R\2\2\u01f4\u01f5\7N\2\2"+ - "\u01f5\u01f6\7C\2\2\u01f6\u01f7\7K\2\2\u01f7\u01f8\7P\2\2\u01f8F\3\2\2"+ - "\2\u01f9\u01fa\7G\2\2\u01fa\u01fb\7Z\2\2\u01fb\u01fc\7V\2\2\u01fc\u01fd"+ - "\7T\2\2\u01fd\u01fe\7C\2\2\u01fe\u01ff\7E\2\2\u01ff\u0200\7V\2\2\u0200"+ - "H\3\2\2\2\u0201\u0202\7H\2\2\u0202\u0203\7C\2\2\u0203\u0204\7N\2\2\u0204"+ - "\u0205\7U\2\2\u0205\u0206\7G\2\2\u0206J\3\2\2\2\u0207\u0208\7H\2\2\u0208"+ - "\u0209\7K\2\2\u0209\u020a\7T\2\2\u020a\u020b\7U\2\2\u020b\u020c\7V\2\2"+ - "\u020cL\3\2\2\2\u020d\u020e\7H\2\2\u020e\u020f\7Q\2\2\u020f\u0210\7T\2"+ - "\2\u0210N\3\2\2\2\u0211\u0212\7H\2\2\u0212\u0213\7Q\2\2\u0213\u0214\7"+ - "T\2\2\u0214\u0215\7O\2\2\u0215\u0216\7C\2\2\u0216\u0217\7V\2\2\u0217P"+ - "\3\2\2\2\u0218\u0219\7H\2\2\u0219\u021a\7T\2\2\u021a\u021b\7Q\2\2\u021b"+ - "\u021c\7O\2\2\u021cR\3\2\2\2\u021d\u021e\7H\2\2\u021e\u021f\7T\2\2\u021f"+ - "\u0220\7Q\2\2\u0220\u0221\7\\\2\2\u0221\u0222\7G\2\2\u0222\u0223\7P\2"+ - "\2\u0223T\3\2\2\2\u0224\u0225\7H\2\2\u0225\u0226\7W\2\2\u0226\u0227\7"+ - "N\2\2\u0227\u0228\7N\2\2\u0228V\3\2\2\2\u0229\u022a\7H\2\2\u022a\u022b"+ - "\7W\2\2\u022b\u022c\7P\2\2\u022c\u022d\7E\2\2\u022d\u022e\7V\2\2\u022e"+ - "\u022f\7K\2\2\u022f\u0230\7Q\2\2\u0230\u0231\7P\2\2\u0231\u0232\7U\2\2"+ - "\u0232X\3\2\2\2\u0233\u0234\7I\2\2\u0234\u0235\7T\2\2\u0235\u0236\7C\2"+ - "\2\u0236\u0237\7R\2\2\u0237\u0238\7J\2\2\u0238\u0239\7X\2\2\u0239\u023a"+ - "\7K\2\2\u023a\u023b\7\\\2\2\u023bZ\3\2\2\2\u023c\u023d\7I\2\2\u023d\u023e"+ - "\7T\2\2\u023e\u023f\7Q\2\2\u023f\u0240\7W\2\2\u0240\u0241\7R\2\2\u0241"+ - "\\\3\2\2\2\u0242\u0243\7J\2\2\u0243\u0244\7C\2\2\u0244\u0245\7X\2\2\u0245"+ - "\u0246\7K\2\2\u0246\u0247\7P\2\2\u0247\u0248\7I\2\2\u0248^\3\2\2\2\u0249"+ - "\u024a\7J\2\2\u024a\u024b\7Q\2\2\u024b\u024c\7W\2\2\u024c\u024d\7T\2\2"+ - "\u024d`\3\2\2\2\u024e\u024f\7J\2\2\u024f\u0250\7Q\2\2\u0250\u0251\7W\2"+ - "\2\u0251\u0252\7T\2\2\u0252\u0253\7U\2\2\u0253b\3\2\2\2\u0254\u0255\7"+ - "K\2\2\u0255\u0256\7P\2\2\u0256d\3\2\2\2\u0257\u0258\7K\2\2\u0258\u0259"+ - "\7P\2\2\u0259\u025a\7E\2\2\u025a\u025b\7N\2\2\u025b\u025c\7W\2\2\u025c"+ - "\u025d\7F\2\2\u025d\u025e\7G\2\2\u025ef\3\2\2\2\u025f\u0260\7K\2\2\u0260"+ - "\u0261\7P\2\2\u0261\u0262\7P\2\2\u0262\u0263\7G\2\2\u0263\u0264\7T\2\2"+ - 
"\u0264h\3\2\2\2\u0265\u0266\7K\2\2\u0266\u0267\7P\2\2\u0267\u0268\7V\2"+ - "\2\u0268\u0269\7G\2\2\u0269\u026a\7T\2\2\u026a\u026b\7X\2\2\u026b\u026c"+ - "\7C\2\2\u026c\u026d\7N\2\2\u026dj\3\2\2\2\u026e\u026f\7K\2\2\u026f\u0270"+ - "\7U\2\2\u0270l\3\2\2\2\u0271\u0272\7L\2\2\u0272\u0273\7Q\2\2\u0273\u0274"+ - "\7K\2\2\u0274\u0275\7P\2\2\u0275n\3\2\2\2\u0276\u0277\7N\2\2\u0277\u0278"+ - "\7C\2\2\u0278\u0279\7U\2\2\u0279\u027a\7V\2\2\u027ap\3\2\2\2\u027b\u027c"+ - "\7N\2\2\u027c\u027d\7G\2\2\u027d\u027e\7H\2\2\u027e\u027f\7V\2\2\u027f"+ - "r\3\2\2\2\u0280\u0281\7N\2\2\u0281\u0282\7K\2\2\u0282\u0283\7M\2\2\u0283"+ - "\u0284\7G\2\2\u0284t\3\2\2\2\u0285\u0286\7N\2\2\u0286\u0287\7K\2\2\u0287"+ - "\u0288\7O\2\2\u0288\u0289\7K\2\2\u0289\u028a\7V\2\2\u028av\3\2\2\2\u028b"+ - "\u028c\7O\2\2\u028c\u028d\7C\2\2\u028d\u028e\7R\2\2\u028e\u028f\7R\2\2"+ - "\u028f\u0290\7G\2\2\u0290\u0291\7F\2\2\u0291x\3\2\2\2\u0292\u0293\7O\2"+ - "\2\u0293\u0294\7C\2\2\u0294\u0295\7V\2\2\u0295\u0296\7E\2\2\u0296\u0297"+ - "\7J\2\2\u0297z\3\2\2\2\u0298\u0299\7O\2\2\u0299\u029a\7K\2\2\u029a\u029b"+ - "\7P\2\2\u029b\u029c\7W\2\2\u029c\u029d\7V\2\2\u029d\u029e\7G\2\2\u029e"+ - "|\3\2\2\2\u029f\u02a0\7O\2\2\u02a0\u02a1\7K\2\2\u02a1\u02a2\7P\2\2\u02a2"+ - "\u02a3\7W\2\2\u02a3\u02a4\7V\2\2\u02a4\u02a5\7G\2\2\u02a5\u02a6\7U\2\2"+ - "\u02a6~\3\2\2\2\u02a7\u02a8\7O\2\2\u02a8\u02a9\7Q\2\2\u02a9\u02aa\7P\2"+ - "\2\u02aa\u02ab\7V\2\2\u02ab\u02ac\7J\2\2\u02ac\u0080\3\2\2\2\u02ad\u02ae"+ - "\7O\2\2\u02ae\u02af\7Q\2\2\u02af\u02b0\7P\2\2\u02b0\u02b1\7V\2\2\u02b1"+ - "\u02b2\7J\2\2\u02b2\u02b3\7U\2\2\u02b3\u0082\3\2\2\2\u02b4\u02b5\7P\2"+ - "\2\u02b5\u02b6\7C\2\2\u02b6\u02b7\7V\2\2\u02b7\u02b8\7W\2\2\u02b8\u02b9"+ - "\7T\2\2\u02b9\u02ba\7C\2\2\u02ba\u02bb\7N\2\2\u02bb\u0084\3\2\2\2\u02bc"+ - "\u02bd\7P\2\2\u02bd\u02be\7Q\2\2\u02be\u02bf\7V\2\2\u02bf\u0086\3\2\2"+ - "\2\u02c0\u02c1\7P\2\2\u02c1\u02c2\7W\2\2\u02c2\u02c3\7N\2\2\u02c3\u02c4"+ - "\7N\2\2\u02c4\u0088\3\2\2\2\u02c5\u02c6\7P\2\2\u02c6\u02c7\7W\2\2\u02c7"+ - "\u02c8\7N\2\2\u02c8\u02c9\7N\2\2\u02c9\u02ca\7U\2\2\u02ca\u008a\3\2\2"+ - "\2\u02cb\u02cc\7Q\2\2\u02cc\u02cd\7P\2\2\u02cd\u008c\3\2\2\2\u02ce\u02cf"+ - "\7Q\2\2\u02cf\u02d0\7R\2\2\u02d0\u02d1\7V\2\2\u02d1\u02d2\7K\2\2\u02d2"+ - "\u02d3\7O\2\2\u02d3\u02d4\7K\2\2\u02d4\u02d5\7\\\2\2\u02d5\u02d6\7G\2"+ - "\2\u02d6\u02d7\7F\2\2\u02d7\u008e\3\2\2\2\u02d8\u02d9\7Q\2\2\u02d9\u02da"+ - "\7T\2\2\u02da\u0090\3\2\2\2\u02db\u02dc\7Q\2\2\u02dc\u02dd\7T\2\2\u02dd"+ - "\u02de\7F\2\2\u02de\u02df\7G\2\2\u02df\u02e0\7T\2\2\u02e0\u0092\3\2\2"+ - "\2\u02e1\u02e2\7Q\2\2\u02e2\u02e3\7W\2\2\u02e3\u02e4\7V\2\2\u02e4\u02e5"+ - "\7G\2\2\u02e5\u02e6\7T\2\2\u02e6\u0094\3\2\2\2\u02e7\u02e8\7R\2\2\u02e8"+ - "\u02e9\7C\2\2\u02e9\u02ea\7T\2\2\u02ea\u02eb\7U\2\2\u02eb\u02ec\7G\2\2"+ - "\u02ec\u02ed\7F\2\2\u02ed\u0096\3\2\2\2\u02ee\u02ef\7R\2\2\u02ef\u02f0"+ - "\7J\2\2\u02f0\u02f1\7[\2\2\u02f1\u02f2\7U\2\2\u02f2\u02f3\7K\2\2\u02f3"+ - "\u02f4\7E\2\2\u02f4\u02f5\7C\2\2\u02f5\u02f6\7N\2\2\u02f6\u0098\3\2\2"+ - "\2\u02f7\u02f8\7R\2\2\u02f8\u02f9\7K\2\2\u02f9\u02fa\7X\2\2\u02fa\u02fb"+ - "\7Q\2\2\u02fb\u02fc\7V\2\2\u02fc\u009a\3\2\2\2\u02fd\u02fe\7R\2\2\u02fe"+ - "\u02ff\7N\2\2\u02ff\u0300\7C\2\2\u0300\u0301\7P\2\2\u0301\u009c\3\2\2"+ - "\2\u0302\u0303\7T\2\2\u0303\u0304\7K\2\2\u0304\u0305\7I\2\2\u0305\u0306"+ - "\7J\2\2\u0306\u0307\7V\2\2\u0307\u009e\3\2\2\2\u0308\u0309\7T\2\2\u0309"+ - "\u030a\7N\2\2\u030a\u030b\7K\2\2\u030b\u030c\7M\2\2\u030c\u030d\7G\2\2"+ - "\u030d\u00a0\3\2\2\2\u030e\u030f\7S\2\2\u030f\u0310\7W\2\2\u0310\u0311"+ - 
"\7G\2\2\u0311\u0312\7T\2\2\u0312\u0313\7[\2\2\u0313\u00a2\3\2\2\2\u0314"+ - "\u0315\7U\2\2\u0315\u0316\7E\2\2\u0316\u0317\7J\2\2\u0317\u0318\7G\2\2"+ - "\u0318\u0319\7O\2\2\u0319\u031a\7C\2\2\u031a\u031b\7U\2\2\u031b\u00a4"+ - "\3\2\2\2\u031c\u031d\7U\2\2\u031d\u031e\7G\2\2\u031e\u031f\7E\2\2\u031f"+ - "\u0320\7Q\2\2\u0320\u0321\7P\2\2\u0321\u0322\7F\2\2\u0322\u00a6\3\2\2"+ - "\2\u0323\u0324\7U\2\2\u0324\u0325\7G\2\2\u0325\u0326\7E\2\2\u0326\u0327"+ - "\7Q\2\2\u0327\u0328\7P\2\2\u0328\u0329\7F\2\2\u0329\u032a\7U\2\2\u032a"+ - "\u00a8\3\2\2\2\u032b\u032c\7U\2\2\u032c\u032d\7G\2\2\u032d\u032e\7N\2"+ - "\2\u032e\u032f\7G\2\2\u032f\u0330\7E\2\2\u0330\u0331\7V\2\2\u0331\u00aa"+ - "\3\2\2\2\u0332\u0333\7U\2\2\u0333\u0334\7J\2\2\u0334\u0335\7Q\2\2\u0335"+ - "\u0336\7Y\2\2\u0336\u00ac\3\2\2\2\u0337\u0338\7U\2\2\u0338\u0339\7[\2"+ - "\2\u0339\u033a\7U\2\2\u033a\u00ae\3\2\2\2\u033b\u033c\7V\2\2\u033c\u033d"+ - "\7C\2\2\u033d\u033e\7D\2\2\u033e\u033f\7N\2\2\u033f\u0340\7G\2\2\u0340"+ - "\u00b0\3\2\2\2\u0341\u0342\7V\2\2\u0342\u0343\7C\2\2\u0343\u0344\7D\2"+ - "\2\u0344\u0345\7N\2\2\u0345\u0346\7G\2\2\u0346\u0347\7U\2\2\u0347\u00b2"+ - "\3\2\2\2\u0348\u0349\7V\2\2\u0349\u034a\7G\2\2\u034a\u034b\7Z\2\2\u034b"+ - "\u034c\7V\2\2\u034c\u00b4\3\2\2\2\u034d\u034e\7V\2\2\u034e\u034f\7J\2"+ - "\2\u034f\u0350\7G\2\2\u0350\u0351\7P\2\2\u0351\u00b6\3\2\2\2\u0352\u0353"+ - "\7V\2\2\u0353\u0354\7T\2\2\u0354\u0355\7W\2\2\u0355\u0356\7G\2\2\u0356"+ - "\u00b8\3\2\2\2\u0357\u0358\7V\2\2\u0358\u0359\7Q\2\2\u0359\u00ba\3\2\2"+ - "\2\u035a\u035b\7V\2\2\u035b\u035c\7[\2\2\u035c\u035d\7R\2\2\u035d\u035e"+ - "\7G\2\2\u035e\u00bc\3\2\2\2\u035f\u0360\7V\2\2\u0360\u0361\7[\2\2\u0361"+ - "\u0362\7R\2\2\u0362\u0363\7G\2\2\u0363\u0364\7U\2\2\u0364\u00be\3\2\2"+ - "\2\u0365\u0366\7W\2\2\u0366\u0367\7U\2\2\u0367\u0368\7K\2\2\u0368\u0369"+ - "\7P\2\2\u0369\u036a\7I\2\2\u036a\u00c0\3\2\2\2\u036b\u036c\7X\2\2\u036c"+ - "\u036d\7G\2\2\u036d\u036e\7T\2\2\u036e\u036f\7K\2\2\u036f\u0370\7H\2\2"+ - "\u0370\u0371\7[\2\2\u0371\u00c2\3\2\2\2\u0372\u0373\7Y\2\2\u0373\u0374"+ - "\7J\2\2\u0374\u0375\7G\2\2\u0375\u0376\7P\2\2\u0376\u00c4\3\2\2\2\u0377"+ - "\u0378\7Y\2\2\u0378\u0379\7J\2\2\u0379\u037a\7G\2\2\u037a\u037b\7T\2\2"+ - "\u037b\u037c\7G\2\2\u037c\u00c6\3\2\2\2\u037d\u037e\7Y\2\2\u037e\u037f"+ - "\7K\2\2\u037f\u0380\7V\2\2\u0380\u0381\7J\2\2\u0381\u00c8\3\2\2\2\u0382"+ - "\u0383\7[\2\2\u0383\u0384\7G\2\2\u0384\u0385\7C\2\2\u0385\u0386\7T\2\2"+ - "\u0386\u00ca\3\2\2\2\u0387\u0388\7[\2\2\u0388\u0389\7G\2\2\u0389\u038a"+ - "\7C\2\2\u038a\u038b\7T\2\2\u038b\u038c\7U\2\2\u038c\u00cc\3\2\2\2\u038d"+ - "\u038e\7}\2\2\u038e\u038f\7G\2\2\u038f\u0390\7U\2\2\u0390\u0391\7E\2\2"+ - "\u0391\u0392\7C\2\2\u0392\u0393\7R\2\2\u0393\u0394\7G\2\2\u0394\u00ce"+ - "\3\2\2\2\u0395\u0396\7}\2\2\u0396\u0397\7H\2\2\u0397\u0398\7P\2\2\u0398"+ - "\u00d0\3\2\2\2\u0399\u039a\7}\2\2\u039a\u039b\7N\2\2\u039b\u039c\7K\2"+ - "\2\u039c\u039d\7O\2\2\u039d\u039e\7K\2\2\u039e\u039f\7V\2\2\u039f\u00d2"+ - "\3\2\2\2\u03a0\u03a1\7}\2\2\u03a1\u03a2\7F\2\2\u03a2\u00d4\3\2\2\2\u03a3"+ - "\u03a4\7}\2\2\u03a4\u03a5\7V\2\2\u03a5\u00d6\3\2\2\2\u03a6\u03a7\7}\2"+ - "\2\u03a7\u03a8\7V\2\2\u03a8\u03a9\7U\2\2\u03a9\u00d8\3\2\2\2\u03aa\u03ab"+ - "\7}\2\2\u03ab\u03ac\7I\2\2\u03ac\u03ad\7W\2\2\u03ad\u03ae\7K\2\2\u03ae"+ - "\u03af\7F\2\2\u03af\u00da\3\2\2\2\u03b0\u03b1\7\177\2\2\u03b1\u00dc\3"+ - "\2\2\2\u03b2\u03b3\7?\2\2\u03b3\u00de\3\2\2\2\u03b4\u03b5\7>\2\2\u03b5"+ - "\u03b6\7?\2\2\u03b6\u03b7\7@\2\2\u03b7\u00e0\3\2\2\2\u03b8\u03b9\7>\2"+ - 
"\2\u03b9\u03bd\7@\2\2\u03ba\u03bb\7#\2\2\u03bb\u03bd\7?\2\2\u03bc\u03b8"+ - "\3\2\2\2\u03bc\u03ba\3\2\2\2\u03bd\u00e2\3\2\2\2\u03be\u03bf\7>\2\2\u03bf"+ - "\u00e4\3\2\2\2\u03c0\u03c1\7>\2\2\u03c1\u03c2\7?\2\2\u03c2\u00e6\3\2\2"+ - "\2\u03c3\u03c4\7@\2\2\u03c4\u00e8\3\2\2\2\u03c5\u03c6\7@\2\2\u03c6\u03c7"+ - "\7?\2\2\u03c7\u00ea\3\2\2\2\u03c8\u03c9\7-\2\2\u03c9\u00ec\3\2\2\2\u03ca"+ - "\u03cb\7/\2\2\u03cb\u00ee\3\2\2\2\u03cc\u03cd\7,\2\2\u03cd\u00f0\3\2\2"+ - "\2\u03ce\u03cf\7\61\2\2\u03cf\u00f2\3\2\2\2\u03d0\u03d1\7\'\2\2\u03d1"+ - "\u00f4\3\2\2\2\u03d2\u03d3\7<\2\2\u03d3\u03d4\7<\2\2\u03d4\u00f6\3\2\2"+ - "\2\u03d5\u03d6\7~\2\2\u03d6\u03d7\7~\2\2\u03d7\u00f8\3\2\2\2\u03d8\u03d9"+ - "\7\60\2\2\u03d9\u00fa\3\2\2\2\u03da\u03db\7A\2\2\u03db\u00fc\3\2\2\2\u03dc"+ - "\u03e2\7)\2\2\u03dd\u03e1\n\2\2\2\u03de\u03df\7)\2\2\u03df\u03e1\7)\2"+ - "\2\u03e0\u03dd\3\2\2\2\u03e0\u03de\3\2\2\2\u03e1\u03e4\3\2\2\2\u03e2\u03e0"+ - "\3\2\2\2\u03e2\u03e3\3\2\2\2\u03e3\u03e5\3\2\2\2\u03e4\u03e2\3\2\2\2\u03e5"+ - "\u03e6\7)\2\2\u03e6\u00fe\3\2\2\2\u03e7\u03e9\5\u010f\u0088\2\u03e8\u03e7"+ - "\3\2\2\2\u03e9\u03ea\3\2\2\2\u03ea\u03e8\3\2\2\2\u03ea\u03eb\3\2\2\2\u03eb"+ - "\u0100\3\2\2\2\u03ec\u03ee\5\u010f\u0088\2\u03ed\u03ec\3\2\2\2\u03ee\u03ef"+ - "\3\2\2\2\u03ef\u03ed\3\2\2\2\u03ef\u03f0\3\2\2\2\u03f0\u03f1\3\2\2\2\u03f1"+ - "\u03f5\5\u00f9}\2\u03f2\u03f4\5\u010f\u0088\2\u03f3\u03f2\3\2\2\2\u03f4"+ - "\u03f7\3\2\2\2\u03f5\u03f3\3\2\2\2\u03f5\u03f6\3\2\2\2\u03f6\u0417\3\2"+ - "\2\2\u03f7\u03f5\3\2\2\2\u03f8\u03fa\5\u00f9}\2\u03f9\u03fb\5\u010f\u0088"+ - "\2\u03fa\u03f9\3\2\2\2\u03fb\u03fc\3\2\2\2\u03fc\u03fa\3\2\2\2\u03fc\u03fd"+ - "\3\2\2\2\u03fd\u0417\3\2\2\2\u03fe\u0400\5\u010f\u0088\2\u03ff\u03fe\3"+ - "\2\2\2\u0400\u0401\3\2\2\2\u0401\u03ff\3\2\2\2\u0401\u0402\3\2\2\2\u0402"+ - "\u040a\3\2\2\2\u0403\u0407\5\u00f9}\2\u0404\u0406\5\u010f\u0088\2\u0405"+ - "\u0404\3\2\2\2\u0406\u0409\3\2\2\2\u0407\u0405\3\2\2\2\u0407\u0408\3\2"+ - "\2\2\u0408\u040b\3\2\2\2\u0409\u0407\3\2\2\2\u040a\u0403\3\2\2\2\u040a"+ - "\u040b\3\2\2\2\u040b\u040c\3\2\2\2\u040c\u040d\5\u010d\u0087\2\u040d\u0417"+ - "\3\2\2\2\u040e\u0410\5\u00f9}\2\u040f\u0411\5\u010f\u0088\2\u0410\u040f"+ - "\3\2\2\2\u0411\u0412\3\2\2\2\u0412\u0410\3\2\2\2\u0412\u0413\3\2\2\2\u0413"+ - "\u0414\3\2\2\2\u0414\u0415\5\u010d\u0087\2\u0415\u0417\3\2\2\2\u0416\u03ed"+ - "\3\2\2\2\u0416\u03f8\3\2\2\2\u0416\u03ff\3\2\2\2\u0416\u040e\3\2\2\2\u0417"+ - "\u0102\3\2\2\2\u0418\u041b\5\u0111\u0089\2\u0419\u041b\7a\2\2\u041a\u0418"+ - "\3\2\2\2\u041a\u0419\3\2\2\2\u041b\u0421\3\2\2\2\u041c\u0420\5\u0111\u0089"+ - "\2\u041d\u0420\5\u010f\u0088\2\u041e\u0420\t\3\2\2\u041f\u041c\3\2\2\2"+ - "\u041f\u041d\3\2\2\2\u041f\u041e\3\2\2\2\u0420\u0423\3\2\2\2\u0421\u041f"+ - "\3\2\2\2\u0421\u0422\3\2\2\2\u0422\u0104\3\2\2\2\u0423\u0421\3\2\2\2\u0424"+ - "\u0428\5\u010f\u0088\2\u0425\u0429\5\u0111\u0089\2\u0426\u0429\5\u010f"+ - "\u0088\2\u0427\u0429\t\3\2\2\u0428\u0425\3\2\2\2\u0428\u0426\3\2\2\2\u0428"+ - "\u0427\3\2\2\2\u0429\u042a\3\2\2\2\u042a\u0428\3\2\2\2\u042a\u042b\3\2"+ - "\2\2\u042b\u0106\3\2\2\2\u042c\u0430\5\u0111\u0089\2\u042d\u0430\5\u010f"+ - "\u0088\2\u042e\u0430\7a\2\2\u042f\u042c\3\2\2\2\u042f\u042d\3\2\2\2\u042f"+ - "\u042e\3\2\2\2\u0430\u0431\3\2\2\2\u0431\u042f\3\2\2\2\u0431\u0432\3\2"+ - "\2\2\u0432\u0108\3\2\2\2\u0433\u0439\7$\2\2\u0434\u0438\n\4\2\2\u0435"+ - "\u0436\7$\2\2\u0436\u0438\7$\2\2\u0437\u0434\3\2\2\2\u0437\u0435\3\2\2"+ - "\2\u0438\u043b\3\2\2\2\u0439\u0437\3\2\2\2\u0439\u043a\3\2\2\2\u043a\u043c"+ - 
"\3\2\2\2\u043b\u0439\3\2\2\2\u043c\u043d\7$\2\2\u043d\u010a\3\2\2\2\u043e"+ - "\u0444\7b\2\2\u043f\u0443\n\5\2\2\u0440\u0441\7b\2\2\u0441\u0443\7b\2"+ - "\2\u0442\u043f\3\2\2\2\u0442\u0440\3\2\2\2\u0443\u0446\3\2\2\2\u0444\u0442"+ - "\3\2\2\2\u0444\u0445\3\2\2\2\u0445\u0447\3\2\2\2\u0446\u0444\3\2\2\2\u0447"+ - "\u0448\7b\2\2\u0448\u010c\3\2\2\2\u0449\u044b\7G\2\2\u044a\u044c\t\6\2"+ - "\2\u044b\u044a\3\2\2\2\u044b\u044c\3\2\2\2\u044c\u044e\3\2\2\2\u044d\u044f"+ - "\5\u010f\u0088\2\u044e\u044d\3\2\2\2\u044f\u0450\3\2\2\2\u0450\u044e\3"+ - "\2\2\2\u0450\u0451\3\2\2\2\u0451\u010e\3\2\2\2\u0452\u0453\t\7\2\2\u0453"+ - "\u0110\3\2\2\2\u0454\u0455\t\b\2\2\u0455\u0112\3\2\2\2\u0456\u0457\7/"+ - "\2\2\u0457\u0458\7/\2\2\u0458\u045c\3\2\2\2\u0459\u045b\n\t\2\2\u045a"+ - "\u0459\3\2\2\2\u045b\u045e\3\2\2\2\u045c\u045a\3\2\2\2\u045c\u045d\3\2"+ - "\2\2\u045d\u0460\3\2\2\2\u045e\u045c\3\2\2\2\u045f\u0461\7\17\2\2\u0460"+ - "\u045f\3\2\2\2\u0460\u0461\3\2\2\2\u0461\u0463\3\2\2\2\u0462\u0464\7\f"+ - "\2\2\u0463\u0462\3\2\2\2\u0463\u0464\3\2\2\2\u0464\u0465\3\2\2\2\u0465"+ - "\u0466\b\u008a\2\2\u0466\u0114\3\2\2\2\u0467\u0468\7\61\2\2\u0468\u0469"+ - "\7,\2\2\u0469\u046e\3\2\2\2\u046a\u046d\5\u0115\u008b\2\u046b\u046d\13"+ - "\2\2\2\u046c\u046a\3\2\2\2\u046c\u046b\3\2\2\2\u046d\u0470\3\2\2\2\u046e"+ - "\u046f\3\2\2\2\u046e\u046c\3\2\2\2\u046f\u0471\3\2\2\2\u0470\u046e\3\2"+ - "\2\2\u0471\u0472\7,\2\2\u0472\u0473\7\61\2\2\u0473\u0474\3\2\2\2\u0474"+ - "\u0475\b\u008b\2\2\u0475\u0116\3\2\2\2\u0476\u0478\t\n\2\2\u0477\u0476"+ - "\3\2\2\2\u0478\u0479\3\2\2\2\u0479\u0477\3\2\2\2\u0479\u047a\3\2\2\2\u047a"+ - "\u047b\3\2\2\2\u047b\u047c\b\u008c\2\2\u047c\u0118\3\2\2\2\u047d\u047e"+ - "\13\2\2\2\u047e\u011a\3\2\2\2\"\2\u03bc\u03e0\u03e2\u03ea\u03ef\u03f5"+ - "\u03fc\u0401\u0407\u040a\u0412\u0416\u041a\u041f\u0421\u0428\u042a\u042f"+ - "\u0431\u0437\u0439\u0442\u0444\u044b\u0450\u045c\u0460\u0463\u046c\u046e"+ - "\u0479\3\2\3\2"; + "\4\u008e\t\u008e\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6\3\6\3\6\3\7\3"+ + "\7\3\7\3\7\3\7\3\7\3\7\3\7\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\t\3\t"+ + "\3\t\3\t\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3\f\3\f\3\f\3\f\3\r\3\r\3\r\3"+ + "\r\3\r\3\r\3\r\3\r\3\16\3\16\3\16\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3"+ + "\20\3\20\3\20\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3"+ + "\22\3\22\3\22\3\22\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3"+ + "\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\25\3\25\3"+ + "\25\3\25\3\25\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3"+ + "\26\3\26\3\26\3\26\3\26\3\26\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3"+ + "\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3"+ + "\31\3\31\3\31\3\31\3\31\3\32\3\32\3\32\3\32\3\32\3\32\3\33\3\33\3\33\3"+ + "\33\3\33\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3"+ + "\35\3\35\3\35\3\35\3\35\3\35\3\36\3\36\3\36\3\36\3\36\3\37\3\37\3\37\3"+ + "\37\3 \3 \3 \3 \3 \3 \3 \3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3!\3\"\3\"\3\""+ + "\3\"\3\"\3\"\3\"\3#\3#\3#\3#\3#\3#\3#\3#\3$\3$\3$\3$\3$\3$\3$\3$\3%\3"+ + "%\3%\3%\3%\3%\3&\3&\3&\3&\3&\3&\3\'\3\'\3\'\3\'\3(\3(\3(\3(\3(\3(\3(\3"+ + ")\3)\3)\3)\3)\3*\3*\3*\3*\3*\3*\3*\3+\3+\3+\3+\3+\3,\3,\3,\3,\3,\3,\3"+ + ",\3,\3,\3,\3-\3-\3-\3-\3-\3-\3-\3-\3-\3.\3.\3.\3.\3.\3.\3/\3/\3/\3/\3"+ + "/\3/\3/\3\60\3\60\3\60\3\60\3\60\3\61\3\61\3\61\3\61\3\61\3\61\3\62\3"+ + "\62\3\62\3\63\3\63\3\63\3\63\3\63\3\63\3\63\3\63\3\64\3\64\3\64\3\64\3"+ + "\64\3\64\3\65\3\65\3\65\3\65\3\65\3\65\3\65\3\65\3\65\3\66\3\66\3\66\3"+ + 
"\67\3\67\3\67\3\67\3\67\38\38\38\38\38\39\39\39\39\39\3:\3:\3:\3:\3:\3"+ + ";\3;\3;\3;\3;\3;\3<\3<\3<\3<\3<\3<\3<\3=\3=\3=\3=\3=\3=\3>\3>\3>\3>\3"+ + ">\3>\3>\3?\3?\3?\3?\3?\3?\3?\3?\3@\3@\3@\3@\3@\3@\3A\3A\3A\3A\3A\3A\3"+ + "A\3B\3B\3B\3B\3B\3B\3B\3B\3C\3C\3C\3C\3D\3D\3D\3D\3D\3E\3E\3E\3E\3E\3"+ + "E\3F\3F\3F\3G\3G\3G\3G\3G\3G\3G\3G\3G\3G\3H\3H\3H\3I\3I\3I\3I\3I\3I\3"+ + "J\3J\3J\3J\3J\3J\3K\3K\3K\3K\3K\3K\3K\3L\3L\3L\3L\3L\3L\3L\3L\3L\3M\3"+ + "M\3M\3M\3M\3M\3N\3N\3N\3N\3N\3O\3O\3O\3O\3O\3O\3P\3P\3P\3P\3P\3P\3Q\3"+ + "Q\3Q\3Q\3Q\3Q\3R\3R\3R\3R\3R\3R\3R\3R\3S\3S\3S\3S\3S\3S\3S\3T\3T\3T\3"+ + "T\3T\3T\3T\3T\3U\3U\3U\3U\3U\3U\3U\3V\3V\3V\3V\3V\3W\3W\3W\3W\3X\3X\3"+ + "X\3X\3X\3X\3Y\3Y\3Y\3Y\3Y\3Y\3Y\3Z\3Z\3Z\3Z\3Z\3[\3[\3[\3[\3[\3\\\3\\"+ + "\3\\\3\\\3\\\3]\3]\3]\3^\3^\3^\3^\3^\3_\3_\3_\3_\3_\3_\3`\3`\3`\3`\3`"+ + "\3`\3a\3a\3a\3a\3a\3a\3a\3b\3b\3b\3b\3b\3c\3c\3c\3c\3c\3c\3d\3d\3d\3d"+ + "\3d\3e\3e\3e\3e\3e\3f\3f\3f\3f\3f\3f\3g\3g\3g\3g\3g\3g\3g\3g\3h\3h\3h"+ + "\3h\3i\3i\3i\3i\3i\3i\3i\3j\3j\3j\3k\3k\3k\3l\3l\3l\3l\3m\3m\3m\3m\3m"+ + "\3m\3n\3n\7n\u03b5\nn\fn\16n\u03b8\13n\3o\3o\3p\3p\3q\3q\3q\3q\3r\3r\3"+ + "r\3r\5r\u03c6\nr\3s\3s\3t\3t\3t\3u\3u\3v\3v\3v\3w\3w\3x\3x\3y\3y\3z\3"+ + "z\3{\3{\3|\3|\3|\3}\3}\3}\3~\3~\3\177\3\177\3\u0080\3\u0080\3\u0080\3"+ + "\u0080\7\u0080\u03ea\n\u0080\f\u0080\16\u0080\u03ed\13\u0080\3\u0080\3"+ + "\u0080\3\u0081\6\u0081\u03f2\n\u0081\r\u0081\16\u0081\u03f3\3\u0082\6"+ + "\u0082\u03f7\n\u0082\r\u0082\16\u0082\u03f8\3\u0082\3\u0082\7\u0082\u03fd"+ + "\n\u0082\f\u0082\16\u0082\u0400\13\u0082\3\u0082\3\u0082\6\u0082\u0404"+ + "\n\u0082\r\u0082\16\u0082\u0405\3\u0082\6\u0082\u0409\n\u0082\r\u0082"+ + "\16\u0082\u040a\3\u0082\3\u0082\7\u0082\u040f\n\u0082\f\u0082\16\u0082"+ + "\u0412\13\u0082\5\u0082\u0414\n\u0082\3\u0082\3\u0082\3\u0082\3\u0082"+ + "\6\u0082\u041a\n\u0082\r\u0082\16\u0082\u041b\3\u0082\3\u0082\5\u0082"+ + "\u0420\n\u0082\3\u0083\3\u0083\5\u0083\u0424\n\u0083\3\u0083\3\u0083\3"+ + "\u0083\7\u0083\u0429\n\u0083\f\u0083\16\u0083\u042c\13\u0083\3\u0084\3"+ + "\u0084\3\u0084\3\u0084\6\u0084\u0432\n\u0084\r\u0084\16\u0084\u0433\3"+ + "\u0085\3\u0085\3\u0085\6\u0085\u0439\n\u0085\r\u0085\16\u0085\u043a\3"+ + "\u0086\3\u0086\3\u0086\3\u0086\7\u0086\u0441\n\u0086\f\u0086\16\u0086"+ + "\u0444\13\u0086\3\u0086\3\u0086\3\u0087\3\u0087\3\u0087\3\u0087\7\u0087"+ + "\u044c\n\u0087\f\u0087\16\u0087\u044f\13\u0087\3\u0087\3\u0087\3\u0088"+ + "\3\u0088\5\u0088\u0455\n\u0088\3\u0088\6\u0088\u0458\n\u0088\r\u0088\16"+ + "\u0088\u0459\3\u0089\3\u0089\3\u008a\3\u008a\3\u008b\3\u008b\3\u008b\3"+ + "\u008b\7\u008b\u0464\n\u008b\f\u008b\16\u008b\u0467\13\u008b\3\u008b\5"+ + "\u008b\u046a\n\u008b\3\u008b\5\u008b\u046d\n\u008b\3\u008b\3\u008b\3\u008c"+ + "\3\u008c\3\u008c\3\u008c\3\u008c\7\u008c\u0476\n\u008c\f\u008c\16\u008c"+ + "\u0479\13\u008c\3\u008c\3\u008c\3\u008c\3\u008c\3\u008c\3\u008d\6\u008d"+ + "\u0481\n\u008d\r\u008d\16\u008d\u0482\3\u008d\3\u008d\3\u008e\3\u008e"+ + "\3\u0477\2\u008f\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31"+ + "\16\33\17\35\20\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33\65"+ + "\34\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\64"+ + "g\65i\66k\67m8o9q:s;u{?}@\177A\u0081B\u0083C\u0085D\u0087E\u0089"+ + "F\u008bG\u008dH\u008fI\u0091J\u0093K\u0095L\u0097M\u0099N\u009bO\u009d"+ + "P\u009fQ\u00a1R\u00a3S\u00a5T\u00a7U\u00a9V\u00abW\u00adX\u00afY\u00b1"+ + "Z\u00b3[\u00b5\\\u00b7]\u00b9^\u00bb_\u00bd`\u00bfa\u00c1b\u00c3c\u00c5"+ + "d\u00c7e\u00c9f\u00cbg\u00cdh\u00cfi\u00d1j\u00d3k\u00d5l\u00d7m\u00d9"+ + 
"n\u00dbo\u00ddp\u00dfq\u00e1r\u00e3s\u00e5t\u00e7u\u00e9v\u00ebw\u00ed"+ + "x\u00efy\u00f1z\u00f3{\u00f5|\u00f7}\u00f9~\u00fb\177\u00fd\u0080\u00ff"+ + "\u0081\u0101\u0082\u0103\u0083\u0105\u0084\u0107\u0085\u0109\u0086\u010b"+ + "\u0087\u010d\u0088\u010f\2\u0111\2\u0113\2\u0115\u0089\u0117\u008a\u0119"+ + "\u008b\u011b\u008c\3\2\13\3\2))\4\2BBaa\3\2$$\3\2bb\4\2--//\3\2\62;\3"+ + "\2C\\\4\2\f\f\17\17\5\2\13\f\17\17\"\"\u04a9\2\3\3\2\2\2\2\5\3\2\2\2\2"+ + "\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2"+ + "\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2"+ + "\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2"+ + "\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2"+ + "\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2"+ + "\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2"+ + "M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3"+ + "\2\2\2\2[\3\2\2\2\2]\3\2\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2"+ + "\2\2g\3\2\2\2\2i\3\2\2\2\2k\3\2\2\2\2m\3\2\2\2\2o\3\2\2\2\2q\3\2\2\2\2"+ + "s\3\2\2\2\2u\3\2\2\2\2w\3\2\2\2\2y\3\2\2\2\2{\3\2\2\2\2}\3\2\2\2\2\177"+ + "\3\2\2\2\2\u0081\3\2\2\2\2\u0083\3\2\2\2\2\u0085\3\2\2\2\2\u0087\3\2\2"+ + "\2\2\u0089\3\2\2\2\2\u008b\3\2\2\2\2\u008d\3\2\2\2\2\u008f\3\2\2\2\2\u0091"+ + "\3\2\2\2\2\u0093\3\2\2\2\2\u0095\3\2\2\2\2\u0097\3\2\2\2\2\u0099\3\2\2"+ + "\2\2\u009b\3\2\2\2\2\u009d\3\2\2\2\2\u009f\3\2\2\2\2\u00a1\3\2\2\2\2\u00a3"+ + "\3\2\2\2\2\u00a5\3\2\2\2\2\u00a7\3\2\2\2\2\u00a9\3\2\2\2\2\u00ab\3\2\2"+ + "\2\2\u00ad\3\2\2\2\2\u00af\3\2\2\2\2\u00b1\3\2\2\2\2\u00b3\3\2\2\2\2\u00b5"+ + "\3\2\2\2\2\u00b7\3\2\2\2\2\u00b9\3\2\2\2\2\u00bb\3\2\2\2\2\u00bd\3\2\2"+ + "\2\2\u00bf\3\2\2\2\2\u00c1\3\2\2\2\2\u00c3\3\2\2\2\2\u00c5\3\2\2\2\2\u00c7"+ + "\3\2\2\2\2\u00c9\3\2\2\2\2\u00cb\3\2\2\2\2\u00cd\3\2\2\2\2\u00cf\3\2\2"+ + "\2\2\u00d1\3\2\2\2\2\u00d3\3\2\2\2\2\u00d5\3\2\2\2\2\u00d7\3\2\2\2\2\u00d9"+ + "\3\2\2\2\2\u00db\3\2\2\2\2\u00dd\3\2\2\2\2\u00df\3\2\2\2\2\u00e1\3\2\2"+ + "\2\2\u00e3\3\2\2\2\2\u00e5\3\2\2\2\2\u00e7\3\2\2\2\2\u00e9\3\2\2\2\2\u00eb"+ + "\3\2\2\2\2\u00ed\3\2\2\2\2\u00ef\3\2\2\2\2\u00f1\3\2\2\2\2\u00f3\3\2\2"+ + "\2\2\u00f5\3\2\2\2\2\u00f7\3\2\2\2\2\u00f9\3\2\2\2\2\u00fb\3\2\2\2\2\u00fd"+ + "\3\2\2\2\2\u00ff\3\2\2\2\2\u0101\3\2\2\2\2\u0103\3\2\2\2\2\u0105\3\2\2"+ + "\2\2\u0107\3\2\2\2\2\u0109\3\2\2\2\2\u010b\3\2\2\2\2\u010d\3\2\2\2\2\u0115"+ + "\3\2\2\2\2\u0117\3\2\2\2\2\u0119\3\2\2\2\2\u011b\3\2\2\2\3\u011d\3\2\2"+ + "\2\5\u011f\3\2\2\2\7\u0121\3\2\2\2\t\u0123\3\2\2\2\13\u0125\3\2\2\2\r"+ + "\u0129\3\2\2\2\17\u0131\3\2\2\2\21\u013a\3\2\2\2\23\u013e\3\2\2\2\25\u0142"+ + "\3\2\2\2\27\u0145\3\2\2\2\31\u0149\3\2\2\2\33\u0151\3\2\2\2\35\u0154\3"+ + "\2\2\2\37\u0159\3\2\2\2!\u015e\3\2\2\2#\u0166\3\2\2\2%\u016f\3\2\2\2\'"+ + "\u0177\3\2\2\2)\u017f\3\2\2\2+\u018c\3\2\2\2-\u0199\3\2\2\2/\u01ab\3\2"+ + "\2\2\61\u01af\3\2\2\2\63\u01b4\3\2\2\2\65\u01ba\3\2\2\2\67\u01bf\3\2\2"+ + "\29\u01c8\3\2\2\2;\u01d1\3\2\2\2=\u01d6\3\2\2\2?\u01da\3\2\2\2A\u01e1"+ + "\3\2\2\2C\u01ec\3\2\2\2E\u01f3\3\2\2\2G\u01fb\3\2\2\2I\u0203\3\2\2\2K"+ + "\u0209\3\2\2\2M\u020f\3\2\2\2O\u0213\3\2\2\2Q\u021a\3\2\2\2S\u021f\3\2"+ + "\2\2U\u0226\3\2\2\2W\u022b\3\2\2\2Y\u0235\3\2\2\2[\u023e\3\2\2\2]\u0244"+ + "\3\2\2\2_\u024b\3\2\2\2a\u0250\3\2\2\2c\u0256\3\2\2\2e\u0259\3\2\2\2g"+ + "\u0261\3\2\2\2i\u0267\3\2\2\2k\u0270\3\2\2\2m\u0273\3\2\2\2o\u0278\3\2"+ + "\2\2q\u027d\3\2\2\2s\u0282\3\2\2\2u\u0287\3\2\2\2w\u028d\3\2\2\2y\u0294"+ + 
"\3\2\2\2{\u029a\3\2\2\2}\u02a1\3\2\2\2\177\u02a9\3\2\2\2\u0081\u02af\3"+ + "\2\2\2\u0083\u02b6\3\2\2\2\u0085\u02be\3\2\2\2\u0087\u02c2\3\2\2\2\u0089"+ + "\u02c7\3\2\2\2\u008b\u02cd\3\2\2\2\u008d\u02d0\3\2\2\2\u008f\u02da\3\2"+ + "\2\2\u0091\u02dd\3\2\2\2\u0093\u02e3\3\2\2\2\u0095\u02e9\3\2\2\2\u0097"+ + "\u02f0\3\2\2\2\u0099\u02f9\3\2\2\2\u009b\u02ff\3\2\2\2\u009d\u0304\3\2"+ + "\2\2\u009f\u030a\3\2\2\2\u00a1\u0310\3\2\2\2\u00a3\u0316\3\2\2\2\u00a5"+ + "\u031e\3\2\2\2\u00a7\u0325\3\2\2\2\u00a9\u032d\3\2\2\2\u00ab\u0334\3\2"+ + "\2\2\u00ad\u0339\3\2\2\2\u00af\u033d\3\2\2\2\u00b1\u0343\3\2\2\2\u00b3"+ + "\u034a\3\2\2\2\u00b5\u034f\3\2\2\2\u00b7\u0354\3\2\2\2\u00b9\u0359\3\2"+ + "\2\2\u00bb\u035c\3\2\2\2\u00bd\u0361\3\2\2\2\u00bf\u0367\3\2\2\2\u00c1"+ + "\u036d\3\2\2\2\u00c3\u0374\3\2\2\2\u00c5\u0379\3\2\2\2\u00c7\u037f\3\2"+ + "\2\2\u00c9\u0384\3\2\2\2\u00cb\u0389\3\2\2\2\u00cd\u038f\3\2\2\2\u00cf"+ + "\u0397\3\2\2\2\u00d1\u039b\3\2\2\2\u00d3\u03a2\3\2\2\2\u00d5\u03a5\3\2"+ + "\2\2\u00d7\u03a8\3\2\2\2\u00d9\u03ac\3\2\2\2\u00db\u03b2\3\2\2\2\u00dd"+ + "\u03b9\3\2\2\2\u00df\u03bb\3\2\2\2\u00e1\u03bd\3\2\2\2\u00e3\u03c5\3\2"+ + "\2\2\u00e5\u03c7\3\2\2\2\u00e7\u03c9\3\2\2\2\u00e9\u03cc\3\2\2\2\u00eb"+ + "\u03ce\3\2\2\2\u00ed\u03d1\3\2\2\2\u00ef\u03d3\3\2\2\2\u00f1\u03d5\3\2"+ + "\2\2\u00f3\u03d7\3\2\2\2\u00f5\u03d9\3\2\2\2\u00f7\u03db\3\2\2\2\u00f9"+ + "\u03de\3\2\2\2\u00fb\u03e1\3\2\2\2\u00fd\u03e3\3\2\2\2\u00ff\u03e5\3\2"+ + "\2\2\u0101\u03f1\3\2\2\2\u0103\u041f\3\2\2\2\u0105\u0423\3\2\2\2\u0107"+ + "\u042d\3\2\2\2\u0109\u0438\3\2\2\2\u010b\u043c\3\2\2\2\u010d\u0447\3\2"+ + "\2\2\u010f\u0452\3\2\2\2\u0111\u045b\3\2\2\2\u0113\u045d\3\2\2\2\u0115"+ + "\u045f\3\2\2\2\u0117\u0470\3\2\2\2\u0119\u0480\3\2\2\2\u011b\u0486\3\2"+ + "\2\2\u011d\u011e\7*\2\2\u011e\4\3\2\2\2\u011f\u0120\7+\2\2\u0120\6\3\2"+ + "\2\2\u0121\u0122\7.\2\2\u0122\b\3\2\2\2\u0123\u0124\7<\2\2\u0124\n\3\2"+ + "\2\2\u0125\u0126\7C\2\2\u0126\u0127\7N\2\2\u0127\u0128\7N\2\2\u0128\f"+ + "\3\2\2\2\u0129\u012a\7C\2\2\u012a\u012b\7P\2\2\u012b\u012c\7C\2\2\u012c"+ + "\u012d\7N\2\2\u012d\u012e\7[\2\2\u012e\u012f\7\\\2\2\u012f\u0130\7G\2"+ + "\2\u0130\16\3\2\2\2\u0131\u0132\7C\2\2\u0132\u0133\7P\2\2\u0133\u0134"+ + "\7C\2\2\u0134\u0135\7N\2\2\u0135\u0136\7[\2\2\u0136\u0137\7\\\2\2\u0137"+ + "\u0138\7G\2\2\u0138\u0139\7F\2\2\u0139\20\3\2\2\2\u013a\u013b\7C\2\2\u013b"+ + "\u013c\7P\2\2\u013c\u013d\7F\2\2\u013d\22\3\2\2\2\u013e\u013f\7C\2\2\u013f"+ + "\u0140\7P\2\2\u0140\u0141\7[\2\2\u0141\24\3\2\2\2\u0142\u0143\7C\2\2\u0143"+ + "\u0144\7U\2\2\u0144\26\3\2\2\2\u0145\u0146\7C\2\2\u0146\u0147\7U\2\2\u0147"+ + "\u0148\7E\2\2\u0148\30\3\2\2\2\u0149\u014a\7D\2\2\u014a\u014b\7G\2\2\u014b"+ + "\u014c\7V\2\2\u014c\u014d\7Y\2\2\u014d\u014e\7G\2\2\u014e\u014f\7G\2\2"+ + "\u014f\u0150\7P\2\2\u0150\32\3\2\2\2\u0151\u0152\7D\2\2\u0152\u0153\7"+ + "[\2\2\u0153\34\3\2\2\2\u0154\u0155\7E\2\2\u0155\u0156\7C\2\2\u0156\u0157"+ + "\7U\2\2\u0157\u0158\7G\2\2\u0158\36\3\2\2\2\u0159\u015a\7E\2\2\u015a\u015b"+ + "\7C\2\2\u015b\u015c\7U\2\2\u015c\u015d\7V\2\2\u015d \3\2\2\2\u015e\u015f"+ + "\7E\2\2\u015f\u0160\7C\2\2\u0160\u0161\7V\2\2\u0161\u0162\7C\2\2\u0162"+ + "\u0163\7N\2\2\u0163\u0164\7Q\2\2\u0164\u0165\7I\2\2\u0165\"\3\2\2\2\u0166"+ + "\u0167\7E\2\2\u0167\u0168\7C\2\2\u0168\u0169\7V\2\2\u0169\u016a\7C\2\2"+ + "\u016a\u016b\7N\2\2\u016b\u016c\7Q\2\2\u016c\u016d\7I\2\2\u016d\u016e"+ + "\7U\2\2\u016e$\3\2\2\2\u016f\u0170\7E\2\2\u0170\u0171\7Q\2\2\u0171\u0172"+ + "\7N\2\2\u0172\u0173\7W\2\2\u0173\u0174\7O\2\2\u0174\u0175\7P\2\2\u0175"+ + 
"\u0176\7U\2\2\u0176&\3\2\2\2\u0177\u0178\7E\2\2\u0178\u0179\7Q\2\2\u0179"+ + "\u017a\7P\2\2\u017a\u017b\7X\2\2\u017b\u017c\7G\2\2\u017c\u017d\7T\2\2"+ + "\u017d\u017e\7V\2\2\u017e(\3\2\2\2\u017f\u0180\7E\2\2\u0180\u0181\7W\2"+ + "\2\u0181\u0182\7T\2\2\u0182\u0183\7T\2\2\u0183\u0184\7G\2\2\u0184\u0185"+ + "\7P\2\2\u0185\u0186\7V\2\2\u0186\u0187\7a\2\2\u0187\u0188\7F\2\2\u0188"+ + "\u0189\7C\2\2\u0189\u018a\7V\2\2\u018a\u018b\7G\2\2\u018b*\3\2\2\2\u018c"+ + "\u018d\7E\2\2\u018d\u018e\7W\2\2\u018e\u018f\7T\2\2\u018f\u0190\7T\2\2"+ + "\u0190\u0191\7G\2\2\u0191\u0192\7P\2\2\u0192\u0193\7V\2\2\u0193\u0194"+ + "\7a\2\2\u0194\u0195\7V\2\2\u0195\u0196\7K\2\2\u0196\u0197\7O\2\2\u0197"+ + "\u0198\7G\2\2\u0198,\3\2\2\2\u0199\u019a\7E\2\2\u019a\u019b\7W\2\2\u019b"+ + "\u019c\7T\2\2\u019c\u019d\7T\2\2\u019d\u019e\7G\2\2\u019e\u019f\7P\2\2"+ + "\u019f\u01a0\7V\2\2\u01a0\u01a1\7a\2\2\u01a1\u01a2\7V\2\2\u01a2\u01a3"+ + "\7K\2\2\u01a3\u01a4\7O\2\2\u01a4\u01a5\7G\2\2\u01a5\u01a6\7U\2\2\u01a6"+ + "\u01a7\7V\2\2\u01a7\u01a8\7C\2\2\u01a8\u01a9\7O\2\2\u01a9\u01aa\7R\2\2"+ + "\u01aa.\3\2\2\2\u01ab\u01ac\7F\2\2\u01ac\u01ad\7C\2\2\u01ad\u01ae\7[\2"+ + "\2\u01ae\60\3\2\2\2\u01af\u01b0\7F\2\2\u01b0\u01b1\7C\2\2\u01b1\u01b2"+ + "\7[\2\2\u01b2\u01b3\7U\2\2\u01b3\62\3\2\2\2\u01b4\u01b5\7F\2\2\u01b5\u01b6"+ + "\7G\2\2\u01b6\u01b7\7D\2\2\u01b7\u01b8\7W\2\2\u01b8\u01b9\7I\2\2\u01b9"+ + "\64\3\2\2\2\u01ba\u01bb\7F\2\2\u01bb\u01bc\7G\2\2\u01bc\u01bd\7U\2\2\u01bd"+ + "\u01be\7E\2\2\u01be\66\3\2\2\2\u01bf\u01c0\7F\2\2\u01c0\u01c1\7G\2\2\u01c1"+ + "\u01c2\7U\2\2\u01c2\u01c3\7E\2\2\u01c3\u01c4\7T\2\2\u01c4\u01c5\7K\2\2"+ + "\u01c5\u01c6\7D\2\2\u01c6\u01c7\7G\2\2\u01c78\3\2\2\2\u01c8\u01c9\7F\2"+ + "\2\u01c9\u01ca\7K\2\2\u01ca\u01cb\7U\2\2\u01cb\u01cc\7V\2\2\u01cc\u01cd"+ + "\7K\2\2\u01cd\u01ce\7P\2\2\u01ce\u01cf\7E\2\2\u01cf\u01d0\7V\2\2\u01d0"+ + ":\3\2\2\2\u01d1\u01d2\7G\2\2\u01d2\u01d3\7N\2\2\u01d3\u01d4\7U\2\2\u01d4"+ + "\u01d5\7G\2\2\u01d5<\3\2\2\2\u01d6\u01d7\7G\2\2\u01d7\u01d8\7P\2\2\u01d8"+ + "\u01d9\7F\2\2\u01d9>\3\2\2\2\u01da\u01db\7G\2\2\u01db\u01dc\7U\2\2\u01dc"+ + "\u01dd\7E\2\2\u01dd\u01de\7C\2\2\u01de\u01df\7R\2\2\u01df\u01e0\7G\2\2"+ + "\u01e0@\3\2\2\2\u01e1\u01e2\7G\2\2\u01e2\u01e3\7Z\2\2\u01e3\u01e4\7G\2"+ + "\2\u01e4\u01e5\7E\2\2\u01e5\u01e6\7W\2\2\u01e6\u01e7\7V\2\2\u01e7\u01e8"+ + "\7C\2\2\u01e8\u01e9\7D\2\2\u01e9\u01ea\7N\2\2\u01ea\u01eb\7G\2\2\u01eb"+ + "B\3\2\2\2\u01ec\u01ed\7G\2\2\u01ed\u01ee\7Z\2\2\u01ee\u01ef\7K\2\2\u01ef"+ + "\u01f0\7U\2\2\u01f0\u01f1\7V\2\2\u01f1\u01f2\7U\2\2\u01f2D\3\2\2\2\u01f3"+ + "\u01f4\7G\2\2\u01f4\u01f5\7Z\2\2\u01f5\u01f6\7R\2\2\u01f6\u01f7\7N\2\2"+ + "\u01f7\u01f8\7C\2\2\u01f8\u01f9\7K\2\2\u01f9\u01fa\7P\2\2\u01faF\3\2\2"+ + "\2\u01fb\u01fc\7G\2\2\u01fc\u01fd\7Z\2\2\u01fd\u01fe\7V\2\2\u01fe\u01ff"+ + "\7T\2\2\u01ff\u0200\7C\2\2\u0200\u0201\7E\2\2\u0201\u0202\7V\2\2\u0202"+ + "H\3\2\2\2\u0203\u0204\7H\2\2\u0204\u0205\7C\2\2\u0205\u0206\7N\2\2\u0206"+ + "\u0207\7U\2\2\u0207\u0208\7G\2\2\u0208J\3\2\2\2\u0209\u020a\7H\2\2\u020a"+ + "\u020b\7K\2\2\u020b\u020c\7T\2\2\u020c\u020d\7U\2\2\u020d\u020e\7V\2\2"+ + "\u020eL\3\2\2\2\u020f\u0210\7H\2\2\u0210\u0211\7Q\2\2\u0211\u0212\7T\2"+ + "\2\u0212N\3\2\2\2\u0213\u0214\7H\2\2\u0214\u0215\7Q\2\2\u0215\u0216\7"+ + "T\2\2\u0216\u0217\7O\2\2\u0217\u0218\7C\2\2\u0218\u0219\7V\2\2\u0219P"+ + "\3\2\2\2\u021a\u021b\7H\2\2\u021b\u021c\7T\2\2\u021c\u021d\7Q\2\2\u021d"+ + "\u021e\7O\2\2\u021eR\3\2\2\2\u021f\u0220\7H\2\2\u0220\u0221\7T\2\2\u0221"+ + "\u0222\7Q\2\2\u0222\u0223\7\\\2\2\u0223\u0224\7G\2\2\u0224\u0225\7P\2"+ + 
"\2\u0225T\3\2\2\2\u0226\u0227\7H\2\2\u0227\u0228\7W\2\2\u0228\u0229\7"+ + "N\2\2\u0229\u022a\7N\2\2\u022aV\3\2\2\2\u022b\u022c\7H\2\2\u022c\u022d"+ + "\7W\2\2\u022d\u022e\7P\2\2\u022e\u022f\7E\2\2\u022f\u0230\7V\2\2\u0230"+ + "\u0231\7K\2\2\u0231\u0232\7Q\2\2\u0232\u0233\7P\2\2\u0233\u0234\7U\2\2"+ + "\u0234X\3\2\2\2\u0235\u0236\7I\2\2\u0236\u0237\7T\2\2\u0237\u0238\7C\2"+ + "\2\u0238\u0239\7R\2\2\u0239\u023a\7J\2\2\u023a\u023b\7X\2\2\u023b\u023c"+ + "\7K\2\2\u023c\u023d\7\\\2\2\u023dZ\3\2\2\2\u023e\u023f\7I\2\2\u023f\u0240"+ + "\7T\2\2\u0240\u0241\7Q\2\2\u0241\u0242\7W\2\2\u0242\u0243\7R\2\2\u0243"+ + "\\\3\2\2\2\u0244\u0245\7J\2\2\u0245\u0246\7C\2\2\u0246\u0247\7X\2\2\u0247"+ + "\u0248\7K\2\2\u0248\u0249\7P\2\2\u0249\u024a\7I\2\2\u024a^\3\2\2\2\u024b"+ + "\u024c\7J\2\2\u024c\u024d\7Q\2\2\u024d\u024e\7W\2\2\u024e\u024f\7T\2\2"+ + "\u024f`\3\2\2\2\u0250\u0251\7J\2\2\u0251\u0252\7Q\2\2\u0252\u0253\7W\2"+ + "\2\u0253\u0254\7T\2\2\u0254\u0255\7U\2\2\u0255b\3\2\2\2\u0256\u0257\7"+ + "K\2\2\u0257\u0258\7P\2\2\u0258d\3\2\2\2\u0259\u025a\7K\2\2\u025a\u025b"+ + "\7P\2\2\u025b\u025c\7E\2\2\u025c\u025d\7N\2\2\u025d\u025e\7W\2\2\u025e"+ + "\u025f\7F\2\2\u025f\u0260\7G\2\2\u0260f\3\2\2\2\u0261\u0262\7K\2\2\u0262"+ + "\u0263\7P\2\2\u0263\u0264\7P\2\2\u0264\u0265\7G\2\2\u0265\u0266\7T\2\2"+ + "\u0266h\3\2\2\2\u0267\u0268\7K\2\2\u0268\u0269\7P\2\2\u0269\u026a\7V\2"+ + "\2\u026a\u026b\7G\2\2\u026b\u026c\7T\2\2\u026c\u026d\7X\2\2\u026d\u026e"+ + "\7C\2\2\u026e\u026f\7N\2\2\u026fj\3\2\2\2\u0270\u0271\7K\2\2\u0271\u0272"+ + "\7U\2\2\u0272l\3\2\2\2\u0273\u0274\7L\2\2\u0274\u0275\7Q\2\2\u0275\u0276"+ + "\7K\2\2\u0276\u0277\7P\2\2\u0277n\3\2\2\2\u0278\u0279\7N\2\2\u0279\u027a"+ + "\7C\2\2\u027a\u027b\7U\2\2\u027b\u027c\7V\2\2\u027cp\3\2\2\2\u027d\u027e"+ + "\7N\2\2\u027e\u027f\7G\2\2\u027f\u0280\7H\2\2\u0280\u0281\7V\2\2\u0281"+ + "r\3\2\2\2\u0282\u0283\7N\2\2\u0283\u0284\7K\2\2\u0284\u0285\7M\2\2\u0285"+ + "\u0286\7G\2\2\u0286t\3\2\2\2\u0287\u0288\7N\2\2\u0288\u0289\7K\2\2\u0289"+ + "\u028a\7O\2\2\u028a\u028b\7K\2\2\u028b\u028c\7V\2\2\u028cv\3\2\2\2\u028d"+ + "\u028e\7O\2\2\u028e\u028f\7C\2\2\u028f\u0290\7R\2\2\u0290\u0291\7R\2\2"+ + "\u0291\u0292\7G\2\2\u0292\u0293\7F\2\2\u0293x\3\2\2\2\u0294\u0295\7O\2"+ + "\2\u0295\u0296\7C\2\2\u0296\u0297\7V\2\2\u0297\u0298\7E\2\2\u0298\u0299"+ + "\7J\2\2\u0299z\3\2\2\2\u029a\u029b\7O\2\2\u029b\u029c\7K\2\2\u029c\u029d"+ + "\7P\2\2\u029d\u029e\7W\2\2\u029e\u029f\7V\2\2\u029f\u02a0\7G\2\2\u02a0"+ + "|\3\2\2\2\u02a1\u02a2\7O\2\2\u02a2\u02a3\7K\2\2\u02a3\u02a4\7P\2\2\u02a4"+ + "\u02a5\7W\2\2\u02a5\u02a6\7V\2\2\u02a6\u02a7\7G\2\2\u02a7\u02a8\7U\2\2"+ + "\u02a8~\3\2\2\2\u02a9\u02aa\7O\2\2\u02aa\u02ab\7Q\2\2\u02ab\u02ac\7P\2"+ + "\2\u02ac\u02ad\7V\2\2\u02ad\u02ae\7J\2\2\u02ae\u0080\3\2\2\2\u02af\u02b0"+ + "\7O\2\2\u02b0\u02b1\7Q\2\2\u02b1\u02b2\7P\2\2\u02b2\u02b3\7V\2\2\u02b3"+ + "\u02b4\7J\2\2\u02b4\u02b5\7U\2\2\u02b5\u0082\3\2\2\2\u02b6\u02b7\7P\2"+ + "\2\u02b7\u02b8\7C\2\2\u02b8\u02b9\7V\2\2\u02b9\u02ba\7W\2\2\u02ba\u02bb"+ + "\7T\2\2\u02bb\u02bc\7C\2\2\u02bc\u02bd\7N\2\2\u02bd\u0084\3\2\2\2\u02be"+ + "\u02bf\7P\2\2\u02bf\u02c0\7Q\2\2\u02c0\u02c1\7V\2\2\u02c1\u0086\3\2\2"+ + "\2\u02c2\u02c3\7P\2\2\u02c3\u02c4\7W\2\2\u02c4\u02c5\7N\2\2\u02c5\u02c6"+ + "\7N\2\2\u02c6\u0088\3\2\2\2\u02c7\u02c8\7P\2\2\u02c8\u02c9\7W\2\2\u02c9"+ + "\u02ca\7N\2\2\u02ca\u02cb\7N\2\2\u02cb\u02cc\7U\2\2\u02cc\u008a\3\2\2"+ + "\2\u02cd\u02ce\7Q\2\2\u02ce\u02cf\7P\2\2\u02cf\u008c\3\2\2\2\u02d0\u02d1"+ + "\7Q\2\2\u02d1\u02d2\7R\2\2\u02d2\u02d3\7V\2\2\u02d3\u02d4\7K\2\2\u02d4"+ + 
"\u02d5\7O\2\2\u02d5\u02d6\7K\2\2\u02d6\u02d7\7\\\2\2\u02d7\u02d8\7G\2"+ + "\2\u02d8\u02d9\7F\2\2\u02d9\u008e\3\2\2\2\u02da\u02db\7Q\2\2\u02db\u02dc"+ + "\7T\2\2\u02dc\u0090\3\2\2\2\u02dd\u02de\7Q\2\2\u02de\u02df\7T\2\2\u02df"+ + "\u02e0\7F\2\2\u02e0\u02e1\7G\2\2\u02e1\u02e2\7T\2\2\u02e2\u0092\3\2\2"+ + "\2\u02e3\u02e4\7Q\2\2\u02e4\u02e5\7W\2\2\u02e5\u02e6\7V\2\2\u02e6\u02e7"+ + "\7G\2\2\u02e7\u02e8\7T\2\2\u02e8\u0094\3\2\2\2\u02e9\u02ea\7R\2\2\u02ea"+ + "\u02eb\7C\2\2\u02eb\u02ec\7T\2\2\u02ec\u02ed\7U\2\2\u02ed\u02ee\7G\2\2"+ + "\u02ee\u02ef\7F\2\2\u02ef\u0096\3\2\2\2\u02f0\u02f1\7R\2\2\u02f1\u02f2"+ + "\7J\2\2\u02f2\u02f3\7[\2\2\u02f3\u02f4\7U\2\2\u02f4\u02f5\7K\2\2\u02f5"+ + "\u02f6\7E\2\2\u02f6\u02f7\7C\2\2\u02f7\u02f8\7N\2\2\u02f8\u0098\3\2\2"+ + "\2\u02f9\u02fa\7R\2\2\u02fa\u02fb\7K\2\2\u02fb\u02fc\7X\2\2\u02fc\u02fd"+ + "\7Q\2\2\u02fd\u02fe\7V\2\2\u02fe\u009a\3\2\2\2\u02ff\u0300\7R\2\2\u0300"+ + "\u0301\7N\2\2\u0301\u0302\7C\2\2\u0302\u0303\7P\2\2\u0303\u009c\3\2\2"+ + "\2\u0304\u0305\7T\2\2\u0305\u0306\7K\2\2\u0306\u0307\7I\2\2\u0307\u0308"+ + "\7J\2\2\u0308\u0309\7V\2\2\u0309\u009e\3\2\2\2\u030a\u030b\7T\2\2\u030b"+ + "\u030c\7N\2\2\u030c\u030d\7K\2\2\u030d\u030e\7M\2\2\u030e\u030f\7G\2\2"+ + "\u030f\u00a0\3\2\2\2\u0310\u0311\7S\2\2\u0311\u0312\7W\2\2\u0312\u0313"+ + "\7G\2\2\u0313\u0314\7T\2\2\u0314\u0315\7[\2\2\u0315\u00a2\3\2\2\2\u0316"+ + "\u0317\7U\2\2\u0317\u0318\7E\2\2\u0318\u0319\7J\2\2\u0319\u031a\7G\2\2"+ + "\u031a\u031b\7O\2\2\u031b\u031c\7C\2\2\u031c\u031d\7U\2\2\u031d\u00a4"+ + "\3\2\2\2\u031e\u031f\7U\2\2\u031f\u0320\7G\2\2\u0320\u0321\7E\2\2\u0321"+ + "\u0322\7Q\2\2\u0322\u0323\7P\2\2\u0323\u0324\7F\2\2\u0324\u00a6\3\2\2"+ + "\2\u0325\u0326\7U\2\2\u0326\u0327\7G\2\2\u0327\u0328\7E\2\2\u0328\u0329"+ + "\7Q\2\2\u0329\u032a\7P\2\2\u032a\u032b\7F\2\2\u032b\u032c\7U\2\2\u032c"+ + "\u00a8\3\2\2\2\u032d\u032e\7U\2\2\u032e\u032f\7G\2\2\u032f\u0330\7N\2"+ + "\2\u0330\u0331\7G\2\2\u0331\u0332\7E\2\2\u0332\u0333\7V\2\2\u0333\u00aa"+ + "\3\2\2\2\u0334\u0335\7U\2\2\u0335\u0336\7J\2\2\u0336\u0337\7Q\2\2\u0337"+ + "\u0338\7Y\2\2\u0338\u00ac\3\2\2\2\u0339\u033a\7U\2\2\u033a\u033b\7[\2"+ + "\2\u033b\u033c\7U\2\2\u033c\u00ae\3\2\2\2\u033d\u033e\7V\2\2\u033e\u033f"+ + "\7C\2\2\u033f\u0340\7D\2\2\u0340\u0341\7N\2\2\u0341\u0342\7G\2\2\u0342"+ + "\u00b0\3\2\2\2\u0343\u0344\7V\2\2\u0344\u0345\7C\2\2\u0345\u0346\7D\2"+ + "\2\u0346\u0347\7N\2\2\u0347\u0348\7G\2\2\u0348\u0349\7U\2\2\u0349\u00b2"+ + "\3\2\2\2\u034a\u034b\7V\2\2\u034b\u034c\7G\2\2\u034c\u034d\7Z\2\2\u034d"+ + "\u034e\7V\2\2\u034e\u00b4\3\2\2\2\u034f\u0350\7V\2\2\u0350\u0351\7J\2"+ + "\2\u0351\u0352\7G\2\2\u0352\u0353\7P\2\2\u0353\u00b6\3\2\2\2\u0354\u0355"+ + "\7V\2\2\u0355\u0356\7T\2\2\u0356\u0357\7W\2\2\u0357\u0358\7G\2\2\u0358"+ + "\u00b8\3\2\2\2\u0359\u035a\7V\2\2\u035a\u035b\7Q\2\2\u035b\u00ba\3\2\2"+ + "\2\u035c\u035d\7V\2\2\u035d\u035e\7[\2\2\u035e\u035f\7R\2\2\u035f\u0360"+ + "\7G\2\2\u0360\u00bc\3\2\2\2\u0361\u0362\7V\2\2\u0362\u0363\7[\2\2\u0363"+ + "\u0364\7R\2\2\u0364\u0365\7G\2\2\u0365\u0366\7U\2\2\u0366\u00be\3\2\2"+ + "\2\u0367\u0368\7W\2\2\u0368\u0369\7U\2\2\u0369\u036a\7K\2\2\u036a\u036b"+ + "\7P\2\2\u036b\u036c\7I\2\2\u036c\u00c0\3\2\2\2\u036d\u036e\7X\2\2\u036e"+ + "\u036f\7G\2\2\u036f\u0370\7T\2\2\u0370\u0371\7K\2\2\u0371\u0372\7H\2\2"+ + "\u0372\u0373\7[\2\2\u0373\u00c2\3\2\2\2\u0374\u0375\7Y\2\2\u0375\u0376"+ + "\7J\2\2\u0376\u0377\7G\2\2\u0377\u0378\7P\2\2\u0378\u00c4\3\2\2\2\u0379"+ + "\u037a\7Y\2\2\u037a\u037b\7J\2\2\u037b\u037c\7G\2\2\u037c\u037d\7T\2\2"+ + 
"\u037d\u037e\7G\2\2\u037e\u00c6\3\2\2\2\u037f\u0380\7Y\2\2\u0380\u0381"+ + "\7K\2\2\u0381\u0382\7V\2\2\u0382\u0383\7J\2\2\u0383\u00c8\3\2\2\2\u0384"+ + "\u0385\7[\2\2\u0385\u0386\7G\2\2\u0386\u0387\7C\2\2\u0387\u0388\7T\2\2"+ + "\u0388\u00ca\3\2\2\2\u0389\u038a\7[\2\2\u038a\u038b\7G\2\2\u038b\u038c"+ + "\7C\2\2\u038c\u038d\7T\2\2\u038d\u038e\7U\2\2\u038e\u00cc\3\2\2\2\u038f"+ + "\u0390\5\u00dbn\2\u0390\u0391\7G\2\2\u0391\u0392\7U\2\2\u0392\u0393\7"+ + "E\2\2\u0393\u0394\7C\2\2\u0394\u0395\7R\2\2\u0395\u0396\7G\2\2\u0396\u00ce"+ + "\3\2\2\2\u0397\u0398\5\u00dbn\2\u0398\u0399\7H\2\2\u0399\u039a\7P\2\2"+ + "\u039a\u00d0\3\2\2\2\u039b\u039c\5\u00dbn\2\u039c\u039d\7N\2\2\u039d\u039e"+ + "\7K\2\2\u039e\u039f\7O\2\2\u039f\u03a0\7K\2\2\u03a0\u03a1\7V\2\2\u03a1"+ + "\u00d2\3\2\2\2\u03a2\u03a3\5\u00dbn\2\u03a3\u03a4\7F\2\2\u03a4\u00d4\3"+ + "\2\2\2\u03a5\u03a6\5\u00dbn\2\u03a6\u03a7\7V\2\2\u03a7\u00d6\3\2\2\2\u03a8"+ + "\u03a9\5\u00dbn\2\u03a9\u03aa\7V\2\2\u03aa\u03ab\7U\2\2\u03ab\u00d8\3"+ + "\2\2\2\u03ac\u03ad\5\u00dbn\2\u03ad\u03ae\7I\2\2\u03ae\u03af\7W\2\2\u03af"+ + "\u03b0\7K\2\2\u03b0\u03b1\7F\2\2\u03b1\u00da\3\2\2\2\u03b2\u03b6\7}\2"+ + "\2\u03b3\u03b5\5\u0119\u008d\2\u03b4\u03b3\3\2\2\2\u03b5\u03b8\3\2\2\2"+ + "\u03b6\u03b4\3\2\2\2\u03b6\u03b7\3\2\2\2\u03b7\u00dc\3\2\2\2\u03b8\u03b6"+ + "\3\2\2\2\u03b9\u03ba\7\177\2\2\u03ba\u00de\3\2\2\2\u03bb\u03bc\7?\2\2"+ + "\u03bc\u00e0\3\2\2\2\u03bd\u03be\7>\2\2\u03be\u03bf\7?\2\2\u03bf\u03c0"+ + "\7@\2\2\u03c0\u00e2\3\2\2\2\u03c1\u03c2\7>\2\2\u03c2\u03c6\7@\2\2\u03c3"+ + "\u03c4\7#\2\2\u03c4\u03c6\7?\2\2\u03c5\u03c1\3\2\2\2\u03c5\u03c3\3\2\2"+ + "\2\u03c6\u00e4\3\2\2\2\u03c7\u03c8\7>\2\2\u03c8\u00e6\3\2\2\2\u03c9\u03ca"+ + "\7>\2\2\u03ca\u03cb\7?\2\2\u03cb\u00e8\3\2\2\2\u03cc\u03cd\7@\2\2\u03cd"+ + "\u00ea\3\2\2\2\u03ce\u03cf\7@\2\2\u03cf\u03d0\7?\2\2\u03d0\u00ec\3\2\2"+ + "\2\u03d1\u03d2\7-\2\2\u03d2\u00ee\3\2\2\2\u03d3\u03d4\7/\2\2\u03d4\u00f0"+ + "\3\2\2\2\u03d5\u03d6\7,\2\2\u03d6\u00f2\3\2\2\2\u03d7\u03d8\7\61\2\2\u03d8"+ + "\u00f4\3\2\2\2\u03d9\u03da\7\'\2\2\u03da\u00f6\3\2\2\2\u03db\u03dc\7<"+ + "\2\2\u03dc\u03dd\7<\2\2\u03dd\u00f8\3\2\2\2\u03de\u03df\7~\2\2\u03df\u03e0"+ + "\7~\2\2\u03e0\u00fa\3\2\2\2\u03e1\u03e2\7\60\2\2\u03e2\u00fc\3\2\2\2\u03e3"+ + "\u03e4\7A\2\2\u03e4\u00fe\3\2\2\2\u03e5\u03eb\7)\2\2\u03e6\u03ea\n\2\2"+ + "\2\u03e7\u03e8\7)\2\2\u03e8\u03ea\7)\2\2\u03e9\u03e6\3\2\2\2\u03e9\u03e7"+ + "\3\2\2\2\u03ea\u03ed\3\2\2\2\u03eb\u03e9\3\2\2\2\u03eb\u03ec\3\2\2\2\u03ec"+ + "\u03ee\3\2\2\2\u03ed\u03eb\3\2\2\2\u03ee\u03ef\7)\2\2\u03ef\u0100\3\2"+ + "\2\2\u03f0\u03f2\5\u0111\u0089\2\u03f1\u03f0\3\2\2\2\u03f2\u03f3\3\2\2"+ + "\2\u03f3\u03f1\3\2\2\2\u03f3\u03f4\3\2\2\2\u03f4\u0102\3\2\2\2\u03f5\u03f7"+ + "\5\u0111\u0089\2\u03f6\u03f5\3\2\2\2\u03f7\u03f8\3\2\2\2\u03f8\u03f6\3"+ + "\2\2\2\u03f8\u03f9\3\2\2\2\u03f9\u03fa\3\2\2\2\u03fa\u03fe\5\u00fb~\2"+ + "\u03fb\u03fd\5\u0111\u0089\2\u03fc\u03fb\3\2\2\2\u03fd\u0400\3\2\2\2\u03fe"+ + "\u03fc\3\2\2\2\u03fe\u03ff\3\2\2\2\u03ff\u0420\3\2\2\2\u0400\u03fe\3\2"+ + "\2\2\u0401\u0403\5\u00fb~\2\u0402\u0404\5\u0111\u0089\2\u0403\u0402\3"+ + "\2\2\2\u0404\u0405\3\2\2\2\u0405\u0403\3\2\2\2\u0405\u0406\3\2\2\2\u0406"+ + "\u0420\3\2\2\2\u0407\u0409\5\u0111\u0089\2\u0408\u0407\3\2\2\2\u0409\u040a"+ + "\3\2\2\2\u040a\u0408\3\2\2\2\u040a\u040b\3\2\2\2\u040b\u0413\3\2\2\2\u040c"+ + "\u0410\5\u00fb~\2\u040d\u040f\5\u0111\u0089\2\u040e\u040d\3\2\2\2\u040f"+ + "\u0412\3\2\2\2\u0410\u040e\3\2\2\2\u0410\u0411\3\2\2\2\u0411\u0414\3\2"+ + "\2\2\u0412\u0410\3\2\2\2\u0413\u040c\3\2\2\2\u0413\u0414\3\2\2\2\u0414"+ + 
"\u0415\3\2\2\2\u0415\u0416\5\u010f\u0088\2\u0416\u0420\3\2\2\2\u0417\u0419"+ + "\5\u00fb~\2\u0418\u041a\5\u0111\u0089\2\u0419\u0418\3\2\2\2\u041a\u041b"+ + "\3\2\2\2\u041b\u0419\3\2\2\2\u041b\u041c\3\2\2\2\u041c\u041d\3\2\2\2\u041d"+ + "\u041e\5\u010f\u0088\2\u041e\u0420\3\2\2\2\u041f\u03f6\3\2\2\2\u041f\u0401"+ + "\3\2\2\2\u041f\u0408\3\2\2\2\u041f\u0417\3\2\2\2\u0420\u0104\3\2\2\2\u0421"+ + "\u0424\5\u0113\u008a\2\u0422\u0424\7a\2\2\u0423\u0421\3\2\2\2\u0423\u0422"+ + "\3\2\2\2\u0424\u042a\3\2\2\2\u0425\u0429\5\u0113\u008a\2\u0426\u0429\5"+ + "\u0111\u0089\2\u0427\u0429\t\3\2\2\u0428\u0425\3\2\2\2\u0428\u0426\3\2"+ + "\2\2\u0428\u0427\3\2\2\2\u0429\u042c\3\2\2\2\u042a\u0428\3\2\2\2\u042a"+ + "\u042b\3\2\2\2\u042b\u0106\3\2\2\2\u042c\u042a\3\2\2\2\u042d\u0431\5\u0111"+ + "\u0089\2\u042e\u0432\5\u0113\u008a\2\u042f\u0432\5\u0111\u0089\2\u0430"+ + "\u0432\t\3\2\2\u0431\u042e\3\2\2\2\u0431\u042f\3\2\2\2\u0431\u0430\3\2"+ + "\2\2\u0432\u0433\3\2\2\2\u0433\u0431\3\2\2\2\u0433\u0434\3\2\2\2\u0434"+ + "\u0108\3\2\2\2\u0435\u0439\5\u0113\u008a\2\u0436\u0439\5\u0111\u0089\2"+ + "\u0437\u0439\7a\2\2\u0438\u0435\3\2\2\2\u0438\u0436\3\2\2\2\u0438\u0437"+ + "\3\2\2\2\u0439\u043a\3\2\2\2\u043a\u0438\3\2\2\2\u043a\u043b\3\2\2\2\u043b"+ + "\u010a\3\2\2\2\u043c\u0442\7$\2\2\u043d\u0441\n\4\2\2\u043e\u043f\7$\2"+ + "\2\u043f\u0441\7$\2\2\u0440\u043d\3\2\2\2\u0440\u043e\3\2\2\2\u0441\u0444"+ + "\3\2\2\2\u0442\u0440\3\2\2\2\u0442\u0443\3\2\2\2\u0443\u0445\3\2\2\2\u0444"+ + "\u0442\3\2\2\2\u0445\u0446\7$\2\2\u0446\u010c\3\2\2\2\u0447\u044d\7b\2"+ + "\2\u0448\u044c\n\5\2\2\u0449\u044a\7b\2\2\u044a\u044c\7b\2\2\u044b\u0448"+ + "\3\2\2\2\u044b\u0449\3\2\2\2\u044c\u044f\3\2\2\2\u044d\u044b\3\2\2\2\u044d"+ + "\u044e\3\2\2\2\u044e\u0450\3\2\2\2\u044f\u044d\3\2\2\2\u0450\u0451\7b"+ + "\2\2\u0451\u010e\3\2\2\2\u0452\u0454\7G\2\2\u0453\u0455\t\6\2\2\u0454"+ + "\u0453\3\2\2\2\u0454\u0455\3\2\2\2\u0455\u0457\3\2\2\2\u0456\u0458\5\u0111"+ + "\u0089\2\u0457\u0456\3\2\2\2\u0458\u0459\3\2\2\2\u0459\u0457\3\2\2\2\u0459"+ + "\u045a\3\2\2\2\u045a\u0110\3\2\2\2\u045b\u045c\t\7\2\2\u045c\u0112\3\2"+ + "\2\2\u045d\u045e\t\b\2\2\u045e\u0114\3\2\2\2\u045f\u0460\7/\2\2\u0460"+ + "\u0461\7/\2\2\u0461\u0465\3\2\2\2\u0462\u0464\n\t\2\2\u0463\u0462\3\2"+ + "\2\2\u0464\u0467\3\2\2\2\u0465\u0463\3\2\2\2\u0465\u0466\3\2\2\2\u0466"+ + "\u0469\3\2\2\2\u0467\u0465\3\2\2\2\u0468\u046a\7\17\2\2\u0469\u0468\3"+ + "\2\2\2\u0469\u046a\3\2\2\2\u046a\u046c\3\2\2\2\u046b\u046d\7\f\2\2\u046c"+ + "\u046b\3\2\2\2\u046c\u046d\3\2\2\2\u046d\u046e\3\2\2\2\u046e\u046f\b\u008b"+ + "\2\2\u046f\u0116\3\2\2\2\u0470\u0471\7\61\2\2\u0471\u0472\7,\2\2\u0472"+ + "\u0477\3\2\2\2\u0473\u0476\5\u0117\u008c\2\u0474\u0476\13\2\2\2\u0475"+ + "\u0473\3\2\2\2\u0475\u0474\3\2\2\2\u0476\u0479\3\2\2\2\u0477\u0478\3\2"+ + "\2\2\u0477\u0475\3\2\2\2\u0478\u047a\3\2\2\2\u0479\u0477\3\2\2\2\u047a"+ + "\u047b\7,\2\2\u047b\u047c\7\61\2\2\u047c\u047d\3\2\2\2\u047d\u047e\b\u008c"+ + "\2\2\u047e\u0118\3\2\2\2\u047f\u0481\t\n\2\2\u0480\u047f\3\2\2\2\u0481"+ + "\u0482\3\2\2\2\u0482\u0480\3\2\2\2\u0482\u0483\3\2\2\2\u0483\u0484\3\2"+ + "\2\2\u0484\u0485\b\u008d\2\2\u0485\u011a\3\2\2\2\u0486\u0487\13\2\2\2"+ + "\u0487\u011c\3\2\2\2#\2\u03b6\u03c5\u03e9\u03eb\u03f3\u03f8\u03fe\u0405"+ + "\u040a\u0410\u0413\u041b\u041f\u0423\u0428\u042a\u0431\u0433\u0438\u043a"+ + "\u0440\u0442\u044b\u044d\u0454\u0459\u0465\u0469\u046c\u0475\u0477\u0482"+ + "\3\2\3\2"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java index 63cc1bd7a3f5..6d0c4a69f324 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java @@ -32,12 +32,12 @@ class SqlBaseParser extends Parser { TABLES=88, TEXT=89, THEN=90, TRUE=91, TO=92, TYPE=93, TYPES=94, USING=95, VERIFY=96, WHEN=97, WHERE=98, WITH=99, YEAR=100, YEARS=101, ESCAPE_ESC=102, FUNCTION_ESC=103, LIMIT_ESC=104, DATE_ESC=105, TIME_ESC=106, TIMESTAMP_ESC=107, - GUID_ESC=108, ESC_END=109, EQ=110, NULLEQ=111, NEQ=112, LT=113, LTE=114, - GT=115, GTE=116, PLUS=117, MINUS=118, ASTERISK=119, SLASH=120, PERCENT=121, - CAST_OP=122, CONCAT=123, DOT=124, PARAM=125, STRING=126, INTEGER_VALUE=127, - DECIMAL_VALUE=128, IDENTIFIER=129, DIGIT_IDENTIFIER=130, TABLE_IDENTIFIER=131, - QUOTED_IDENTIFIER=132, BACKQUOTED_IDENTIFIER=133, SIMPLE_COMMENT=134, - BRACKETED_COMMENT=135, WS=136, UNRECOGNIZED=137, DELIMITER=138; + GUID_ESC=108, ESC_START=109, ESC_END=110, EQ=111, NULLEQ=112, NEQ=113, + LT=114, LTE=115, GT=116, GTE=117, PLUS=118, MINUS=119, ASTERISK=120, SLASH=121, + PERCENT=122, CAST_OP=123, CONCAT=124, DOT=125, PARAM=126, STRING=127, + INTEGER_VALUE=128, DECIMAL_VALUE=129, IDENTIFIER=130, DIGIT_IDENTIFIER=131, + TABLE_IDENTIFIER=132, QUOTED_IDENTIFIER=133, BACKQUOTED_IDENTIFIER=134, + SIMPLE_COMMENT=135, BRACKETED_COMMENT=136, WS=137, UNRECOGNIZED=138, DELIMITER=139; public static final int RULE_singleStatement = 0, RULE_singleExpression = 1, RULE_statement = 2, RULE_query = 3, RULE_queryNoWith = 4, RULE_limitClause = 5, RULE_queryTerm = 6, @@ -89,9 +89,9 @@ class SqlBaseParser extends Parser { "'QUERY'", "'SCHEMAS'", "'SECOND'", "'SECONDS'", "'SELECT'", "'SHOW'", "'SYS'", "'TABLE'", "'TABLES'", "'TEXT'", "'THEN'", "'TRUE'", "'TO'", "'TYPE'", "'TYPES'", "'USING'", "'VERIFY'", "'WHEN'", "'WHERE'", "'WITH'", - "'YEAR'", "'YEARS'", "'{ESCAPE'", "'{FN'", "'{LIMIT'", "'{D'", "'{T'", - "'{TS'", "'{GUID'", "'}'", "'='", "'<=>'", null, "'<'", "'<='", "'>'", - "'>='", "'+'", "'-'", "'*'", "'/'", "'%'", "'::'", "'||'", "'.'", "'?'" + "'YEAR'", "'YEARS'", null, null, null, null, null, null, null, null, "'}'", + "'='", "'<=>'", null, "'<'", "'<='", "'>'", "'>='", "'+'", "'-'", "'*'", + "'/'", "'%'", "'::'", "'||'", "'.'", "'?'" }; private static final String[] _SYMBOLIC_NAMES = { null, null, null, null, null, "ALL", "ANALYZE", "ANALYZED", "AND", "ANY", @@ -108,11 +108,12 @@ class SqlBaseParser extends Parser { "SYS", "TABLE", "TABLES", "TEXT", "THEN", "TRUE", "TO", "TYPE", "TYPES", "USING", "VERIFY", "WHEN", "WHERE", "WITH", "YEAR", "YEARS", "ESCAPE_ESC", "FUNCTION_ESC", "LIMIT_ESC", "DATE_ESC", "TIME_ESC", "TIMESTAMP_ESC", - "GUID_ESC", "ESC_END", "EQ", "NULLEQ", "NEQ", "LT", "LTE", "GT", "GTE", - "PLUS", "MINUS", "ASTERISK", "SLASH", "PERCENT", "CAST_OP", "CONCAT", - "DOT", "PARAM", "STRING", "INTEGER_VALUE", "DECIMAL_VALUE", "IDENTIFIER", - "DIGIT_IDENTIFIER", "TABLE_IDENTIFIER", "QUOTED_IDENTIFIER", "BACKQUOTED_IDENTIFIER", - "SIMPLE_COMMENT", "BRACKETED_COMMENT", "WS", "UNRECOGNIZED", "DELIMITER" + "GUID_ESC", "ESC_START", "ESC_END", "EQ", "NULLEQ", "NEQ", "LT", "LTE", + "GT", "GTE", "PLUS", "MINUS", "ASTERISK", "SLASH", "PERCENT", "CAST_OP", + "CONCAT", "DOT", "PARAM", "STRING", "INTEGER_VALUE", "DECIMAL_VALUE", + "IDENTIFIER", "DIGIT_IDENTIFIER", "TABLE_IDENTIFIER", "QUOTED_IDENTIFIER", 
+ "BACKQUOTED_IDENTIFIER", "SIMPLE_COMMENT", "BRACKETED_COMMENT", "WS", + "UNRECOGNIZED", "DELIMITER" }; public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES); @@ -1215,7 +1216,7 @@ class SqlBaseParser extends Parser { match(TYPES); setState(235); _la = _input.LA(1); - if (((((_la - 117)) & ~0x3f) == 0 && ((1L << (_la - 117)) & ((1L << (PLUS - 117)) | (1L << (MINUS - 117)) | (1L << (INTEGER_VALUE - 117)) | (1L << (DECIMAL_VALUE - 117)))) != 0)) { + if (((((_la - 118)) & ~0x3f) == 0 && ((1L << (_la - 118)) & ((1L << (PLUS - 118)) | (1L << (MINUS - 118)) | (1L << (INTEGER_VALUE - 118)) | (1L << (DECIMAL_VALUE - 118)))) != 0)) { { setState(232); _la = _input.LA(1); @@ -2050,7 +2051,7 @@ class SqlBaseParser extends Parser { match(T__0); setState(345); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CASE) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CONVERT) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LEFT) | (1L << LIMIT) | (1L << MAPPED) | (1L << MATCH) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 66)) & ~0x3f) == 0 && ((1L << (_la - 66)) & ((1L << (NOT - 66)) | (1L << (NULL - 66)) | (1L << (OPTIMIZED - 66)) | (1L << (PARSED - 66)) | (1L << (PHYSICAL - 66)) | (1L << (PIVOT - 66)) | (1L << (PLAN - 66)) | (1L << (RIGHT - 66)) | (1L << (RLIKE - 66)) | (1L << (QUERY - 66)) | (1L << (SCHEMAS - 66)) | (1L << (SECOND - 66)) | (1L << (SHOW - 66)) | (1L << (SYS - 66)) | (1L << (TABLES - 66)) | (1L << (TEXT - 66)) | (1L << (TRUE - 66)) | (1L << (TYPE - 66)) | (1L << (TYPES - 66)) | (1L << (VERIFY - 66)) | (1L << (YEAR - 66)) | (1L << (FUNCTION_ESC - 66)) | (1L << (DATE_ESC - 66)) | (1L << (TIME_ESC - 66)) | (1L << (TIMESTAMP_ESC - 66)) | (1L << (GUID_ESC - 66)) | (1L << (PLUS - 66)) | (1L << (MINUS - 66)) | (1L << (ASTERISK - 66)) | (1L << (PARAM - 66)) | (1L << (STRING - 66)) | (1L << (INTEGER_VALUE - 66)) | (1L << (DECIMAL_VALUE - 66)) | (1L << (IDENTIFIER - 66)))) != 0) || ((((_la - 130)) & ~0x3f) == 0 && ((1L << (_la - 130)) & ((1L << (DIGIT_IDENTIFIER - 130)) | (1L << (QUOTED_IDENTIFIER - 130)) | (1L << (BACKQUOTED_IDENTIFIER - 130)))) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CASE) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CONVERT) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LEFT) | (1L << LIMIT) | (1L << MAPPED) | (1L << MATCH) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 66)) & ~0x3f) == 0 && ((1L << (_la - 66)) & ((1L << (NOT - 66)) | (1L << (NULL - 66)) | (1L << (OPTIMIZED - 66)) | (1L << (PARSED - 66)) | (1L << (PHYSICAL - 66)) | (1L << (PIVOT - 66)) | (1L << (PLAN - 66)) | (1L << (RIGHT - 66)) | (1L << (RLIKE - 66)) | (1L << (QUERY - 66)) | (1L << (SCHEMAS - 66)) | (1L << (SECOND - 66)) | (1L << (SHOW - 66)) | (1L << (SYS - 66)) | (1L << (TABLES - 66)) | (1L << (TEXT - 
66)) | (1L << (TRUE - 66)) | (1L << (TYPE - 66)) | (1L << (TYPES - 66)) | (1L << (VERIFY - 66)) | (1L << (YEAR - 66)) | (1L << (FUNCTION_ESC - 66)) | (1L << (DATE_ESC - 66)) | (1L << (TIME_ESC - 66)) | (1L << (TIMESTAMP_ESC - 66)) | (1L << (GUID_ESC - 66)) | (1L << (PLUS - 66)) | (1L << (MINUS - 66)) | (1L << (ASTERISK - 66)) | (1L << (PARAM - 66)) | (1L << (STRING - 66)) | (1L << (INTEGER_VALUE - 66)) | (1L << (DECIMAL_VALUE - 66)))) != 0) || ((((_la - 130)) & ~0x3f) == 0 && ((1L << (_la - 130)) & ((1L << (IDENTIFIER - 130)) | (1L << (DIGIT_IDENTIFIER - 130)) | (1L << (QUOTED_IDENTIFIER - 130)) | (1L << (BACKQUOTED_IDENTIFIER - 130)))) != 0)) { { setState(337); expression(); @@ -3080,7 +3081,7 @@ class SqlBaseParser extends Parser { valueExpression(0); setState(481); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << AS) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 70)) & ~0x3f) == 0 && ((1L << (_la - 70)) & ((1L << (OPTIMIZED - 70)) | (1L << (PARSED - 70)) | (1L << (PHYSICAL - 70)) | (1L << (PIVOT - 70)) | (1L << (PLAN - 70)) | (1L << (RLIKE - 70)) | (1L << (QUERY - 70)) | (1L << (SCHEMAS - 70)) | (1L << (SECOND - 70)) | (1L << (SHOW - 70)) | (1L << (SYS - 70)) | (1L << (TABLES - 70)) | (1L << (TEXT - 70)) | (1L << (TYPE - 70)) | (1L << (TYPES - 70)) | (1L << (VERIFY - 70)) | (1L << (YEAR - 70)) | (1L << (IDENTIFIER - 70)) | (1L << (DIGIT_IDENTIFIER - 70)) | (1L << (QUOTED_IDENTIFIER - 70)) | (1L << (BACKQUOTED_IDENTIFIER - 70)))) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << AS) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 70)) & ~0x3f) == 0 && ((1L << (_la - 70)) & ((1L << (OPTIMIZED - 70)) | (1L << (PARSED - 70)) | (1L << (PHYSICAL - 70)) | (1L << (PIVOT - 70)) | (1L << (PLAN - 70)) | (1L << (RLIKE - 70)) | (1L << (QUERY - 70)) | (1L << (SCHEMAS - 70)) | (1L << (SECOND - 70)) | (1L << (SHOW - 70)) | (1L << (SYS - 70)) | (1L << (TABLES - 70)) | (1L << (TEXT - 70)) | (1L << (TYPE - 70)) | (1L << (TYPES - 70)) | (1L << (VERIFY - 70)) | (1L << (YEAR - 70)) | (1L << (IDENTIFIER - 70)) | (1L << (DIGIT_IDENTIFIER - 70)) | (1L << (QUOTED_IDENTIFIER - 70)))) != 0) || _la==BACKQUOTED_IDENTIFIER) { { setState(478); _la = _input.LA(1); @@ -3938,6 +3939,7 @@ class SqlBaseParser extends Parser { return getRuleContext(StringContext.class,0); } public TerminalNode ESCAPE_ESC() { return getToken(SqlBaseParser.ESCAPE_ESC, 0); } + public TerminalNode ESC_END() { return getToken(SqlBaseParser.ESC_END, 0); } public PatternEscapeContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @@ -4242,7 +4244,7 @@ class SqlBaseParser extends Parser { setState(610); ((ArithmeticBinaryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); - if ( !(((((_la - 119)) 
& ~0x3f) == 0 && ((1L << (_la - 119)) & ((1L << (ASTERISK - 119)) | (1L << (SLASH - 119)) | (1L << (PERCENT - 119)))) != 0)) ) { + if ( !(((((_la - 120)) & ~0x3f) == 0 && ((1L << (_la - 120)) & ((1L << (ASTERISK - 120)) | (1L << (SLASH - 120)) | (1L << (PERCENT - 120)))) != 0)) ) { ((ArithmeticBinaryContext)_localctx).operator = (Token)_errHandler.recoverInline(this); } else { consume(); @@ -4607,7 +4609,7 @@ class SqlBaseParser extends Parser { _prevctx = _localctx; setState(632); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 70)) & ~0x3f) == 0 && ((1L << (_la - 70)) & ((1L << (OPTIMIZED - 70)) | (1L << (PARSED - 70)) | (1L << (PHYSICAL - 70)) | (1L << (PIVOT - 70)) | (1L << (PLAN - 70)) | (1L << (RLIKE - 70)) | (1L << (QUERY - 70)) | (1L << (SCHEMAS - 70)) | (1L << (SECOND - 70)) | (1L << (SHOW - 70)) | (1L << (SYS - 70)) | (1L << (TABLES - 70)) | (1L << (TEXT - 70)) | (1L << (TYPE - 70)) | (1L << (TYPES - 70)) | (1L << (VERIFY - 70)) | (1L << (YEAR - 70)) | (1L << (IDENTIFIER - 70)) | (1L << (DIGIT_IDENTIFIER - 70)) | (1L << (QUOTED_IDENTIFIER - 70)) | (1L << (BACKQUOTED_IDENTIFIER - 70)))) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 70)) & ~0x3f) == 0 && ((1L << (_la - 70)) & ((1L << (OPTIMIZED - 70)) | (1L << (PARSED - 70)) | (1L << (PHYSICAL - 70)) | (1L << (PIVOT - 70)) | (1L << (PLAN - 70)) | (1L << (RLIKE - 70)) | (1L << (QUERY - 70)) | (1L << (SCHEMAS - 70)) | (1L << (SECOND - 70)) | (1L << (SHOW - 70)) | (1L << (SYS - 70)) | (1L << (TABLES - 70)) | (1L << (TEXT - 70)) | (1L << (TYPE - 70)) | (1L << (TYPES - 70)) | (1L << (VERIFY - 70)) | (1L << (YEAR - 70)) | (1L << (IDENTIFIER - 70)) | (1L << (DIGIT_IDENTIFIER - 70)) | (1L << (QUOTED_IDENTIFIER - 70)))) != 0) || _la==BACKQUOTED_IDENTIFIER) { { setState(629); qualifiedName(); @@ -4673,7 +4675,7 @@ class SqlBaseParser extends Parser { match(CASE); setState(647); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CASE) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CONVERT) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LEFT) | (1L << LIMIT) | (1L << MAPPED) | (1L << MATCH) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 66)) & ~0x3f) == 0 && ((1L << (_la - 66)) & ((1L << (NOT - 66)) | (1L << (NULL - 66)) | (1L << (OPTIMIZED - 66)) | (1L << (PARSED - 66)) | 
(1L << (PHYSICAL - 66)) | (1L << (PIVOT - 66)) | (1L << (PLAN - 66)) | (1L << (RIGHT - 66)) | (1L << (RLIKE - 66)) | (1L << (QUERY - 66)) | (1L << (SCHEMAS - 66)) | (1L << (SECOND - 66)) | (1L << (SHOW - 66)) | (1L << (SYS - 66)) | (1L << (TABLES - 66)) | (1L << (TEXT - 66)) | (1L << (TRUE - 66)) | (1L << (TYPE - 66)) | (1L << (TYPES - 66)) | (1L << (VERIFY - 66)) | (1L << (YEAR - 66)) | (1L << (FUNCTION_ESC - 66)) | (1L << (DATE_ESC - 66)) | (1L << (TIME_ESC - 66)) | (1L << (TIMESTAMP_ESC - 66)) | (1L << (GUID_ESC - 66)) | (1L << (PLUS - 66)) | (1L << (MINUS - 66)) | (1L << (ASTERISK - 66)) | (1L << (PARAM - 66)) | (1L << (STRING - 66)) | (1L << (INTEGER_VALUE - 66)) | (1L << (DECIMAL_VALUE - 66)) | (1L << (IDENTIFIER - 66)))) != 0) || ((((_la - 130)) & ~0x3f) == 0 && ((1L << (_la - 130)) & ((1L << (DIGIT_IDENTIFIER - 130)) | (1L << (QUOTED_IDENTIFIER - 130)) | (1L << (BACKQUOTED_IDENTIFIER - 130)))) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CASE) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CONVERT) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LEFT) | (1L << LIMIT) | (1L << MAPPED) | (1L << MATCH) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 66)) & ~0x3f) == 0 && ((1L << (_la - 66)) & ((1L << (NOT - 66)) | (1L << (NULL - 66)) | (1L << (OPTIMIZED - 66)) | (1L << (PARSED - 66)) | (1L << (PHYSICAL - 66)) | (1L << (PIVOT - 66)) | (1L << (PLAN - 66)) | (1L << (RIGHT - 66)) | (1L << (RLIKE - 66)) | (1L << (QUERY - 66)) | (1L << (SCHEMAS - 66)) | (1L << (SECOND - 66)) | (1L << (SHOW - 66)) | (1L << (SYS - 66)) | (1L << (TABLES - 66)) | (1L << (TEXT - 66)) | (1L << (TRUE - 66)) | (1L << (TYPE - 66)) | (1L << (TYPES - 66)) | (1L << (VERIFY - 66)) | (1L << (YEAR - 66)) | (1L << (FUNCTION_ESC - 66)) | (1L << (DATE_ESC - 66)) | (1L << (TIME_ESC - 66)) | (1L << (TIMESTAMP_ESC - 66)) | (1L << (GUID_ESC - 66)) | (1L << (PLUS - 66)) | (1L << (MINUS - 66)) | (1L << (ASTERISK - 66)) | (1L << (PARAM - 66)) | (1L << (STRING - 66)) | (1L << (INTEGER_VALUE - 66)) | (1L << (DECIMAL_VALUE - 66)))) != 0) || ((((_la - 130)) & ~0x3f) == 0 && ((1L << (_la - 130)) & ((1L << (IDENTIFIER - 130)) | (1L << (DIGIT_IDENTIFIER - 130)) | (1L << (QUOTED_IDENTIFIER - 130)) | (1L << (BACKQUOTED_IDENTIFIER - 130)))) != 0)) { { setState(646); ((CaseContext)_localctx).operand = booleanExpression(0); @@ -5144,6 +5146,7 @@ class SqlBaseParser extends Parser { return getRuleContext(FunctionTemplateContext.class,0); } public TerminalNode FUNCTION_ESC() { return getToken(SqlBaseParser.FUNCTION_ESC, 0); } + public TerminalNode ESC_END() { return getToken(SqlBaseParser.ESC_END, 0); } public FunctionExpressionContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @@ -5292,7 +5295,7 @@ class SqlBaseParser extends Parser { match(T__0); setState(735); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ALL) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CASE) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CONVERT) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << DISTINCT) | (1L << EXECUTABLE) | (1L << EXISTS) | 
(1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LEFT) | (1L << LIMIT) | (1L << MAPPED) | (1L << MATCH) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 66)) & ~0x3f) == 0 && ((1L << (_la - 66)) & ((1L << (NOT - 66)) | (1L << (NULL - 66)) | (1L << (OPTIMIZED - 66)) | (1L << (PARSED - 66)) | (1L << (PHYSICAL - 66)) | (1L << (PIVOT - 66)) | (1L << (PLAN - 66)) | (1L << (RIGHT - 66)) | (1L << (RLIKE - 66)) | (1L << (QUERY - 66)) | (1L << (SCHEMAS - 66)) | (1L << (SECOND - 66)) | (1L << (SHOW - 66)) | (1L << (SYS - 66)) | (1L << (TABLES - 66)) | (1L << (TEXT - 66)) | (1L << (TRUE - 66)) | (1L << (TYPE - 66)) | (1L << (TYPES - 66)) | (1L << (VERIFY - 66)) | (1L << (YEAR - 66)) | (1L << (FUNCTION_ESC - 66)) | (1L << (DATE_ESC - 66)) | (1L << (TIME_ESC - 66)) | (1L << (TIMESTAMP_ESC - 66)) | (1L << (GUID_ESC - 66)) | (1L << (PLUS - 66)) | (1L << (MINUS - 66)) | (1L << (ASTERISK - 66)) | (1L << (PARAM - 66)) | (1L << (STRING - 66)) | (1L << (INTEGER_VALUE - 66)) | (1L << (DECIMAL_VALUE - 66)) | (1L << (IDENTIFIER - 66)))) != 0) || ((((_la - 130)) & ~0x3f) == 0 && ((1L << (_la - 130)) & ((1L << (DIGIT_IDENTIFIER - 130)) | (1L << (QUOTED_IDENTIFIER - 130)) | (1L << (BACKQUOTED_IDENTIFIER - 130)))) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ALL) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CASE) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CONVERT) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << DISTINCT) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LEFT) | (1L << LIMIT) | (1L << MAPPED) | (1L << MATCH) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 66)) & ~0x3f) == 0 && ((1L << (_la - 66)) & ((1L << (NOT - 66)) | (1L << (NULL - 66)) | (1L << (OPTIMIZED - 66)) | (1L << (PARSED - 66)) | (1L << (PHYSICAL - 66)) | (1L << (PIVOT - 66)) | (1L << (PLAN - 66)) | (1L << (RIGHT - 66)) | (1L << (RLIKE - 66)) | (1L << (QUERY - 66)) | (1L << (SCHEMAS - 66)) | (1L << (SECOND - 66)) | (1L << (SHOW - 66)) | (1L << (SYS - 66)) | (1L << (TABLES - 66)) | (1L << (TEXT - 66)) | (1L << (TRUE - 66)) | (1L << (TYPE - 66)) | (1L << (TYPES - 66)) | (1L << (VERIFY - 66)) | (1L << (YEAR - 66)) | (1L << (FUNCTION_ESC - 66)) | (1L << (DATE_ESC - 66)) | (1L << (TIME_ESC - 66)) | (1L << (TIMESTAMP_ESC - 66)) | (1L << (GUID_ESC - 66)) | (1L << (PLUS - 66)) | (1L << (MINUS - 66)) | (1L << (ASTERISK - 66)) | (1L << (PARAM - 66)) | (1L << (STRING - 66)) | (1L << (INTEGER_VALUE - 66)) | (1L << (DECIMAL_VALUE - 66)))) != 0) || ((((_la - 130)) & ~0x3f) == 0 && ((1L << (_la - 130)) & ((1L << (IDENTIFIER - 130)) | (1L << (DIGIT_IDENTIFIER - 130)) | (1L << (QUOTED_IDENTIFIER - 130)) | (1L << (BACKQUOTED_IDENTIFIER - 130)))) != 0)) { { setState(725); _la = _input.LA(1); @@ -5830,7 +5833,7 @@ class SqlBaseParser extends Parser { { setState(772); _la = _input.LA(1); - if ( !(((((_la - 110)) & ~0x3f) == 0 && ((1L << (_la - 110)) & ((1L << (EQ - 110)) | (1L << (NULLEQ - 110)) | (1L << (NEQ - 110)) | (1L << (LT - 110)) | (1L << (LTE - 110)) | (1L << (GT - 110)) | (1L << (GTE - 110)))) != 0)) ) { + if ( !(((((_la - 111)) & ~0x3f) == 0 && ((1L << (_la - 111)) & ((1L << (EQ - 111)) | (1L << 
(NULLEQ - 111)) | (1L << (NEQ - 111)) | (1L << (LT - 111)) | (1L << (LTE - 111)) | (1L << (GT - 111)) | (1L << (GTE - 111)))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); @@ -6335,7 +6338,7 @@ class SqlBaseParser extends Parser { { setState(810); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 70)) & ~0x3f) == 0 && ((1L << (_la - 70)) & ((1L << (OPTIMIZED - 70)) | (1L << (PARSED - 70)) | (1L << (PHYSICAL - 70)) | (1L << (PIVOT - 70)) | (1L << (PLAN - 70)) | (1L << (RLIKE - 70)) | (1L << (QUERY - 70)) | (1L << (SCHEMAS - 70)) | (1L << (SECOND - 70)) | (1L << (SHOW - 70)) | (1L << (SYS - 70)) | (1L << (TABLES - 70)) | (1L << (TEXT - 70)) | (1L << (TYPE - 70)) | (1L << (TYPES - 70)) | (1L << (VERIFY - 70)) | (1L << (YEAR - 70)) | (1L << (IDENTIFIER - 70)) | (1L << (DIGIT_IDENTIFIER - 70)) | (1L << (QUOTED_IDENTIFIER - 70)) | (1L << (BACKQUOTED_IDENTIFIER - 70)))) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << CURRENT_DATE) | (1L << CURRENT_TIME) | (1L << CURRENT_TIMESTAMP) | (1L << DAY) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FIRST) | (1L << FORMAT) | (1L << FULL) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << HOUR) | (1L << INTERVAL) | (1L << LAST) | (1L << LIMIT) | (1L << MAPPED) | (1L << MINUTE) | (1L << MONTH))) != 0) || ((((_la - 70)) & ~0x3f) == 0 && ((1L << (_la - 70)) & ((1L << (OPTIMIZED - 70)) | (1L << (PARSED - 70)) | (1L << (PHYSICAL - 70)) | (1L << (PIVOT - 70)) | (1L << (PLAN - 70)) | (1L << (RLIKE - 70)) | (1L << (QUERY - 70)) | (1L << (SCHEMAS - 70)) | (1L << (SECOND - 70)) | (1L << (SHOW - 70)) | (1L << (SYS - 70)) | (1L << (TABLES - 70)) | (1L << (TEXT - 70)) | (1L << (TYPE - 70)) | (1L << (TYPES - 70)) | (1L << (VERIFY - 70)) | (1L << (YEAR - 70)) | (1L << (IDENTIFIER - 70)) | (1L << (DIGIT_IDENTIFIER - 70)) | (1L << (QUOTED_IDENTIFIER - 70)))) != 0) || _la==BACKQUOTED_IDENTIFIER) { { setState(807); ((TableIdentifierContext)_localctx).catalog = identifier(); @@ -6912,7 +6915,7 @@ class SqlBaseParser extends Parser { } public static final String _serializedATN = - "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3\u008c\u034e\4\2\t"+ + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3\u008d\u034e\4\2\t"+ "\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13"+ "\t\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+ "\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31"+ @@ -6977,16 +6980,16 @@ class SqlBaseParser extends Parser { "\39\59\u0343\n9\3:\3:\3;\3;\3;\3;\3;\3<\3<\3<\2\5\66DF=\2\4\6\b\n\f\16"+ "\20\22\24\26\30\32\34\36 \"$&(*,.\60\62\64\668:<>@BDFHJLNPRTVXZ\\^`bd"+ "fhjlnprtv\2\22\b\2\7\7\t\t\"\"==HHLL\4\2..[[\4\2\t\tHH\4\2**\63\63\3\2"+ - "\34\35\3\2wx\4\2\7\7\u0081\u0081\4\2\r\r\34\34\4\2\'\'99\4\2\7\7\36\36"+ - "\3\2y{\3\2pv\4\2&&]]\7\2\31\32\61\62?BTUfg\3\2\177\u0080\31\2\b\t\23\24"+ - "\26\31\33\33\"\"$$\'\')),.\61\61\66\6699<=??AAHHLOQTWXZ[_`bbff\u03b1\2"+ - 
"x\3\2\2\2\4{\3\2\2\2\6\u00ef\3\2\2\2\b\u00fa\3\2\2\2\n\u00fe\3\2\2\2\f"+ - "\u0113\3\2\2\2\16\u011a\3\2\2\2\20\u011c\3\2\2\2\22\u0124\3\2\2\2\24\u0139"+ - "\3\2\2\2\26\u0146\3\2\2\2\30\u0150\3\2\2\2\32\u015f\3\2\2\2\34\u0161\3"+ - "\2\2\2\36\u0167\3\2\2\2 \u0169\3\2\2\2\"\u0171\3\2\2\2$\u0178\3\2\2\2"+ - "&\u018a\3\2\2\2(\u019b\3\2\2\2*\u01ab\3\2\2\2,\u01c9\3\2\2\2.\u01cb\3"+ - "\2\2\2\60\u01d6\3\2\2\2\62\u01de\3\2\2\2\64\u01e5\3\2\2\2\66\u0206\3\2"+ - "\2\28\u0217\3\2\2\2:\u021a\3\2\2\2<\u024c\3\2\2\2>\u024e\3\2\2\2@\u0251"+ + "\34\35\3\2xy\4\2\7\7\u0082\u0082\4\2\r\r\34\34\4\2\'\'99\4\2\7\7\36\36"+ + "\3\2z|\3\2qw\4\2&&]]\7\2\31\32\61\62?BTUfg\3\2\u0080\u0081\31\2\b\t\23"+ + "\24\26\31\33\33\"\"$$\'\')),.\61\61\66\6699<=??AAHHLOQTWXZ[_`bbff\u03b1"+ + "\2x\3\2\2\2\4{\3\2\2\2\6\u00ef\3\2\2\2\b\u00fa\3\2\2\2\n\u00fe\3\2\2\2"+ + "\f\u0113\3\2\2\2\16\u011a\3\2\2\2\20\u011c\3\2\2\2\22\u0124\3\2\2\2\24"+ + "\u0139\3\2\2\2\26\u0146\3\2\2\2\30\u0150\3\2\2\2\32\u015f\3\2\2\2\34\u0161"+ + "\3\2\2\2\36\u0167\3\2\2\2 \u0169\3\2\2\2\"\u0171\3\2\2\2$\u0178\3\2\2"+ + "\2&\u018a\3\2\2\2(\u019b\3\2\2\2*\u01ab\3\2\2\2,\u01c9\3\2\2\2.\u01cb"+ + "\3\2\2\2\60\u01d6\3\2\2\2\62\u01de\3\2\2\2\64\u01e5\3\2\2\2\66\u0206\3"+ + "\2\2\28\u0217\3\2\2\2:\u021a\3\2\2\2<\u024c\3\2\2\2>\u024e\3\2\2\2@\u0251"+ "\3\2\2\2B\u025b\3\2\2\2D\u0261\3\2\2\2F\u0296\3\2\2\2H\u02a3\3\2\2\2J"+ "\u02af\3\2\2\2L\u02b1\3\2\2\2N\u02b8\3\2\2\2P\u02c4\3\2\2\2R\u02c6\3\2"+ "\2\2T\u02d2\3\2\2\2V\u02d4\3\2\2\2X\u02e8\3\2\2\2Z\u0304\3\2\2\2\\\u0306"+ @@ -7046,7 +7049,7 @@ class SqlBaseParser extends Parser { "\u0106\3\2\2\2\u0109\u00ff\3\2\2\2\u0109\u010a\3\2\2\2\u010a\u010c\3\2"+ "\2\2\u010b\u010d\5\f\7\2\u010c\u010b\3\2\2\2\u010c\u010d\3\2\2\2\u010d"+ "\13\3\2\2\2\u010e\u010f\7<\2\2\u010f\u0114\t\b\2\2\u0110\u0111\7j\2\2"+ - "\u0111\u0112\t\b\2\2\u0112\u0114\7o\2\2\u0113\u010e\3\2\2\2\u0113\u0110"+ + "\u0111\u0112\t\b\2\2\u0112\u0114\7p\2\2\u0113\u010e\3\2\2\2\u0113\u0110"+ "\3\2\2\2\u0114\r\3\2\2\2\u0115\u011b\5\22\n\2\u0116\u0117\7\3\2\2\u0117"+ "\u0118\5\n\6\2\u0118\u0119\7\4\2\2\u0119\u011b\3\2\2\2\u011a\u0115\3\2"+ "\2\2\u011a\u0116\3\2\2\2\u011b\17\3\2\2\2\u011c\u011e\5\64\33\2\u011d"+ @@ -7155,7 +7158,7 @@ class SqlBaseParser extends Parser { "\2\2\u024d=\3\2\2\2\u024e\u024f\7;\2\2\u024f\u0250\5@!\2\u0250?\3\2\2"+ "\2\u0251\u0253\5r:\2\u0252\u0254\5B\"\2\u0253\u0252\3\2\2\2\u0253\u0254"+ "\3\2\2\2\u0254A\3\2\2\2\u0255\u0256\7!\2\2\u0256\u025c\5r:\2\u0257\u0258"+ - "\7h\2\2\u0258\u0259\5r:\2\u0259\u025a\7o\2\2\u025a\u025c\3\2\2\2\u025b"+ + "\7h\2\2\u0258\u0259\5r:\2\u0259\u025a\7p\2\2\u025a\u025c\3\2\2\2\u025b"+ "\u0255\3\2\2\2\u025b\u0257\3\2\2\2\u025cC\3\2\2\2\u025d\u025e\b#\1\2\u025e"+ "\u0262\5F$\2\u025f\u0260\t\7\2\2\u0260\u0262\5D#\6\u0261\u025d\3\2\2\2"+ "\u0261\u025f\3\2\2\2\u0262\u026f\3\2\2\2\u0263\u0264\f\5\2\2\u0264\u0265"+ @@ -7165,37 +7168,37 @@ class SqlBaseParser extends Parser { "\3\2\2\2\u026e\u0271\3\2\2\2\u026f\u026d\3\2\2\2\u026f\u0270\3\2\2\2\u0270"+ "E\3\2\2\2\u0271\u026f\3\2\2\2\u0272\u0273\b$\1\2\u0273\u0297\5J&\2\u0274"+ "\u0297\5P)\2\u0275\u0297\5H%\2\u0276\u0297\5Z.\2\u0277\u0278\5f\64\2\u0278"+ - "\u0279\7~\2\2\u0279\u027b\3\2\2\2\u027a\u0277\3\2\2\2\u027a\u027b\3\2"+ - "\2\2\u027b\u027c\3\2\2\2\u027c\u0297\7y\2\2\u027d\u0297\5T+\2\u027e\u027f"+ - "\7\3\2\2\u027f\u0280\5\b\5\2\u0280\u0281\7\4\2\2\u0281\u0297\3\2\2\2\u0282"+ - "\u0297\5f\64\2\u0283\u0284\7\3\2\2\u0284\u0285\5\64\33\2\u0285\u0286\7"+ - "\4\2\2\u0286\u0297\3\2\2\2\u0287\u0289\7\20\2\2\u0288\u028a\5\66\34\2"+ - 
"\u0289\u0288\3\2\2\2\u0289\u028a\3\2\2\2\u028a\u028c\3\2\2\2\u028b\u028d"+ - "\5t;\2\u028c\u028b\3\2\2\2\u028d\u028e\3\2\2\2\u028e\u028c\3\2\2\2\u028e"+ - "\u028f\3\2\2\2\u028f\u0292\3\2\2\2\u0290\u0291\7\37\2\2\u0291\u0293\5"+ - "\66\34\2\u0292\u0290\3\2\2\2\u0292\u0293\3\2\2\2\u0293\u0294\3\2\2\2\u0294"+ - "\u0295\7 \2\2\u0295\u0297\3\2\2\2\u0296\u0272\3\2\2\2\u0296\u0274\3\2"+ - "\2\2\u0296\u0275\3\2\2\2\u0296\u0276\3\2\2\2\u0296\u027a\3\2\2\2\u0296"+ - "\u027d\3\2\2\2\u0296\u027e\3\2\2\2\u0296\u0282\3\2\2\2\u0296\u0283\3\2"+ - "\2\2\u0296\u0287\3\2\2\2\u0297\u029d\3\2\2\2\u0298\u0299\f\f\2\2\u0299"+ - "\u029a\7|\2\2\u029a\u029c\5d\63\2\u029b\u0298\3\2\2\2\u029c\u029f\3\2"+ - "\2\2\u029d\u029b\3\2\2\2\u029d\u029e\3\2\2\2\u029eG\3\2\2\2\u029f\u029d"+ - "\3\2\2\2\u02a0\u02a4\7\30\2\2\u02a1\u02a4\7\26\2\2\u02a2\u02a4\7\27\2"+ - "\2\u02a3\u02a0\3\2\2\2\u02a3\u02a1\3\2\2\2\u02a3\u02a2\3\2\2\2\u02a4I"+ - "\3\2\2\2\u02a5\u02b0\5L\'\2\u02a6\u02a7\7i\2\2\u02a7\u02a8\5L\'\2\u02a8"+ - "\u02a9\7o\2\2\u02a9\u02b0\3\2\2\2\u02aa\u02b0\5N(\2\u02ab\u02ac\7i\2\2"+ - "\u02ac\u02ad\5N(\2\u02ad\u02ae\7o\2\2\u02ae\u02b0\3\2\2\2\u02af\u02a5"+ - "\3\2\2\2\u02af\u02a6\3\2\2\2\u02af\u02aa\3\2\2\2\u02af\u02ab\3\2\2\2\u02b0"+ - "K\3\2\2\2\u02b1\u02b2\7\21\2\2\u02b2\u02b3\7\3\2\2\u02b3\u02b4\5\64\33"+ - "\2\u02b4\u02b5\7\f\2\2\u02b5\u02b6\5d\63\2\u02b6\u02b7\7\4\2\2\u02b7M"+ - "\3\2\2\2\u02b8\u02b9\7\25\2\2\u02b9\u02ba\7\3\2\2\u02ba\u02bb\5\64\33"+ - "\2\u02bb\u02bc\7\5\2\2\u02bc\u02bd\5d\63\2\u02bd\u02be\7\4\2\2\u02beO"+ - "\3\2\2\2\u02bf\u02c5\5R*\2\u02c0\u02c1\7i\2\2\u02c1\u02c2\5R*\2\u02c2"+ - "\u02c3\7o\2\2\u02c3\u02c5\3\2\2\2\u02c4\u02bf\3\2\2\2\u02c4\u02c0\3\2"+ + "\u0279\7\177\2\2\u0279\u027b\3\2\2\2\u027a\u0277\3\2\2\2\u027a\u027b\3"+ + "\2\2\2\u027b\u027c\3\2\2\2\u027c\u0297\7z\2\2\u027d\u0297\5T+\2\u027e"+ + "\u027f\7\3\2\2\u027f\u0280\5\b\5\2\u0280\u0281\7\4\2\2\u0281\u0297\3\2"+ + "\2\2\u0282\u0297\5f\64\2\u0283\u0284\7\3\2\2\u0284\u0285\5\64\33\2\u0285"+ + "\u0286\7\4\2\2\u0286\u0297\3\2\2\2\u0287\u0289\7\20\2\2\u0288\u028a\5"+ + "\66\34\2\u0289\u0288\3\2\2\2\u0289\u028a\3\2\2\2\u028a\u028c\3\2\2\2\u028b"+ + "\u028d\5t;\2\u028c\u028b\3\2\2\2\u028d\u028e\3\2\2\2\u028e\u028c\3\2\2"+ + "\2\u028e\u028f\3\2\2\2\u028f\u0292\3\2\2\2\u0290\u0291\7\37\2\2\u0291"+ + "\u0293\5\66\34\2\u0292\u0290\3\2\2\2\u0292\u0293\3\2\2\2\u0293\u0294\3"+ + "\2\2\2\u0294\u0295\7 \2\2\u0295\u0297\3\2\2\2\u0296\u0272\3\2\2\2\u0296"+ + "\u0274\3\2\2\2\u0296\u0275\3\2\2\2\u0296\u0276\3\2\2\2\u0296\u027a\3\2"+ + "\2\2\u0296\u027d\3\2\2\2\u0296\u027e\3\2\2\2\u0296\u0282\3\2\2\2\u0296"+ + "\u0283\3\2\2\2\u0296\u0287\3\2\2\2\u0297\u029d\3\2\2\2\u0298\u0299\f\f"+ + "\2\2\u0299\u029a\7}\2\2\u029a\u029c\5d\63\2\u029b\u0298\3\2\2\2\u029c"+ + "\u029f\3\2\2\2\u029d\u029b\3\2\2\2\u029d\u029e\3\2\2\2\u029eG\3\2\2\2"+ + "\u029f\u029d\3\2\2\2\u02a0\u02a4\7\30\2\2\u02a1\u02a4\7\26\2\2\u02a2\u02a4"+ + "\7\27\2\2\u02a3\u02a0\3\2\2\2\u02a3\u02a1\3\2\2\2\u02a3\u02a2\3\2\2\2"+ + "\u02a4I\3\2\2\2\u02a5\u02b0\5L\'\2\u02a6\u02a7\7i\2\2\u02a7\u02a8\5L\'"+ + "\2\u02a8\u02a9\7p\2\2\u02a9\u02b0\3\2\2\2\u02aa\u02b0\5N(\2\u02ab\u02ac"+ + "\7i\2\2\u02ac\u02ad\5N(\2\u02ad\u02ae\7p\2\2\u02ae\u02b0\3\2\2\2\u02af"+ + "\u02a5\3\2\2\2\u02af\u02a6\3\2\2\2\u02af\u02aa\3\2\2\2\u02af\u02ab\3\2"+ + "\2\2\u02b0K\3\2\2\2\u02b1\u02b2\7\21\2\2\u02b2\u02b3\7\3\2\2\u02b3\u02b4"+ + "\5\64\33\2\u02b4\u02b5\7\f\2\2\u02b5\u02b6\5d\63\2\u02b6\u02b7\7\4\2\2"+ + "\u02b7M\3\2\2\2\u02b8\u02b9\7\25\2\2\u02b9\u02ba\7\3\2\2\u02ba\u02bb\5"+ + 
"\64\33\2\u02bb\u02bc\7\5\2\2\u02bc\u02bd\5d\63\2\u02bd\u02be\7\4\2\2\u02be"+ + "O\3\2\2\2\u02bf\u02c5\5R*\2\u02c0\u02c1\7i\2\2\u02c1\u02c2\5R*\2\u02c2"+ + "\u02c3\7p\2\2\u02c3\u02c5\3\2\2\2\u02c4\u02bf\3\2\2\2\u02c4\u02c0\3\2"+ "\2\2\u02c5Q\3\2\2\2\u02c6\u02c7\7%\2\2\u02c7\u02c8\7\3\2\2\u02c8\u02c9"+ "\5h\65\2\u02c9\u02ca\7*\2\2\u02ca\u02cb\5D#\2\u02cb\u02cc\7\4\2\2\u02cc"+ "S\3\2\2\2\u02cd\u02d3\5V,\2\u02ce\u02cf\7i\2\2\u02cf\u02d0\5V,\2\u02d0"+ - "\u02d1\7o\2\2\u02d1\u02d3\3\2\2\2\u02d2\u02cd\3\2\2\2\u02d2\u02ce\3\2"+ + "\u02d1\7p\2\2\u02d1\u02d3\3\2\2\2\u02d2\u02cd\3\2\2\2\u02d2\u02ce\3\2"+ "\2\2\u02d3U\3\2\2\2\u02d4\u02d5\5X-\2\u02d5\u02e1\7\3\2\2\u02d6\u02d8"+ "\5\36\20\2\u02d7\u02d6\3\2\2\2\u02d7\u02d8\3\2\2\2\u02d8\u02d9\3\2\2\2"+ "\u02d9\u02de\5\64\33\2\u02da\u02db\7\5\2\2\u02db\u02dd\5\64\33\2\u02dc"+ @@ -7205,13 +7208,13 @@ class SqlBaseParser extends Parser { "\u02e5\u02e9\7:\2\2\u02e6\u02e9\7P\2\2\u02e7\u02e9\5h\65\2\u02e8\u02e5"+ "\3\2\2\2\u02e8\u02e6\3\2\2\2\u02e8\u02e7\3\2\2\2\u02e9Y\3\2\2\2\u02ea"+ "\u0305\7E\2\2\u02eb\u0305\5`\61\2\u02ec\u0305\5p9\2\u02ed\u0305\5^\60"+ - "\2\u02ee\u02f0\7\u0080\2\2\u02ef\u02ee\3\2\2\2\u02f0\u02f1\3\2\2\2\u02f1"+ - "\u02ef\3\2\2\2\u02f1\u02f2\3\2\2\2\u02f2\u0305\3\2\2\2\u02f3\u0305\7\177"+ - "\2\2\u02f4\u02f5\7k\2\2\u02f5\u02f6\5r:\2\u02f6\u02f7\7o\2\2\u02f7\u0305"+ - "\3\2\2\2\u02f8\u02f9\7l\2\2\u02f9\u02fa\5r:\2\u02fa\u02fb\7o\2\2\u02fb"+ - "\u0305\3\2\2\2\u02fc\u02fd\7m\2\2\u02fd\u02fe\5r:\2\u02fe\u02ff\7o\2\2"+ + "\2\u02ee\u02f0\7\u0081\2\2\u02ef\u02ee\3\2\2\2\u02f0\u02f1\3\2\2\2\u02f1"+ + "\u02ef\3\2\2\2\u02f1\u02f2\3\2\2\2\u02f2\u0305\3\2\2\2\u02f3\u0305\7\u0080"+ + "\2\2\u02f4\u02f5\7k\2\2\u02f5\u02f6\5r:\2\u02f6\u02f7\7p\2\2\u02f7\u0305"+ + "\3\2\2\2\u02f8\u02f9\7l\2\2\u02f9\u02fa\5r:\2\u02fa\u02fb\7p\2\2\u02fb"+ + "\u0305\3\2\2\2\u02fc\u02fd\7m\2\2\u02fd\u02fe\5r:\2\u02fe\u02ff\7p\2\2"+ "\u02ff\u0305\3\2\2\2\u0300\u0301\7n\2\2\u0301\u0302\5r:\2\u0302\u0303"+ - "\7o\2\2\u0303\u0305\3\2\2\2\u0304\u02ea\3\2\2\2\u0304\u02eb\3\2\2\2\u0304"+ + "\7p\2\2\u0303\u0305\3\2\2\2\u0304\u02ea\3\2\2\2\u0304\u02eb\3\2\2\2\u0304"+ "\u02ec\3\2\2\2\u0304\u02ed\3\2\2\2\u0304\u02ef\3\2\2\2\u0304\u02f3\3\2"+ "\2\2\u0304\u02f4\3\2\2\2\u0304\u02f8\3\2\2\2\u0304\u02fc\3\2\2\2\u0304"+ "\u0300\3\2\2\2\u0305[\3\2\2\2\u0306\u0307\t\r\2\2\u0307]\3\2\2\2\u0308"+ @@ -7221,20 +7224,20 @@ class SqlBaseParser extends Parser { "\u0312\3\2\2\2\u0312\u0315\5b\62\2\u0313\u0314\7^\2\2\u0314\u0316\5b\62"+ "\2\u0315\u0313\3\2\2\2\u0315\u0316\3\2\2\2\u0316a\3\2\2\2\u0317\u0318"+ "\t\17\2\2\u0318c\3\2\2\2\u0319\u031a\5h\65\2\u031ae\3\2\2\2\u031b\u031c"+ - "\5h\65\2\u031c\u031d\7~\2\2\u031d\u031f\3\2\2\2\u031e\u031b\3\2\2\2\u031f"+ - "\u0322\3\2\2\2\u0320\u031e\3\2\2\2\u0320\u0321\3\2\2\2\u0321\u0323\3\2"+ - "\2\2\u0322\u0320\3\2\2\2\u0323\u0324\5h\65\2\u0324g\3\2\2\2\u0325\u0328"+ - "\5l\67\2\u0326\u0328\5n8\2\u0327\u0325\3\2\2\2\u0327\u0326\3\2\2\2\u0328"+ - "i\3\2\2\2\u0329\u032a\5h\65\2\u032a\u032b\7\6\2\2\u032b\u032d\3\2\2\2"+ - "\u032c\u0329\3\2\2\2\u032c\u032d\3\2\2\2\u032d\u032e\3\2\2\2\u032e\u0336"+ - "\7\u0085\2\2\u032f\u0330\5h\65\2\u0330\u0331\7\6\2\2\u0331\u0333\3\2\2"+ - "\2\u0332\u032f\3\2\2\2\u0332\u0333\3\2\2\2\u0333\u0334\3\2\2\2\u0334\u0336"+ - "\5h\65\2\u0335\u032c\3\2\2\2\u0335\u0332\3\2\2\2\u0336k\3\2\2\2\u0337"+ - "\u033a\7\u0086\2\2\u0338\u033a\7\u0087\2\2\u0339\u0337\3\2\2\2\u0339\u0338"+ - "\3\2\2\2\u033am\3\2\2\2\u033b\u033f\7\u0083\2\2\u033c\u033f\5v<\2\u033d"+ - "\u033f\7\u0084\2\2\u033e\u033b\3\2\2\2\u033e\u033c\3\2\2\2\u033e\u033d"+ - 
"\3\2\2\2\u033fo\3\2\2\2\u0340\u0343\7\u0082\2\2\u0341\u0343\7\u0081\2"+ - "\2\u0342\u0340\3\2\2\2\u0342\u0341\3\2\2\2\u0343q\3\2\2\2\u0344\u0345"+ + "\5h\65\2\u031c\u031d\7\177\2\2\u031d\u031f\3\2\2\2\u031e\u031b\3\2\2\2"+ + "\u031f\u0322\3\2\2\2\u0320\u031e\3\2\2\2\u0320\u0321\3\2\2\2\u0321\u0323"+ + "\3\2\2\2\u0322\u0320\3\2\2\2\u0323\u0324\5h\65\2\u0324g\3\2\2\2\u0325"+ + "\u0328\5l\67\2\u0326\u0328\5n8\2\u0327\u0325\3\2\2\2\u0327\u0326\3\2\2"+ + "\2\u0328i\3\2\2\2\u0329\u032a\5h\65\2\u032a\u032b\7\6\2\2\u032b\u032d"+ + "\3\2\2\2\u032c\u0329\3\2\2\2\u032c\u032d\3\2\2\2\u032d\u032e\3\2\2\2\u032e"+ + "\u0336\7\u0086\2\2\u032f\u0330\5h\65\2\u0330\u0331\7\6\2\2\u0331\u0333"+ + "\3\2\2\2\u0332\u032f\3\2\2\2\u0332\u0333\3\2\2\2\u0333\u0334\3\2\2\2\u0334"+ + "\u0336\5h\65\2\u0335\u032c\3\2\2\2\u0335\u0332\3\2\2\2\u0336k\3\2\2\2"+ + "\u0337\u033a\7\u0087\2\2\u0338\u033a\7\u0088\2\2\u0339\u0337\3\2\2\2\u0339"+ + "\u0338\3\2\2\2\u033am\3\2\2\2\u033b\u033f\7\u0084\2\2\u033c\u033f\5v<"+ + "\2\u033d\u033f\7\u0085\2\2\u033e\u033b\3\2\2\2\u033e\u033c\3\2\2\2\u033e"+ + "\u033d\3\2\2\2\u033fo\3\2\2\2\u0340\u0343\7\u0083\2\2\u0341\u0343\7\u0082"+ + "\2\2\u0342\u0340\3\2\2\2\u0342\u0341\3\2\2\2\u0343q\3\2\2\2\u0344\u0345"+ "\t\20\2\2\u0345s\3\2\2\2\u0346\u0347\7c\2\2\u0347\u0348\5\64\33\2\u0348"+ "\u0349\7\\\2\2\u0349\u034a\5\64\33\2\u034au\3\2\2\2\u034b\u034c\t\21\2"+ "\2\u034cw\3\2\2\2s\u0087\u0089\u008d\u0096\u0098\u009c\u00a3\u00a7\u00ad"+ diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/EscapedFunctionsTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/EscapedFunctionsTests.java index c22c9c570296..d4f194665344 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/EscapedFunctionsTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/EscapedFunctionsTests.java @@ -22,42 +22,58 @@ import org.junit.Assert; import java.util.List; import java.util.Locale; +import java.util.StringJoiner; import static java.lang.String.format; import static java.util.Arrays.asList; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.matchesPattern; public class EscapedFunctionsTests extends ESTestCase { private final SqlParser parser = new SqlParser(); + private String randomWhitespaces() { + StringJoiner sj = new StringJoiner(""); + for (int i = 0; i < randomInt(10); i++) { + sj.add(randomFrom(" ", "\t", "\r", "\n")); + } + return sj.toString(); + } + + private String buildExpression(String escape, String pattern, Object value) { + return format(Locale.ROOT, "{" + randomWhitespaces() + escape + " " + randomWhitespaces() + + pattern + randomWhitespaces() + "}", value); + } + private Literal dateLiteral(String date) { - Expression exp = parser.createExpression(format(Locale.ROOT, "{d '%s'}", date)); + Expression exp = parser.createExpression(buildExpression("d", "'%s'", date)); assertThat(exp, instanceOf(Expression.class)); return (Literal) exp; } private Literal timeLiteral(String date) { - Expression exp = parser.createExpression(format(Locale.ROOT, "{t '%s'}", date)); + Expression exp = parser.createExpression(buildExpression("t", "'%s'", date)); assertThat(exp, instanceOf(Expression.class)); return (Literal) exp; } private Literal timestampLiteral(String date) { - Expression exp = parser.createExpression(format(Locale.ROOT, "{ts '%s'}", date)); + Expression exp = 
parser.createExpression(buildExpression("ts", "'%s'", date)); assertThat(exp, instanceOf(Expression.class)); return (Literal) exp; } - private Literal guidLiteral(String date) { - Expression exp = parser.createExpression(format(Locale.ROOT, "{guid '%s'}", date)); + private Literal guidLiteral(String guid) { + Expression exp = parser.createExpression(buildExpression("guid", "'%s'", guid)); assertThat(exp, instanceOf(Expression.class)); return (Literal) exp; } private Limit limit(int limit) { - LogicalPlan plan = parser.createStatement(format(Locale.ROOT, "SELECT * FROM emp {limit %d}", limit)); + LogicalPlan plan = parser.createStatement("SELECT * FROM emp " + buildExpression("limit", "%d", limit)); assertThat(plan, instanceOf(With.class)); With with = (With) plan; Limit limitPlan = (Limit) (with.child()); @@ -66,25 +82,31 @@ public class EscapedFunctionsTests extends ESTestCase { } private LikePattern likeEscape(String like, String character) { - Expression exp = parser.createExpression(format(Locale.ROOT, "exp LIKE '%s' {escape '%s'}", like, character)); + Expression exp = parser.createExpression(format(Locale.ROOT, "exp LIKE '%s' ", like) + + buildExpression("escape", "'%s'", character)); assertThat(exp, instanceOf(Like.class)); return ((Like) exp).pattern(); } private Function function(String name) { - Expression exp = parser.createExpression(format(Locale.ROOT, "{fn %s}", name)); + Expression exp = parser.createExpression( + format(Locale.ROOT, "{" + randomWhitespaces() + "fn" + randomWhitespaces() + "%s" + randomWhitespaces() + "}", name)); assertThat(exp, instanceOf(Function.class)); return (Function) exp; } + private void assertFunction(String name, String result) { + String escapedName = name.replace("(", "\\(").replace(")", "\\)").replace("{", "\\{").replace("}", "\\}"); + assertThat(result, matchesPattern("\\{\\s*fn\\s*" + escapedName + "\\s*}")); + } public void testFunctionNoArg() { Function f = function("SCORE()"); - assertEquals("{fn SCORE()}", f.sourceText()); + assertFunction("SCORE()", f.sourceText()); } public void testFunctionOneArg() { Function f = function("ABS(foo)"); - assertEquals("{fn ABS(foo)}", f.sourceText()); + assertFunction("ABS(foo)", f.sourceText()); assertEquals(1, f.arguments().size()); Expression arg = f.arguments().get(0); assertThat(arg, instanceOf(UnresolvedAttribute.class)); @@ -94,7 +116,7 @@ public class EscapedFunctionsTests extends ESTestCase { public void testFunctionOneArgFunction() { Function f = function("ABS({fn SCORE()})"); - assertEquals("{fn ABS({fn SCORE()})}", f.sourceText()); + assertFunction("ABS({fn SCORE()})", f.sourceText()); assertEquals(1, f.arguments().size()); Expression arg = f.arguments().get(0); assertThat(arg, instanceOf(UnresolvedFunction.class)); @@ -120,7 +142,7 @@ public class EscapedFunctionsTests extends ESTestCase { public void testFunctionWithFunctionWithArg() { Function f = function("POWER(foo, {fn POWER({fn SCORE()}, {fN SCORE()})})"); - assertEquals("{fn POWER(foo, {fn POWER({fn SCORE()}, {fN SCORE()})})}", f.sourceText()); + assertFunction("POWER(foo, {fn POWER({fn SCORE()}, {fN SCORE()})})", f.sourceText()); assertEquals(2, f.arguments().size()); Expression arg = f.arguments().get(1); assertThat(arg, instanceOf(UnresolvedFunction.class)); @@ -213,17 +235,17 @@ public class EscapedFunctionsTests extends ESTestCase { public void testGUIDValidationHexa() { ParsingException ex = expectThrows(ParsingException.class, () -> guidLiteral("12345678-90ab-cdef-0123-456789abcdeH")); - assertEquals("line 1:8: Invalid GUID, 
expected hexadecimal at offset[35], found [H]", ex.getMessage()); + assertThat(ex.getMessage(), endsWith(": Invalid GUID, expected hexadecimal at offset[35], found [H]")); } public void testGUIDValidationGroups() { ParsingException ex = expectThrows(ParsingException.class, () -> guidLiteral("12345678A90ab-cdef-0123-456789abcdeH")); - assertEquals("line 1:8: Invalid GUID, expected group separator at offset [8], found [A]", ex.getMessage()); + assertThat(ex.getMessage(), endsWith(": Invalid GUID, expected group separator at offset [8], found [A]")); } public void testGUIDValidationLength() { ParsingException ex = expectThrows(ParsingException.class, () -> guidLiteral("12345678A90")); - assertEquals("line 1:8: Invalid GUID, too short", ex.getMessage()); + assertThat(ex.getMessage(), endsWith(": Invalid GUID, too short")); } public void testCurrentTimestampAsEscapedExpression() { From 78e7a2e7284659807ad35d7bb5f7a5dea145e063 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Mon, 7 Oct 2019 15:13:38 +0200 Subject: [PATCH 24/55] Update deprecation logging doc with logger configuration (#47649) Explicitly adds a configuration snippet to change logging level --- docs/reference/setup/logging-config.asciidoc | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/reference/setup/logging-config.asciidoc b/docs/reference/setup/logging-config.asciidoc index 4cc2615d214e..d1705a887bcd 100644 --- a/docs/reference/setup/logging-config.asciidoc +++ b/docs/reference/setup/logging-config.asciidoc @@ -211,7 +211,13 @@ logs to roll and compress after 1 GB, and to preserve a maximum of five log files (four rolled logs, and the active log). You can disable it in the `config/log4j2.properties` file by setting the deprecation -log level to `error`. +log level to `error` like this: +[source,properties] +-------------------------------------------------- +logger.deprecation.name = org.elasticsearch.deprecation +logger.deprecation.level = error +-------------------------------------------------- + You can identify what is triggering deprecated functionality if `X-Opaque-Id` was used as an HTTP header. The user ID is included in the `X-Opaque-ID` field in deprecation JSON logs. @@ -265,4 +271,4 @@ appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:e appender.rolling.layout.type = PatternLayout appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz --------------------------------------------------- \ No newline at end of file +-------------------------------------------------- From 61ff51a233a2cc12c5ddca421bdc0811b4f3a30e Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Mon, 7 Oct 2019 09:14:49 -0400 Subject: [PATCH 25/55] [DOCS] Reformat clear cache API docs (#46512) * [DOCS] Reformat clear cache API docs * iter --- docs/reference/indices/clearcache.asciidoc | 145 ++++++++++++++++----- 1 file changed, 116 insertions(+), 29 deletions(-) diff --git a/docs/reference/indices/clearcache.asciidoc b/docs/reference/indices/clearcache.asciidoc index b79530f2e467..732f2928918d 100644 --- a/docs/reference/indices/clearcache.asciidoc +++ b/docs/reference/indices/clearcache.asciidoc @@ -4,53 +4,140 @@ Clear cache ++++ -The clear cache API allows to clear either all caches or specific cached -associated with one or more indices. +Clears caches for one or more indices. 
[source,console] --------------------------------------------------- +---- POST /twitter/_cache/clear --------------------------------------------------- +---- // TEST[setup:twitter] -The API, by default, will clear all caches. Specific caches can be cleaned -explicitly by setting the `query`, `fielddata` or `request` url parameter to `true`. + +[[clear-cache-api-request]] +==== {api-request-title} + +`POST //_cache/clear` + +`POST /_cache/clear` + + +[[clear-cache-api-path-params]] +==== {api-path-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index] + + +[[clear-cache-api-query-params]] +==== {api-query-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] ++ +Defaults to `open`. + +`fielddata`:: ++ +-- +(Optional, boolean) +If `true`, +clears the fields cache. + +Use the `fields` parameter +to clear the cache of specific fields only. +-- + +`fields`:: ++ +-- +(Optional, string) +Comma-separated list of field names +used to limit the `fielddata` parameter. + +Defaults to all fields. + +NOTE: This parameter does *not* support objects +or field aliases. +-- + + +`index`:: +(Optional, string) +Comma-separated list of index names +used to limit the request. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] + +`query`:: +(Optional, boolean) +If `true`, +clears the query cache. + +`request`:: +(Optional, boolean) +If `true`, +clears the request cache. + + +[[clear-cache-api-example]] +==== {api-examples-title} + + +[[clear-cache-api-specific-ex]] +===== Clear a specific cache + +By default, +the clear cache API clears all caches. +You can clear only specific caches +by setting the following query parameters to `true`: + +* `fielddata` +* `query` +* `request` [source,console] --------------------------------------------------- -POST /twitter/_cache/clear?query=true <1> -POST /twitter/_cache/clear?request=true <2> -POST /twitter/_cache/clear?fielddata=true <3> --------------------------------------------------- +---- +POST /twitter/_cache/clear?fielddata=true <1> +POST /twitter/_cache/clear?query=true <2> +POST /twitter/_cache/clear?request=true <3> +---- // TEST[continued] -<1> Cleans only the query cache -<2> Cleans only the request cache -<3> Cleans only the fielddata cache +<1> Clears only the fields cache +<2> Clears only the query cache +<3> Clears only the request cache -In addition to this, all caches relating to a specific field can also be -cleared by specifying `fields` url parameter with a comma delimited list of -the fields that should be cleared. Note that the provided names must refer to -concrete fields -- objects and field aliases are not supported. + + +[[clear-cache-api-specific-fields-ex]] +===== Clear the cache of specific fields + +To only clear the cache of specific fields, +use the `fields` query parameter. [source,console] --------------------------------------------------- +---- POST /twitter/_cache/clear?fields=foo,bar <1> --------------------------------------------------- +---- // TEST[continued] -<1> Clear the cache for the `foo` an `bar` field +<1> Clear the cache for the `foo` and `bar` field -[float] -==== Multi Index -The clear cache API can be applied to more than one index with a single -call, or even on `_all` the indices. 
+[[clear-cache-api-multi-ex]] +===== Clear caches for several indices [source,console] --------------------------------------------------- +---- POST /kimchy,elasticsearch/_cache/clear - -POST /_cache/clear --------------------------------------------------- +---- // TEST[s/^/PUT kimchy\nPUT elasticsearch\n/] + + +[[clear-cache-api-all-ex]] +===== Clear caches for all indices + +[source,console] +---- +POST /_cache/clear +---- From e7ffacf8c0eeccf133613edef1a7031bff257637 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Mon, 7 Oct 2019 09:25:03 -0400 Subject: [PATCH 26/55] [DOCS] Correct callouts in search template docs (#47655) --- docs/reference/search/search-template.asciidoc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/reference/search/search-template.asciidoc b/docs/reference/search/search-template.asciidoc index 67aa9001c21c..aa73e9d33723 100644 --- a/docs/reference/search/search-template.asciidoc +++ b/docs/reference/search/search-template.asciidoc @@ -536,14 +536,14 @@ The `params` would look like: "params": { "text": "words to search for", "line_no": { <1> - "start": 10, <1> - "end": 20 <1> + "start": 10, + "end": 20 } } } ------------------------------------------ // NOTCONSOLE -<1> All three of these elements are optional. +<1> The `line_no`, `start`, and `end` parameters are optional. We could write the query as: @@ -565,13 +565,13 @@ We could write the query as: {{#start}} <3> "gte": "{{start}}" <4> {{#end}},{{/end}} <5> - {{/start}} <3> + {{/start}} {{#end}} <6> "lte": "{{end}}" <7> - {{/end}} <6> + {{/end}} } } - {{/line_no}} <2> + {{/line_no}} } } } From 5cec47d8e34991c616443dd7f1d9ba27c35db761 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Mon, 7 Oct 2019 09:37:23 -0400 Subject: [PATCH 27/55] [DOCS] Correct deprecation note in mapping docs (#47656) --- docs/reference/mapping.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/mapping.asciidoc b/docs/reference/mapping.asciidoc index 2c30bfacc808..6067e90e78d2 100644 --- a/docs/reference/mapping.asciidoc +++ b/docs/reference/mapping.asciidoc @@ -5,7 +5,7 @@ -- Mapping is the process of defining how a document, and the fields it contains, -are stored and indexed. For instance, use mappings to define: +are stored and indexed. For instance, use mappings to define: * which string fields should be treated as full text fields. * which fields contain numbers, dates, or geolocations. @@ -20,7 +20,7 @@ are stored and indexed. For instance, use mappings to define: Each index has one _mapping type_ which determines how the document will be indexed. -deprecated[6.0.0,See <>]. +deprecated::[6.0.0,See <>] A mapping type has: From 6e98189f5dee61e036a711f0804b8ed9c894b194 Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Mon, 7 Oct 2019 08:44:19 -0500 Subject: [PATCH 28/55] Watcher remove assertion that is susceptible to a race condition (#47630) When deactivating a watch, there is a chance that it is fully deactivated and reporting as not running but the history is not fully written yet. There is not a tight coupling between the associated watcher history index and the deactivation. This test assumes that once a watch is deactivated, all history is fully written within a very short time period. If the watch is deactivated but the history is slow to write, it can result in a failing test. This change removes an assertion that assumes that the deactivation of a watch ensured that all of the watch history was written.
There is still a minor race condition with respect to the remaining history assertions. However, if the history is slow to be written, it will still allow the test to pass. fixes #47503 --- .../transport/action/activate/ActivateWatchTests.java | 7 ------- 1 file changed, 7 deletions(-) diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java index 380e8401f01a..8abc5ccd49ac 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java @@ -84,16 +84,9 @@ public class ActivateWatchTests extends AbstractWatcherIntegrationTestCase { assertThat(sum, is(0)); }); - logger.info("Ensured no more watches are being executed"); refresh(); long count1 = docCount(".watcher-history*", matchAllQuery()); - refresh(); - // Ensure no new watch history. The assertion ought to always return false, but if it returns true - // then we know that more history has been written. - boolean hasNewHistory = waitUntil(() -> count1 != docCount(".watcher-history*", matchAllQuery()), 5, TimeUnit.SECONDS); - assertFalse("Watcher should have stopped executing but new history found", hasNewHistory); - // lets activate it again logger.info("Activating watch again"); From e9e121c9ce65bdb27dd9307657bf85298020ea8c Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Mon, 7 Oct 2019 16:21:51 +0200 Subject: [PATCH 29/55] [Transform] move root endpoint to _transform with BWC layer (#47127) move the main endpoint to /_transform/ from /_data_frame/transforms/ while providing backwards compatibility and deprecation warnings --- .../client/TransformRequestConverters.java | 16 ++-- ...a => TransformRequestConvertersTests.java} | 22 ++--- .../TransformDocumentationIT.java | 12 +-- docs/build.gradle | 2 +- .../reference/transform/api-quickref.asciidoc | 2 +- .../transform/apis/delete-transform.asciidoc | 4 +- .../apis/get-transform-stats.asciidoc | 14 ++-- .../transform/apis/get-transform.asciidoc | 14 ++-- .../transform/apis/preview-transform.asciidoc | 4 +- .../transform/apis/put-transform.asciidoc | 4 +- .../transform/apis/start-transform.asciidoc | 4 +- .../transform/apis/stop-transform.asciidoc | 8 +- .../transform/apis/update-transform.asciidoc | 4 +- .../transform/ecommerce-tutorial.asciidoc | 6 +- docs/reference/transform/examples.asciidoc | 6 +- docs/reference/transform/limitations.asciidoc | 2 +- .../test/rest/yaml/section/DoSection.java | 4 +- .../privilege/ClusterPrivilegeResolver.java | 5 +- .../xpack/core/transform/TransformField.java | 8 +- .../core/transform/TransformMessages.java | 2 + .../action/DeleteTransformAction.java | 2 +- .../transform/action/GetTransformAction.java | 4 +- .../action/GetTransformStatsAction.java | 2 +- .../action/PreviewTransformAction.java | 4 +- .../transform/action/PutTransformAction.java | 2 +- .../action/StartTransformAction.java | 2 +- .../transform/action/StopTransformAction.java | 4 +- .../action/UpdateTransformAction.java | 2 +- .../DeleteTransformActionDeprecated.java | 20 +++++ .../compat/GetTransformActionDeprecated.java | 20 +++++ .../GetTransformStatsActionDeprecated.java | 21 +++++ .../PreviewTransformActionDeprecated.java | 22 +++++ .../compat/PutTransformActionDeprecated.java | 21 +++++
.../StartTransformActionDeprecated.java | 21 +++++ .../compat/StopTransformActionDeprecated.java | 21 +++++ .../UpdateTransformActionDeprecated.java | 22 +++++ ...transform_deprecated.delete_transform.json | 35 ++++++++ ...me_transform_deprecated.get_transform.json | 55 +++++++++++++ ...nsform_deprecated.get_transform_stats.json | 45 +++++++++++ ...ransform_deprecated.preview_transform.json | 26 ++++++ ...me_transform_deprecated.put_transform.json | 39 +++++++++ ..._transform_deprecated.start_transform.json | 35 ++++++++ ...e_transform_deprecated.stop_transform.json | 45 +++++++++++ ...transform_deprecated.update_transform.json | 38 +++++++++ .../api/transform.delete_transform.json | 2 +- .../api/transform.get_transform.json | 4 +- .../api/transform.get_transform_stats.json | 2 +- .../api/transform.preview_transform.json | 2 +- .../api/transform.put_transform.json | 2 +- .../api/transform.start_transform.json | 2 +- .../api/transform.stop_transform.json | 2 +- .../api/transform.update_transform.json | 2 +- .../TransformConfigurationIndexIT.java | 2 +- .../TransformGetAndGetStatsIT.java | 30 +++---- .../integration/TransformInternalIndexIT.java | 14 ++-- .../integration/TransformPivotRestIT.java | 30 +++---- .../integration/TransformRestTestCase.java | 45 ++++++++--- .../integration/TransformUsageIT.java | 2 +- .../xpack/transform/Transform.java | 61 ++++++++++++-- .../TransportDeleteTransformAction.java | 25 +++--- .../action/TransportGetTransformAction.java | 11 ++- .../TransportGetTransformStatsAction.java | 12 ++- .../TransportPreviewTransformAction.java | 14 +++- .../action/TransportPutTransformAction.java | 22 +++-- .../action/TransportStartTransformAction.java | 24 ++++-- .../action/TransportStopTransformAction.java | 21 +++-- .../TransportUpdateTransformAction.java | 27 ++++--- ...nsportDeleteTransformActionDeprecated.java | 31 +++++++ ...TransportGetTransformActionDeprecated.java | 25 ++++++ ...portGetTransformStatsActionDeprecated.java | 28 +++++++ ...sportPreviewTransformActionDeprecated.java | 31 +++++++ ...TransportPutTransformActionDeprecated.java | 35 ++++++++ ...ansportStartTransformActionDeprecated.java | 35 ++++++++ ...ransportStopTransformActionDeprecated.java | 32 ++++++++ ...nsportUpdateTransformActionDeprecated.java | 35 ++++++++ .../action/RestDeleteTransformAction.java | 2 +- .../rest/action/RestGetTransformAction.java | 2 +- .../action/RestGetTransformStatsAction.java | 2 +- .../action/RestPreviewTransformAction.java | 4 +- .../rest/action/RestPutTransformAction.java | 2 +- .../rest/action/RestStartTransformAction.java | 2 +- .../rest/action/RestStopTransformAction.java | 2 +- .../action/RestUpdateTransformAction.java | 2 +- .../RestDeleteTransformActionDeprecated.java | 49 +++++++++++ .../RestGetTransformActionDeprecated.java | 55 +++++++++++++ ...RestGetTransformStatsActionDeprecated.java | 55 +++++++++++++ .../RestPreviewTransformActionDeprecated.java | 46 +++++++++++ .../RestPutTransformActionDeprecated.java | 49 +++++++++++ .../RestStartTransformActionDeprecated.java | 46 +++++++++++ .../RestStopTransformActionDeprecated.java | 55 +++++++++++++ .../RestUpdateTransformActionDeprecated.java | 49 +++++++++++ ...T.java => TransformSurvivesUpgradeIT.java} | 81 ++++++++++++------- .../mixed_cluster/80_data_frame_jobs_crud.yml | 64 +++++++-------- .../old_cluster/80_data_frame_jobs_crud.yml | 36 ++++----- 94 files changed, 1584 insertions(+), 280 deletions(-) rename client/rest-high-level/src/test/java/org/elasticsearch/client/{DataFrameRequestConvertersTests.java => 
TransformRequestConvertersTests.java} (92%) create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/DeleteTransformActionDeprecated.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/GetTransformActionDeprecated.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/GetTransformStatsActionDeprecated.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/PreviewTransformActionDeprecated.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/PutTransformActionDeprecated.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/StartTransformActionDeprecated.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/StopTransformActionDeprecated.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/UpdateTransformActionDeprecated.java create mode 100644 x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.delete_transform.json create mode 100644 x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.get_transform.json create mode 100644 x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.get_transform_stats.json create mode 100644 x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.preview_transform.json create mode 100644 x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.put_transform.json create mode 100644 x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.start_transform.json create mode 100644 x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.stop_transform.json create mode 100644 x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.update_transform.json create mode 100644 x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportDeleteTransformActionDeprecated.java create mode 100644 x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportGetTransformActionDeprecated.java create mode 100644 x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportGetTransformStatsActionDeprecated.java create mode 100644 x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportPreviewTransformActionDeprecated.java create mode 100644 x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportPutTransformActionDeprecated.java create mode 100644 x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportStartTransformActionDeprecated.java create mode 100644 x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportStopTransformActionDeprecated.java create mode 100644 x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportUpdateTransformActionDeprecated.java create mode 100644 x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestDeleteTransformActionDeprecated.java create mode 100644 
x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestGetTransformActionDeprecated.java create mode 100644 x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestGetTransformStatsActionDeprecated.java create mode 100644 x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestPreviewTransformActionDeprecated.java create mode 100644 x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestPutTransformActionDeprecated.java create mode 100644 x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestStartTransformActionDeprecated.java create mode 100644 x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestStopTransformActionDeprecated.java create mode 100644 x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestUpdateTransformActionDeprecated.java rename x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/{DataFrameSurvivesUpgradeIT.java => TransformSurvivesUpgradeIT.java} (85%) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TransformRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TransformRequestConverters.java index 49d347ae2b6e..d7a44db3a5d2 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/TransformRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TransformRequestConverters.java @@ -48,7 +48,7 @@ final class TransformRequestConverters { static Request putTransform(PutTransformRequest putRequest) throws IOException { String endpoint = new RequestConverters.EndpointBuilder() - .addPathPartAsIs("_data_frame", "transforms") + .addPathPartAsIs("_transform") .addPathPart(putRequest.getConfig().getId()) .build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); @@ -61,7 +61,7 @@ final class TransformRequestConverters { static Request updateTransform(UpdateTransformRequest updateDataFrameTransformRequest) throws IOException { String endpoint = new RequestConverters.EndpointBuilder() - .addPathPartAsIs("_data_frame", "transforms") + .addPathPartAsIs("_transform") .addPathPart(updateDataFrameTransformRequest.getId()) .addPathPart("_update") .build(); @@ -75,7 +75,7 @@ final class TransformRequestConverters { static Request getTransform(GetTransformRequest getRequest) { String endpoint = new RequestConverters.EndpointBuilder() - .addPathPartAsIs("_data_frame", "transforms") + .addPathPartAsIs("_transform") .addPathPart(Strings.collectionToCommaDelimitedString(getRequest.getId())) .build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); @@ -93,7 +93,7 @@ final class TransformRequestConverters { static Request deleteTransform(DeleteTransformRequest deleteRequest) { String endpoint = new RequestConverters.EndpointBuilder() - .addPathPartAsIs("_data_frame", "transforms") + .addPathPartAsIs("_transform") .addPathPart(deleteRequest.getId()) .build(); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); @@ -105,7 +105,7 @@ final class TransformRequestConverters { static Request startTransform(StartTransformRequest startRequest) { String endpoint = new RequestConverters.EndpointBuilder() - .addPathPartAsIs("_data_frame", "transforms") + .addPathPartAsIs("_transform") .addPathPart(startRequest.getId()) .addPathPartAsIs("_start") .build(); @@ -120,7 
+120,7 @@ final class TransformRequestConverters { static Request stopTransform(StopTransformRequest stopRequest) { String endpoint = new RequestConverters.EndpointBuilder() - .addPathPartAsIs("_data_frame", "transforms") + .addPathPartAsIs("_transform") .addPathPart(stopRequest.getId()) .addPathPartAsIs("_stop") .build(); @@ -141,7 +141,7 @@ final class TransformRequestConverters { static Request previewTransform(PreviewTransformRequest previewRequest) throws IOException { String endpoint = new RequestConverters.EndpointBuilder() - .addPathPartAsIs("_data_frame", "transforms", "_preview") + .addPathPartAsIs("_transform", "_preview") .build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); request.setEntity(createEntity(previewRequest, REQUEST_BODY_CONTENT_TYPE)); @@ -150,7 +150,7 @@ final class TransformRequestConverters { static Request getTransformStats(GetTransformStatsRequest statsRequest) { String endpoint = new RequestConverters.EndpointBuilder() - .addPathPartAsIs("_data_frame", "transforms") + .addPathPartAsIs("_transform") .addPathPart(statsRequest.getId()) .addPathPartAsIs("_stats") .build(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/TransformRequestConvertersTests.java similarity index 92% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/TransformRequestConvertersTests.java index f1c181b61796..f7c32652fff0 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/TransformRequestConvertersTests.java @@ -24,7 +24,6 @@ import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.elasticsearch.client.core.PageParams; -import org.elasticsearch.client.transform.TransformNamedXContentProvider; import org.elasticsearch.client.transform.DeleteTransformRequest; import org.elasticsearch.client.transform.GetTransformRequest; import org.elasticsearch.client.transform.GetTransformStatsRequest; @@ -32,6 +31,7 @@ import org.elasticsearch.client.transform.PreviewTransformRequest; import org.elasticsearch.client.transform.PutTransformRequest; import org.elasticsearch.client.transform.StartTransformRequest; import org.elasticsearch.client.transform.StopTransformRequest; +import org.elasticsearch.client.transform.TransformNamedXContentProvider; import org.elasticsearch.client.transform.UpdateTransformRequest; import org.elasticsearch.client.transform.transforms.TransformConfig; import org.elasticsearch.client.transform.transforms.TransformConfigTests; @@ -56,7 +56,7 @@ import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.not; -public class DataFrameRequestConvertersTests extends ESTestCase { +public class TransformRequestConvertersTests extends ESTestCase { @Override protected NamedXContentRegistry xContentRegistry() { @@ -73,7 +73,7 @@ public class DataFrameRequestConvertersTests extends ESTestCase { Request request = TransformRequestConverters.putTransform(putRequest); assertThat(request.getParameters(), not(hasKey("defer_validation"))); assertEquals(HttpPut.METHOD_NAME, request.getMethod()); - assertThat(request.getEndpoint(), 
equalTo("/_data_frame/transforms/" + putRequest.getConfig().getId())); + assertThat(request.getEndpoint(), equalTo("/_transform/" + putRequest.getConfig().getId())); try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { TransformConfig parsedConfig = TransformConfig.PARSER.apply(parser, null); @@ -92,7 +92,7 @@ public class DataFrameRequestConvertersTests extends ESTestCase { Request request = TransformRequestConverters.updateTransform(updateDataFrameTransformRequest); assertThat(request.getParameters(), not(hasKey("defer_validation"))); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); - assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/" + transformId + "/_update")); + assertThat(request.getEndpoint(), equalTo("/_transform/" + transformId + "/_update")); try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { TransformConfigUpdate parsedConfig = TransformConfigUpdate.fromXContent(parser); @@ -109,7 +109,7 @@ public class DataFrameRequestConvertersTests extends ESTestCase { Request request = TransformRequestConverters.deleteTransform(deleteRequest); assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); - assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/foo")); + assertThat(request.getEndpoint(), equalTo("/_transform/foo")); assertThat(request.getParameters(), not(hasKey("force"))); @@ -129,7 +129,7 @@ public class DataFrameRequestConvertersTests extends ESTestCase { Request request = TransformRequestConverters.startTransform(startRequest); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); - assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/" + startRequest.getId() + "/_start")); + assertThat(request.getEndpoint(), equalTo("/_transform/" + startRequest.getId() + "/_start")); if (timeValue != null) { assertTrue(request.getParameters().containsKey("timeout")); @@ -153,7 +153,7 @@ public class DataFrameRequestConvertersTests extends ESTestCase { Request request = TransformRequestConverters.stopTransform(stopRequest); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); - assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/" + stopRequest.getId() + "/_stop")); + assertThat(request.getEndpoint(), equalTo("/_transform/" + stopRequest.getId() + "/_stop")); if (waitForCompletion != null) { assertTrue(request.getParameters().containsKey("wait_for_completion")); @@ -181,7 +181,7 @@ public class DataFrameRequestConvertersTests extends ESTestCase { Request request = TransformRequestConverters.previewTransform(previewRequest); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); - assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/_preview")); + assertThat(request.getEndpoint(), equalTo("/_transform/_preview")); try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { TransformConfig parsedConfig = TransformConfig.PARSER.apply(parser, null); @@ -194,7 +194,7 @@ public class DataFrameRequestConvertersTests extends ESTestCase { Request request = TransformRequestConverters.getTransformStats(getStatsRequest); assertEquals(HttpGet.METHOD_NAME, request.getMethod()); - assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/foo/_stats")); + assertThat(request.getEndpoint(), equalTo("/_transform/foo/_stats")); assertFalse(request.getParameters().containsKey("from")); assertFalse(request.getParameters().containsKey("size")); @@ -224,7 +224,7 @@ 
public class DataFrameRequestConvertersTests extends ESTestCase { Request request = TransformRequestConverters.getTransform(getRequest); assertEquals(HttpGet.METHOD_NAME, request.getMethod()); - assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/bar")); + assertThat(request.getEndpoint(), equalTo("/_transform/bar")); assertFalse(request.getParameters().containsKey("from")); assertFalse(request.getParameters().containsKey("size")); @@ -254,6 +254,6 @@ public class DataFrameRequestConvertersTests extends ESTestCase { Request request = TransformRequestConverters.getTransform(getRequest); assertEquals(HttpGet.METHOD_NAME, request.getMethod()); - assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/foo,bar,baz")); + assertThat(request.getEndpoint(), equalTo("/_transform/foo,bar,baz")); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TransformDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TransformDocumentationIT.java index 537ad29efa4e..e27c36afd317 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TransformDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TransformDocumentationIT.java @@ -42,16 +42,16 @@ import org.elasticsearch.client.transform.StopTransformRequest; import org.elasticsearch.client.transform.StopTransformResponse; import org.elasticsearch.client.transform.UpdateTransformRequest; import org.elasticsearch.client.transform.UpdateTransformResponse; -import org.elasticsearch.client.transform.transforms.TransformIndexerStats; -import org.elasticsearch.client.transform.transforms.TransformConfig; -import org.elasticsearch.client.transform.transforms.TransformConfigUpdate; -import org.elasticsearch.client.transform.transforms.TransformProgress; -import org.elasticsearch.client.transform.transforms.TransformStats; import org.elasticsearch.client.transform.transforms.DestConfig; import org.elasticsearch.client.transform.transforms.NodeAttributes; import org.elasticsearch.client.transform.transforms.QueryConfig; import org.elasticsearch.client.transform.transforms.SourceConfig; import org.elasticsearch.client.transform.transforms.TimeSyncConfig; +import org.elasticsearch.client.transform.transforms.TransformConfig; +import org.elasticsearch.client.transform.transforms.TransformConfigUpdate; +import org.elasticsearch.client.transform.transforms.TransformIndexerStats; +import org.elasticsearch.client.transform.transforms.TransformProgress; +import org.elasticsearch.client.transform.transforms.TransformStats; import org.elasticsearch.client.transform.transforms.pivot.AggregationConfig; import org.elasticsearch.client.transform.transforms.pivot.GroupConfig; import org.elasticsearch.client.transform.transforms.pivot.PivotConfig; @@ -219,7 +219,7 @@ public class TransformDocumentationIT extends ESRestHighLevelClientTestCase { } } - public void testUpdateDataFrameTransform() throws IOException, InterruptedException { + public void testUpdateTransform() throws IOException, InterruptedException { createIndex("source-data"); RestHighLevelClient client = highLevelClient(); diff --git a/docs/build.gradle b/docs/build.gradle index 23308a225806..0fc725bf589a 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -1186,7 +1186,7 @@ buildRestTests.setups['simple_kibana_continuous_pivot'] = buildRestTests.setups[ - do: raw: method: PUT - path: 
_data_frame/transforms/simple-kibana-ecomm-pivot + path: _transform/simple-kibana-ecomm-pivot body: > { "source": { diff --git a/docs/reference/transform/api-quickref.asciidoc b/docs/reference/transform/api-quickref.asciidoc index d2dff5c3021d..877ae2409691 100644 --- a/docs/reference/transform/api-quickref.asciidoc +++ b/docs/reference/transform/api-quickref.asciidoc @@ -6,7 +6,7 @@ All {transform} endpoints have the following base: [source,js] ---- -/_data_frame/transforms/ +_transform/ ---- // NOTCONSOLE diff --git a/docs/reference/transform/apis/delete-transform.asciidoc b/docs/reference/transform/apis/delete-transform.asciidoc index aaf08d2f9abb..ce07a5a55919 100644 --- a/docs/reference/transform/apis/delete-transform.asciidoc +++ b/docs/reference/transform/apis/delete-transform.asciidoc @@ -15,7 +15,7 @@ beta[] [[delete-transform-request]] ==== {api-request-title} -`DELETE _data_frame/transforms/` +`DELETE _transform/` [[delete-transform-prereqs]] ==== {api-prereq-title} @@ -46,7 +46,7 @@ current state. The default value is `false`, meaning that the {transform} must b [source,console] -------------------------------------------------- -DELETE _data_frame/transforms/ecommerce_transform +DELETE _transform/ecommerce_transform -------------------------------------------------- // TEST[skip:setup kibana sample data] diff --git a/docs/reference/transform/apis/get-transform-stats.asciidoc b/docs/reference/transform/apis/get-transform-stats.asciidoc index cfeea51a4ae2..63292c3977e0 100644 --- a/docs/reference/transform/apis/get-transform-stats.asciidoc +++ b/docs/reference/transform/apis/get-transform-stats.asciidoc @@ -16,15 +16,15 @@ beta[] [[get-transform-stats-request]] ==== {api-request-title} -`GET _data_frame/transforms//_stats` +`GET _transform//_stats` -`GET _data_frame/transforms/,/_stats` + +`GET _transform/,/_stats` + -`GET _data_frame/transforms/_stats` + +`GET _transform/_stats` + -`GET _data_frame/transforms/_all/_stats` + +`GET _transform/_all/_stats` + -`GET _data_frame/transforms/*/_stats` + +`GET _transform/*/_stats` + [[get-transform-stats-prereqs]] @@ -102,7 +102,7 @@ gets usage information for a maximum of ten results: [source,console] -------------------------------------------------- -GET _data_frame/transforms/_stats?from=5&size=10 +GET _transform/_stats?from=5&size=10 -------------------------------------------------- // TEST[skip:todo] @@ -111,7 +111,7 @@ The following example gets usage information for the `ecommerce_transform` [source,console] -------------------------------------------------- -GET _data_frame/transforms/ecommerce_transform/_stats +GET _transform/ecommerce_transform/_stats -------------------------------------------------- // TEST[skip:todo] diff --git a/docs/reference/transform/apis/get-transform.asciidoc b/docs/reference/transform/apis/get-transform.asciidoc index d48e7f07c7fe..39ed180d9e8b 100644 --- a/docs/reference/transform/apis/get-transform.asciidoc +++ b/docs/reference/transform/apis/get-transform.asciidoc @@ -15,15 +15,15 @@ beta[] [[get-transform-request]] ==== {api-request-title} -`GET _data_frame/transforms/` + +`GET _transform/` + -`GET _data_frame/transforms/,` + +`GET _transform/,` + -`GET _data_frame/transforms/` + +`GET _transform/` + -`GET _data_frame/transforms/_all` + +`GET _transform/_all` + -`GET _data_frame/transforms/*` +`GET _transform/*` [[get-transform-prereqs]] ==== {api-prereq-title} @@ -95,7 +95,7 @@ The following example retrieves information about a maximum of ten {transforms}: [source,console] 
-------------------------------------------------- -GET _data_frame/transforms?size=10 +GET _transform?size=10 -------------------------------------------------- // TEST[skip:setup kibana sample data] @@ -104,7 +104,7 @@ The following example gets configuration information for the [source,console] -------------------------------------------------- -GET _data_frame/transforms/ecommerce_transform +GET _transform/ecommerce_transform -------------------------------------------------- // TEST[skip:setup kibana sample data] diff --git a/docs/reference/transform/apis/preview-transform.asciidoc b/docs/reference/transform/apis/preview-transform.asciidoc index e0d5227e275b..e293a63ea66d 100644 --- a/docs/reference/transform/apis/preview-transform.asciidoc +++ b/docs/reference/transform/apis/preview-transform.asciidoc @@ -15,7 +15,7 @@ beta[] [[preview-transform-request]] ==== {api-request-title} -`POST _data_frame/transforms/_preview` +`POST _transform/_preview` [[preview-transform-prereq]] ==== {api-prereq-title} @@ -68,7 +68,7 @@ on all the current data in the source index. [source,console] -------------------------------------------------- -POST _data_frame/transforms/_preview +POST _transform/_preview { "source": { "index": "kibana_sample_data_ecommerce" diff --git a/docs/reference/transform/apis/put-transform.asciidoc b/docs/reference/transform/apis/put-transform.asciidoc index d2d76eed1d19..a0b3829f3104 100644 --- a/docs/reference/transform/apis/put-transform.asciidoc +++ b/docs/reference/transform/apis/put-transform.asciidoc @@ -15,7 +15,7 @@ beta[] [[put-transform-request]] ==== {api-request-title} -`PUT _data_frame/transforms/` +`PUT _transform/` [[put-transform-prereqs]] ==== {api-prereq-title} @@ -139,7 +139,7 @@ delays. [source,console] -------------------------------------------------- -PUT _data_frame/transforms/ecommerce_transform +PUT _transform/ecommerce_transform { "source": { "index": "kibana_sample_data_ecommerce", diff --git a/docs/reference/transform/apis/start-transform.asciidoc b/docs/reference/transform/apis/start-transform.asciidoc index 41712e3c2809..7dc49fcd0fde 100644 --- a/docs/reference/transform/apis/start-transform.asciidoc +++ b/docs/reference/transform/apis/start-transform.asciidoc @@ -15,7 +15,7 @@ beta[] [[start-transform-request]] ==== {api-request-title} -`POST _data_frame/transforms//_start` +`POST _transform//_start` [[start-transform-prereqs]] ==== {api-prereq-title} @@ -62,7 +62,7 @@ required privileges on the source and destination indices, the [source,console] -------------------------------------------------- -POST _data_frame/transforms/ecommerce_transform/_start +POST _transform/ecommerce_transform/_start -------------------------------------------------- // TEST[skip:set up kibana samples] diff --git a/docs/reference/transform/apis/stop-transform.asciidoc b/docs/reference/transform/apis/stop-transform.asciidoc index 55894aa50086..c367c487b1f3 100644 --- a/docs/reference/transform/apis/stop-transform.asciidoc +++ b/docs/reference/transform/apis/stop-transform.asciidoc @@ -16,11 +16,11 @@ beta[] [[stop-transform-request]] ==== {api-request-title} -`POST _data_frame/transforms//_stop` + +`POST _transform//_stop` + -`POST _data_frame/transforms/,/_stop` + +`POST _transform/,/_stop` + -`POST _data_frame/transforms/_all/_stop` +`POST _transform/_all/_stop` [[stop-transform-prereq]] @@ -104,7 +104,7 @@ are no matches or only partial matches. 
[source,console] -------------------------------------------------- -POST _data_frame/transforms/ecommerce_transform/_stop +POST _transform/ecommerce_transform/_stop -------------------------------------------------- // TEST[skip:set up kibana samples] diff --git a/docs/reference/transform/apis/update-transform.asciidoc b/docs/reference/transform/apis/update-transform.asciidoc index fe45472b0651..610f91231597 100644 --- a/docs/reference/transform/apis/update-transform.asciidoc +++ b/docs/reference/transform/apis/update-transform.asciidoc @@ -15,7 +15,7 @@ beta[] [[update-transform-request]] ==== {api-request-title} -`POST _data_frame/transforms//_update` +`POST _transform//_update` [[update-transform-prereqs]] ==== {api-prereq-title} @@ -122,7 +122,7 @@ delays. [source,console] -------------------------------------------------- -POST _data_frame/transforms/simple-kibana-ecomm-pivot/_update +POST _transform/simple-kibana-ecomm-pivot/_update { "source": { "index": "kibana_sample_data_ecommerce", diff --git a/docs/reference/transform/ecommerce-tutorial.asciidoc b/docs/reference/transform/ecommerce-tutorial.asciidoc index b4dc1ba742fa..84d5255787a7 100644 --- a/docs/reference/transform/ecommerce-tutorial.asciidoc +++ b/docs/reference/transform/ecommerce-tutorial.asciidoc @@ -85,7 +85,7 @@ If you prefer, you can use the [source,console] -------------------------------------------------- -POST _data_frame/transforms/_preview +POST _transform/_preview { "source": { "index": "kibana_sample_data_ecommerce", @@ -161,7 +161,7 @@ example: [source,console] -------------------------------------------------- -PUT _data_frame/transforms/ecommerce-customer-transform +PUT _transform/ecommerce-customer-transform { "source": { "index": [ @@ -237,7 +237,7 @@ example: [source,console] -------------------------------------------------- -POST _data_frame/transforms/ecommerce-customer-transform/_start +POST _transform/ecommerce-customer-transform/_start -------------------------------------------------- // TEST[skip:setup kibana sample data] diff --git a/docs/reference/transform/examples.asciidoc b/docs/reference/transform/examples.asciidoc index 4686a1c90b77..19dd9bbf505d 100644 --- a/docs/reference/transform/examples.asciidoc +++ b/docs/reference/transform/examples.asciidoc @@ -29,7 +29,7 @@ order, and the total amount of ordered products for each customer. [source,console] ---------------------------------- -POST _data_frame/transforms/_preview +POST _transform/_preview { "source": { "index": "kibana_sample_data_ecommerce" @@ -115,7 +115,7 @@ to determine what percentage of the flight time was actually delay. [source,console] ---------------------------------- -POST _data_frame/transforms/_preview +POST _transform/_preview { "source": { "index": "kibana_sample_data_flights", @@ -207,7 +207,7 @@ entity is `clientip`. [source,console] ---------------------------------- -POST _data_frame/transforms/_preview +POST _transform/_preview { "source": { "index": "kibana_sample_data_logs", diff --git a/docs/reference/transform/limitations.asciidoc b/docs/reference/transform/limitations.asciidoc index 27f73a9d5f01..1d3d38c94369 100644 --- a/docs/reference/transform/limitations.asciidoc +++ b/docs/reference/transform/limitations.asciidoc @@ -120,7 +120,7 @@ viewing the destination index. 
[[transform-deletion-limitations]] ==== Deleting a {transform} does not delete the destination index or {kib} index pattern -When deleting a {transform} using `DELETE _data_frame/transforms/index` +When deleting a {transform} using `DELETE _transform/index` neither the destination index nor the {kib} index pattern, should one have been created, are deleted. These objects must be deleted separately. diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index dd2da0d928b7..32c3d47eac53 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -292,9 +292,9 @@ public class DoSection implements ExecutableSection { final boolean matches = matcher.matches(); if (matches) { final String message = matcher.group(1); - if (message.startsWith("[types removal]")) { + if (message.startsWith("[types removal]") || message.startsWith("[_data_frame/transforms/] is deprecated")) { /* - * We skip warnings related to types deprecation so that we can continue to run the many + * We skip warnings related to types deprecation and transform rename so that we can continue to run the many * mixed-version tests that used typed APIs. */ } else if (expected.remove(message) == false) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java index 1d1c455a4ef9..24fd6997b83b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java @@ -50,12 +50,13 @@ public class ClusterPrivilegeResolver { private static final Set MANAGE_API_KEY_PATTERN = Set.of("cluster:admin/xpack/security/api_key/*"); private static final Set MONITOR_PATTERN = Set.of("cluster:monitor/*"); private static final Set MONITOR_ML_PATTERN = Set.of("cluster:monitor/xpack/ml/*"); - private static final Set MONITOR_DATA_FRAME_PATTERN = Set.of("cluster:monitor/data_frame/*"); + private static final Set MONITOR_DATA_FRAME_PATTERN = Set.of("cluster:monitor/data_frame/*", "cluster:monitor/transform/*"); private static final Set MONITOR_WATCHER_PATTERN = Set.of("cluster:monitor/xpack/watcher/*"); private static final Set MONITOR_ROLLUP_PATTERN = Set.of("cluster:monitor/xpack/rollup/*"); private static final Set ALL_CLUSTER_PATTERN = Set.of("cluster:*", "indices:admin/template/*"); private static final Set MANAGE_ML_PATTERN = Set.of("cluster:admin/xpack/ml/*", "cluster:monitor/xpack/ml/*"); - private static final Set MANAGE_DATA_FRAME_PATTERN = Set.of("cluster:admin/data_frame/*", "cluster:monitor/data_frame/*"); + private static final Set MANAGE_DATA_FRAME_PATTERN = Set.of("cluster:admin/data_frame/*", "cluster:monitor/data_frame/*", + "cluster:monitor/transform/*", "cluster:admin/transform/*"); private static final Set MANAGE_WATCHER_PATTERN = Set.of("cluster:admin/xpack/watcher/*", "cluster:monitor/xpack/watcher/*"); private static final Set TRANSPORT_CLIENT_PATTERN = Set.of("cluster:monitor/nodes/liveness", "cluster:monitor/state"); private static final Set MANAGE_IDX_TEMPLATE_PATTERN = Set.of("indices:admin/template/*"); 
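For orientation before the path constants below (an illustrative sketch, not code from this patch): after this change a client addresses transforms at the new base path, while the old path keeps working behind the deprecation layer. The sketch uses the low-level REST client's `Request` class, the same one the converter code above builds on; the transform id `my-transform` is invented for the example.

[source,java]
----
import org.apache.http.client.methods.HttpGet;
import org.elasticsearch.client.Request;

public class TransformEndpointExample {

    // Builds a GET request against either the new base path or the deprecated
    // 7.x one; requests to the latter draw a deprecation warning on the server.
    public static Request getTransform(String transformId, boolean useDeprecatedPath) {
        String basePath = useDeprecatedPath ? "/_data_frame/transforms/" : "/_transform/";
        return new Request(HttpGet.METHOD_NAME, basePath + transformId);
    }

    public static void main(String[] args) {
        System.out.println(getTransform("my-transform", false).getEndpoint()); // /_transform/my-transform
        System.out.println(getTransform("my-transform", true).getEndpoint());  // /_data_frame/transforms/my-transform
    }
}
----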
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformField.java index 3e09577de4c3..a82d3cc822dd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformField.java @@ -56,9 +56,13 @@ public final class TransformField { // common strings public static final String TASK_NAME = "data_frame/transforms"; - public static final String REST_BASE_PATH = "/_data_frame/"; - public static final String REST_BASE_PATH_TRANSFORMS = REST_BASE_PATH + "transforms/"; + public static final String REST_BASE_PATH_TRANSFORMS = "/_transform/"; public static final String REST_BASE_PATH_TRANSFORMS_BY_ID = REST_BASE_PATH_TRANSFORMS + "{id}/"; + + // deprecated REST API, to be removed for 8.0.0 + public static final String REST_BASE_PATH_TRANSFORMS_DEPRECATED = "/_data_frame/transforms/"; + public static final String REST_BASE_PATH_TRANSFORMS_BY_ID_DEPRECATED = REST_BASE_PATH_TRANSFORMS_DEPRECATED + "{id}/"; + public static final String TRANSFORM_ID = "transform_id"; // note: this is used to match tasks diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMessages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMessages.java index 3bd18e1c2835..ac028974e82d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMessages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMessages.java @@ -31,6 +31,8 @@ public class TransformMessages { public static final String TRANSFORM_FAILED_TO_PERSIST_STATS = "Failed to persist transform statistics for transform [{0}]"; public static final String UNKNOWN_TRANSFORM_STATS = "Statistics for transform [{0}] could not be found"; + public static final String REST_DEPRECATED_ENDPOINT = "[_data_frame/transforms/] is deprecated, use [_transform/] in the future."; + public static final String CANNOT_STOP_FAILED_TRANSFORM = "Unable to stop transform [{0}] as it is in a failed state with reason [{1}]." 
+ " Use force stop to stop the transform."; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/DeleteTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/DeleteTransformAction.java index 9ca809c39746..3df334dbf83f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/DeleteTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/DeleteTransformAction.java @@ -21,7 +21,7 @@ import java.util.Objects; public class DeleteTransformAction extends ActionType { public static final DeleteTransformAction INSTANCE = new DeleteTransformAction(); - public static final String NAME = "cluster:admin/data_frame/delete"; + public static final String NAME = "cluster:admin/transform/delete"; private DeleteTransformAction() { super(NAME, AcknowledgedResponse::new); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetTransformAction.java index fcc17a5ceddf..b3f4626f1de2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetTransformAction.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.core.transform.action; import org.apache.logging.log4j.LogManager; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; @@ -31,7 +31,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; public class GetTransformAction extends ActionType { public static final GetTransformAction INSTANCE = new GetTransformAction(); - public static final String NAME = "cluster:monitor/data_frame/get"; + public static final String NAME = "cluster:monitor/transform/get"; private static final DeprecationLogger deprecationLogger = new DeprecationLogger( LogManager.getLogger(GetTransformAction.class)); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetTransformStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetTransformStatsAction.java index b8b1f23ba005..4b2edfc5c7fb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetTransformStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetTransformStatsAction.java @@ -36,7 +36,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; public class GetTransformStatsAction extends ActionType { public static final GetTransformStatsAction INSTANCE = new GetTransformStatsAction(); - public static final String NAME = "cluster:monitor/data_frame/stats/get"; + public static final String NAME = "cluster:monitor/transform/stats/get"; public GetTransformStatsAction() { super(NAME, GetTransformStatsAction.Response::new); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformAction.java index 
ac462c9b4bac..cb095be15719 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformAction.java @@ -23,8 +23,8 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.transform.TransformField; -import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; import org.elasticsearch.xpack.core.transform.transforms.DestConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; import java.io.IOException; import java.util.ArrayList; @@ -38,7 +38,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; public class PreviewTransformAction extends ActionType { public static final PreviewTransformAction INSTANCE = new PreviewTransformAction(); - public static final String NAME = "cluster:admin/data_frame/preview"; + public static final String NAME = "cluster:admin/transform/preview"; private PreviewTransformAction() { super(NAME, PreviewTransformAction.Response::new); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PutTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PutTransformAction.java index ea38afe1d281..734d1c9b0b8d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PutTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PutTransformAction.java @@ -31,7 +31,7 @@ import static org.elasticsearch.cluster.metadata.MetaDataCreateIndexService.vali public class PutTransformAction extends ActionType { public static final PutTransformAction INSTANCE = new PutTransformAction(); - public static final String NAME = "cluster:admin/data_frame/put"; + public static final String NAME = "cluster:admin/transform/put"; private static final TimeValue MIN_FREQUENCY = TimeValue.timeValueSeconds(1); private static final TimeValue MAX_FREQUENCY = TimeValue.timeValueHours(1); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StartTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StartTransformAction.java index 7d55237b2d50..b468db6de28e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StartTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StartTransformAction.java @@ -25,7 +25,7 @@ import java.util.Objects; public class StartTransformAction extends ActionType { public static final StartTransformAction INSTANCE = new StartTransformAction(); - public static final String NAME = "cluster:admin/data_frame/start"; + public static final String NAME = "cluster:admin/transform/start"; private StartTransformAction() { super(NAME, StartTransformAction.Response::new); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StopTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StopTransformAction.java index 4fd3ce7f54de..2bd70e1789a5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StopTransformAction.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StopTransformAction.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.core.transform.action; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; @@ -35,7 +35,7 @@ import java.util.concurrent.TimeUnit; public class StopTransformAction extends ActionType { public static final StopTransformAction INSTANCE = new StopTransformAction(); - public static final String NAME = "cluster:admin/data_frame/stop"; + public static final String NAME = "cluster:admin/transform/stop"; public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/UpdateTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/UpdateTransformAction.java index 3950017dc8f2..b8cc02949834 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/UpdateTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/UpdateTransformAction.java @@ -31,7 +31,7 @@ import static org.elasticsearch.cluster.metadata.MetaDataCreateIndexService.vali public class UpdateTransformAction extends ActionType { public static final UpdateTransformAction INSTANCE = new UpdateTransformAction(); - public static final String NAME = "cluster:admin/data_frame/update"; + public static final String NAME = "cluster:admin/transform/update"; private static final TimeValue MIN_FREQUENCY = TimeValue.timeValueSeconds(1); private static final TimeValue MAX_FREQUENCY = TimeValue.timeValueHours(1); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/DeleteTransformActionDeprecated.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/DeleteTransformActionDeprecated.java new file mode 100644 index 000000000000..eae96cfa013b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/DeleteTransformActionDeprecated.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.transform.action.compat; + +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.AcknowledgedResponse; + +public class DeleteTransformActionDeprecated extends ActionType { + + public static final DeleteTransformActionDeprecated INSTANCE = new DeleteTransformActionDeprecated(); + public static final String NAME = "cluster:admin/data_frame/delete"; + + private DeleteTransformActionDeprecated() { + super(NAME, AcknowledgedResponse::new); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/GetTransformActionDeprecated.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/GetTransformActionDeprecated.java new file mode 100644 index 000000000000..0c0c6ffb21e1 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/GetTransformActionDeprecated.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.transform.action.compat; + +import org.elasticsearch.action.ActionType; +import org.elasticsearch.xpack.core.transform.action.GetTransformAction; + +public class GetTransformActionDeprecated extends ActionType { + + public static final GetTransformActionDeprecated INSTANCE = new GetTransformActionDeprecated(); + public static final String NAME = "cluster:monitor/data_frame/get"; + + private GetTransformActionDeprecated() { + super(NAME, GetTransformAction.Response::new); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/GetTransformStatsActionDeprecated.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/GetTransformStatsActionDeprecated.java new file mode 100644 index 000000000000..9a2b1ce6346d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/GetTransformStatsActionDeprecated.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.transform.action.compat; + +import org.elasticsearch.action.ActionType; +import org.elasticsearch.xpack.core.transform.action.GetTransformStatsAction; + +public class GetTransformStatsActionDeprecated extends ActionType { + + public static final GetTransformStatsActionDeprecated INSTANCE = new GetTransformStatsActionDeprecated(); + public static final String NAME = "cluster:monitor/data_frame/stats/get"; + + private GetTransformStatsActionDeprecated() { + super(NAME, GetTransformStatsAction.Response::new); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/PreviewTransformActionDeprecated.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/PreviewTransformActionDeprecated.java new file mode 100644 index 000000000000..e5c8ed707042 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/PreviewTransformActionDeprecated.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.transform.action.compat; + +import org.elasticsearch.action.ActionType; +import org.elasticsearch.xpack.core.transform.action.PreviewTransformAction; + +public class PreviewTransformActionDeprecated extends ActionType { + + public static final PreviewTransformActionDeprecated INSTANCE = new PreviewTransformActionDeprecated(); + public static final String NAME = "cluster:admin/data_frame/preview"; + + private PreviewTransformActionDeprecated() { + super(NAME, PreviewTransformAction.Response::new); + } + + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/PutTransformActionDeprecated.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/PutTransformActionDeprecated.java new file mode 100644 index 000000000000..aa29f1db2081 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/PutTransformActionDeprecated.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.transform.action.compat; + +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.AcknowledgedResponse; + +public class PutTransformActionDeprecated extends ActionType { + + public static final PutTransformActionDeprecated INSTANCE = new PutTransformActionDeprecated(); + public static final String NAME = "cluster:admin/data_frame/put"; + + private PutTransformActionDeprecated() { + super(NAME, AcknowledgedResponse::new); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/StartTransformActionDeprecated.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/StartTransformActionDeprecated.java new file mode 100644 index 000000000000..461eca56fc71 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/StartTransformActionDeprecated.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.transform.action.compat; + +import org.elasticsearch.action.ActionType; +import org.elasticsearch.xpack.core.transform.action.StartTransformAction; + +public class StartTransformActionDeprecated extends ActionType { + + public static final StartTransformActionDeprecated INSTANCE = new StartTransformActionDeprecated(); + public static final String NAME = "cluster:admin/data_frame/start"; + + private StartTransformActionDeprecated() { + super(NAME, StartTransformAction.Response::new); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/StopTransformActionDeprecated.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/StopTransformActionDeprecated.java new file mode 100644 index 000000000000..7d107d82a158 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/StopTransformActionDeprecated.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.transform.action.compat; + +import org.elasticsearch.action.ActionType; +import org.elasticsearch.xpack.core.transform.action.StopTransformAction; + +public class StopTransformActionDeprecated extends ActionType { + + public static final StopTransformActionDeprecated INSTANCE = new StopTransformActionDeprecated(); + public static final String NAME = "cluster:admin/data_frame/stop"; + + private StopTransformActionDeprecated() { + super(NAME, StopTransformAction.Response::new); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/UpdateTransformActionDeprecated.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/UpdateTransformActionDeprecated.java new file mode 100644 index 000000000000..6dd1441998e1 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/compat/UpdateTransformActionDeprecated.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.transform.action.compat; + +import org.elasticsearch.action.ActionType; +import org.elasticsearch.xpack.core.transform.action.UpdateTransformAction; +import org.elasticsearch.xpack.core.transform.action.UpdateTransformAction.Response; + +public class UpdateTransformActionDeprecated extends ActionType { + + public static final UpdateTransformActionDeprecated INSTANCE = new UpdateTransformActionDeprecated(); + public static final String NAME = "cluster:admin/data_frame/update"; + + private UpdateTransformActionDeprecated() { + super(NAME, Response::new); + } + +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.delete_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.delete_transform.json new file mode 100644 index 000000000000..2949ceb44499 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.delete_transform.json @@ -0,0 +1,35 @@ +{ + "data_frame_transform_deprecated.delete_transform":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-transform.html" + }, + "stability":"beta", + "url":{ + "paths":[ + { + "path":"/_data_frame/transforms/{transform_id}", + "methods":[ + "DELETE" + ], + "parts":{ + "transform_id":{ + "type":"string", + "description":"The id of the transform to delete" + } + }, + "deprecated":{ + "version":"7.5.0", + "description":"[_data_frame/transforms/] is deprecated, use [_transform/] in the future." + } + } + ] + }, + "params":{ + "force":{ + "type":"boolean", + "required":false, + "description":"When `true`, the transform is deleted regardless of its current state. The default value is `false`, meaning that the transform must be `stopped` before it can be deleted." + } + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.get_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.get_transform.json new file mode 100644 index 000000000000..466e687bd19e --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.get_transform.json @@ -0,0 +1,55 @@ +{ + "data_frame_transform_deprecated.get_transform":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform.html" + }, + "stability":"beta", + "url":{ + "paths":[ + { + "path":"/_data_frame/transforms/{transform_id}", + "methods":[ + "GET" + ], + "parts":{ + "transform_id":{ + "type":"string", + "description":"The id or comma delimited list of id expressions of the transforms to get, '_all' or '*' implies get all transforms" + } + }, + "deprecated":{ + "version":"7.5.0", + "description":"[_data_frame/transforms/] is deprecated, use [_transform/] in the future." + } + }, + { + "path":"/_data_frame/transforms", + "methods":[ + "GET" + ], + "deprecated":{ + "version":"7.5.0", + "description":"[_data_frame/transforms/] is deprecated, use [_transform/] in the future." 
+ } + } + ] + }, + "params":{ + "from":{ + "type":"int", + "required":false, + "description":"skips a number of transform configs, defaults to 0" + }, + "size":{ + "type":"int", + "required":false, + "description":"specifies a max number of transforms to get, defaults to 100" + }, + "allow_no_match":{ + "type":"boolean", + "required":false, + "description":"Whether to ignore if a wildcard expression matches no transforms. (This includes `_all` string or when no transforms have been specified)" + } + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.get_transform_stats.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.get_transform_stats.json new file mode 100644 index 000000000000..eb5dd8d5d2f6 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.get_transform_stats.json @@ -0,0 +1,45 @@ +{ + "data_frame_transform_deprecated.get_transform_stats":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-stats.html" + }, + "stability":"beta", + "url":{ + "paths":[ + { + "path":"/_data_frame/transforms/{transform_id}/_stats", + "methods":[ + "GET" + ], + "parts":{ + "transform_id":{ + "type":"string", + "description":"The id of the transform for which to get stats. '_all' or '*' implies all transforms" + } + }, + "deprecated":{ + "version":"7.5.0", + "description":"[_data_frame/transforms/] is deprecated, use [_transform/] in the future." + } + } + ] + }, + "params":{ + "from":{ + "type":"number", + "required":false, + "description":"skips a number of transform stats, defaults to 0" + }, + "size":{ + "type":"number", + "required":false, + "description":"specifies a max number of transform stats to get, defaults to 100" + }, + "allow_no_match":{ + "type":"boolean", + "required":false, + "description":"Whether to ignore if a wildcard expression matches no transforms. (This includes `_all` string or when no transforms have been specified)" + } + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.preview_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.preview_transform.json new file mode 100644 index 000000000000..1ae012d28cbe --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.preview_transform.json @@ -0,0 +1,26 @@ +{ + "data_frame_transform_deprecated.preview_transform":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/preview-transform.html" + }, + "stability":"beta", + "url":{ + "paths":[ + { + "path":"/_data_frame/transforms/_preview", + "methods":[ + "POST" + ], + "deprecated":{ + "version":"7.5.0", + "description":"[_data_frame/transforms/] is deprecated, use [_transform/] in the future." 
+ } + } + ] + }, + "body":{ + "description":"The definition for the transform to preview", + "required":true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.put_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.put_transform.json new file mode 100644 index 000000000000..27256eb782f6 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.put_transform.json @@ -0,0 +1,39 @@ +{ + "data_frame_transform_deprecated.put_transform":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/put-transform.html" + }, + "stability":"beta", + "url":{ + "paths":[ + { + "path":"/_data_frame/transforms/{transform_id}", + "methods":[ + "PUT" + ], + "parts":{ + "transform_id":{ + "type":"string", + "description":"The id of the new transform." + } + }, + "deprecated":{ + "version":"7.5.0", + "description":"[_data_frame/transforms/] is deprecated, use [_transform/] in the future." + } + } + ] + }, + "params": { + "defer_validation": { + "type": "boolean", + "required": false, + "description": "If validations should be deferred until transform starts, defaults to false." + } + }, + "body":{ + "description":"The transform definition", + "required":true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.start_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.start_transform.json new file mode 100644 index 000000000000..f47bd61726e8 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.start_transform.json @@ -0,0 +1,35 @@ +{ + "data_frame_transform_deprecated.start_transform":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/start-transform.html" + }, + "stability":"beta", + "url":{ + "paths":[ + { + "path":"/_data_frame/transforms/{transform_id}/_start", + "methods":[ + "POST" + ], + "parts":{ + "transform_id":{ + "type":"string", + "description":"The id of the transform to start" + } + }, + "deprecated":{ + "version":"7.5.0", + "description":"[_data_frame/transforms/] is deprecated, use [_transform/] in the future." + } + } + ] + }, + "params":{ + "timeout":{ + "type":"time", + "required":false, + "description":"Controls the time to wait for the transform to start" + } + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.stop_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.stop_transform.json new file mode 100644 index 000000000000..434375a9fabd --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.stop_transform.json @@ -0,0 +1,45 @@ +{ + "data_frame_transform_deprecated.stop_transform":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-transform.html" + }, + "stability":"beta", + "url":{ + "paths":[ + { + "path":"/_data_frame/transforms/{transform_id}/_stop", + "methods":[ + "POST" + ], + "parts":{ + "transform_id":{ + "type":"string", + "description":"The id of the transform to stop" + } + }, + "deprecated":{ + "version":"7.5.0", + "description":"[_data_frame/transforms/] is deprecated, use [_transform/] in the future." 
+          }
+        }
+      ]
+    },
+    "params":{
+      "wait_for_completion":{
+        "type":"boolean",
+        "required":false,
+        "description":"Whether to wait for the transform to fully stop before returning. Defaults to false"
+      },
+      "timeout":{
+        "type":"time",
+        "required":false,
+        "description":"Controls the time to wait until the transform has stopped. Defaults to 30 seconds"
+      },
+      "allow_no_match":{
+        "type":"boolean",
+        "required":false,
+        "description":"Whether to ignore if a wildcard expression matches no transforms. (This includes `_all` string or when no transforms have been specified)"
+      }
+    }
+  }
+}
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.update_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.update_transform.json
new file mode 100644
index 000000000000..d1c0bb2456a7
--- /dev/null
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame_transform_deprecated.update_transform.json
@@ -0,0 +1,38 @@
+{
+  "data_frame_transform_deprecated.update_transform": {
+    "documentation": {
+      "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/update-transform.html"
+    },
+    "stability": "beta",
+    "url": {
+      "paths": [
+        {
+          "path": "/_data_frame/transforms/{transform_id}/_update",
+          "methods": [ "POST" ],
+          "parts": {
+            "transform_id": {
+              "type": "string",
+              "required": true,
+              "description": "The id of the transform."
+            }
+          },
+          "deprecated":{
+            "version":"7.5.0",
+            "description":"[_data_frame/transforms/] is deprecated, use [_transform/] in the future."
+          }
+        }
+      ]
+    },
+    "params": {
+      "defer_validation": {
+        "type": "boolean",
+        "required": false,
+        "description": "If validations should be deferred until the transform starts, defaults to false."
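Each deprecated spec mirrors its `transform.*` counterpart parameter for parameter; only the URL prefix differs. An illustrative low-level REST client call, assuming a `RestClient client` and using `my_transform` as a stand-in id (Elasticsearch surfaces deprecation through a `Warning` response header):

Request stopOld = new Request("POST", "/_data_frame/transforms/my_transform/_stop");
stopOld.addParameter("wait_for_completion", "true");   // same parameters as the new endpoint
Response oldResponse = client.performRequest(stopOld); // response carries a deprecation Warning header

Request stopNew = new Request("POST", "/_transform/my_transform/_stop");
stopNew.addParameter("wait_for_completion", "true");
Response newResponse = client.performRequest(stopNew); // no deprecation warning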
+ } + }, + "body": { + "description" : "The update transform definition", + "required": true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.delete_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.delete_transform.json index 01915ed7b56b..c3950c129988 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.delete_transform.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.delete_transform.json @@ -7,7 +7,7 @@ "url":{ "paths":[ { - "path":"/_data_frame/transforms/{transform_id}", + "path":"/_transform/{transform_id}", "methods":[ "DELETE" ], diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.get_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.get_transform.json index 9baf3446a2ae..ff4a6a24db8a 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.get_transform.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.get_transform.json @@ -7,7 +7,7 @@ "url":{ "paths":[ { - "path":"/_data_frame/transforms/{transform_id}", + "path":"/_transform/{transform_id}", "methods":[ "GET" ], @@ -19,7 +19,7 @@ } }, { - "path":"/_data_frame/transforms", + "path":"/_transform", "methods":[ "GET" ] diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.get_transform_stats.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.get_transform_stats.json index f37dfe29ff4f..eabdd3ce0b5d 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.get_transform_stats.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.get_transform_stats.json @@ -7,7 +7,7 @@ "url":{ "paths":[ { - "path":"/_data_frame/transforms/{transform_id}/_stats", + "path":"/_transform/{transform_id}/_stats", "methods":[ "GET" ], diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.preview_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.preview_transform.json index b5ff3cbba966..f1f8e7a4734a 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.preview_transform.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.preview_transform.json @@ -7,7 +7,7 @@ "url":{ "paths":[ { - "path":"/_data_frame/transforms/_preview", + "path":"/_transform/_preview", "methods":[ "POST" ] diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.put_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.put_transform.json index 7925fc1063be..5825fe6b3900 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.put_transform.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.put_transform.json @@ -7,7 +7,7 @@ "url":{ "paths":[ { - "path":"/_data_frame/transforms/{transform_id}", + "path":"/_transform/{transform_id}", "methods":[ "PUT" ], diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.start_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.start_transform.json index 408f978e22cb..c374f5a5a6b8 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.start_transform.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.start_transform.json @@ -7,7 +7,7 @@ "url":{ "paths":[ { - "path":"/_data_frame/transforms/{transform_id}/_start", + "path":"/_transform/{transform_id}/_start", "methods":[ "POST" ], diff --git 
a/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.stop_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.stop_transform.json index b09d19703bf3..3ad86f6245f0 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.stop_transform.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.stop_transform.json @@ -7,7 +7,7 @@ "url":{ "paths":[ { - "path":"/_data_frame/transforms/{transform_id}/_stop", + "path":"/_transform/{transform_id}/_stop", "methods":[ "POST" ], diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.update_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.update_transform.json index 68de23da71b9..4d4571a20b74 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.update_transform.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/transform.update_transform.json @@ -7,7 +7,7 @@ "url": { "paths": [ { - "path": "/_data_frame/transforms/{transform_id}/_update", + "path": "/_transform/{transform_id}/_update", "methods": [ "POST" ], "parts": { "transform_id": { diff --git a/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformConfigurationIndexIT.java b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformConfigurationIndexIT.java index 4b4845ef52b3..dfab389c4e42 100644 --- a/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformConfigurationIndexIT.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformConfigurationIndexIT.java @@ -50,7 +50,7 @@ public class TransformConfigurationIndexIT extends TransformRestTestCase { // refresh the index assertOK(client().performRequest(new Request("POST", TransformInternalIndexConstants.LATEST_INDEX_NAME + "/_refresh"))); - Request deleteRequest = new Request("DELETE", TRANSFORM_ENDPOINT + fakeTransformName); + Request deleteRequest = new Request("DELETE", getTransformEndpoint() + fakeTransformName); Response deleteResponse = client().performRequest(deleteRequest); assertOK(deleteResponse); assertTrue((boolean)XContentMapValues.extractValue("acknowledged", entityAsMap(deleteResponse))); diff --git a/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformGetAndGetStatsIT.java b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformGetAndGetStatsIT.java index 7715086346b9..3ef271fb42e9 100644 --- a/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformGetAndGetStatsIT.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformGetAndGetStatsIT.java @@ -78,19 +78,19 @@ public class TransformGetAndGetStatsIT extends TransformRestTestCase { String authHeader = randomFrom(BASIC_AUTH_VALUE_TRANSFORM_USER, BASIC_AUTH_VALUE_TRANSFORM_ADMIN); // check all the different ways to retrieve all stats - Request getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + "_stats", authHeader); + Request getRequest = createRequestWithAuth("GET", getTransformEndpoint() + "_stats", authHeader); Map stats = entityAsMap(client().performRequest(getRequest)); assertEquals(3, XContentMapValues.extractValue("count", stats)); - 
getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + "_all/_stats", authHeader); + getRequest = createRequestWithAuth("GET", getTransformEndpoint() + "_all/_stats", authHeader); stats = entityAsMap(client().performRequest(getRequest)); assertEquals(3, XContentMapValues.extractValue("count", stats)); - getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + "*/_stats", authHeader); + getRequest = createRequestWithAuth("GET", getTransformEndpoint() + "*/_stats", authHeader); stats = entityAsMap(client().performRequest(getRequest)); assertEquals(3, XContentMapValues.extractValue("count", stats)); - getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + "pivot_1,pivot_2/_stats", authHeader); + getRequest = createRequestWithAuth("GET", getTransformEndpoint() + "pivot_1,pivot_2/_stats", authHeader); stats = entityAsMap(client().performRequest(getRequest)); assertEquals(2, XContentMapValues.extractValue("count", stats)); - getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + "pivot_*/_stats", authHeader); + getRequest = createRequestWithAuth("GET", getTransformEndpoint() + "pivot_*/_stats", authHeader); stats = entityAsMap(client().performRequest(getRequest)); assertEquals(3, XContentMapValues.extractValue("count", stats)); @@ -111,7 +111,7 @@ public class TransformGetAndGetStatsIT extends TransformRestTestCase { } // only pivot_1 - getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + "pivot_1/_stats", authHeader); + getRequest = createRequestWithAuth("GET", getTransformEndpoint() + "pivot_1/_stats", authHeader); stats = entityAsMap(client().performRequest(getRequest)); assertEquals(1, XContentMapValues.extractValue("count", stats)); @@ -122,7 +122,7 @@ public class TransformGetAndGetStatsIT extends TransformRestTestCase { assertEquals(1, XContentMapValues.extractValue("checkpointing.last.checkpoint", transformsStats.get(0))); // only continuous - getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + "pivot_continuous/_stats", authHeader); + getRequest = createRequestWithAuth("GET", getTransformEndpoint() + "pivot_continuous/_stats", authHeader); stats = entityAsMap(client().performRequest(getRequest)); assertEquals(1, XContentMapValues.extractValue("count", stats)); @@ -133,18 +133,18 @@ public class TransformGetAndGetStatsIT extends TransformRestTestCase { // check all the different ways to retrieve all transforms - getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT, authHeader); + getRequest = createRequestWithAuth("GET", getTransformEndpoint(), authHeader); Map transforms = entityAsMap(client().performRequest(getRequest)); assertEquals(3, XContentMapValues.extractValue("count", transforms)); - getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + "_all", authHeader); + getRequest = createRequestWithAuth("GET", getTransformEndpoint() + "_all", authHeader); transforms = entityAsMap(client().performRequest(getRequest)); assertEquals(3, XContentMapValues.extractValue("count", transforms)); - getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + "*", authHeader); + getRequest = createRequestWithAuth("GET", getTransformEndpoint() + "*", authHeader); transforms = entityAsMap(client().performRequest(getRequest)); assertEquals(3, XContentMapValues.extractValue("count", transforms)); // only pivot_1 - getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + "pivot_1", authHeader); + getRequest = createRequestWithAuth("GET", getTransformEndpoint() + "pivot_1", authHeader); transforms = 
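// Every URL in these tests is now built through getTransformEndpoint(), the helper added to
// TransformRestTestCase at the end of this patch. Depending on the random choice made in init(),
// the same request line resolves to one of two prefixes (values follow from the spec changes below):
//   getTransformEndpoint() + "pivot_1/_stats"
//     -> "/_transform/pivot_1/_stats"               when useDeprecatedEndpoints == false
//     -> "/_data_frame/transforms/pivot_1/_stats"   when useDeprecatedEndpoints == true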
entityAsMap(client().performRequest(getRequest)); assertEquals(1, XContentMapValues.extractValue("count", transforms)); @@ -168,7 +168,7 @@ public class TransformGetAndGetStatsIT extends TransformRestTestCase { createPivotReviewsTransform("pivot_stats_2", "pivot_reviews_stats_2", null); startAndWaitForTransform("pivot_stats_2", "pivot_reviews_stats_2"); - Request getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + "_stats", BASIC_AUTH_VALUE_TRANSFORM_ADMIN); + Request getRequest = createRequestWithAuth("GET", getTransformEndpoint() + "_stats", BASIC_AUTH_VALUE_TRANSFORM_ADMIN); Map stats = entityAsMap(client().performRequest(getRequest)); assertEquals(2, XContentMapValues.extractValue("count", stats)); List> transformsStats = (List>)XContentMapValues.extractValue("transforms", stats); @@ -192,7 +192,7 @@ public class TransformGetAndGetStatsIT extends TransformRestTestCase { // Alternate testing between admin and lowly user, as both should be able to get the configs and stats String authHeader = randomFrom(BASIC_AUTH_VALUE_TRANSFORM_USER, BASIC_AUTH_VALUE_TRANSFORM_ADMIN); - Request getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + transformId + "/_stats", authHeader); + Request getRequest = createRequestWithAuth("GET", getTransformEndpoint() + transformId + "/_stats", authHeader); Map stats = entityAsMap(client().performRequest(getRequest)); assertEquals(1, XContentMapValues.extractValue("count", stats)); List> transformsStats = (List>)XContentMapValues.extractValue("transforms", stats); @@ -218,7 +218,7 @@ public class TransformGetAndGetStatsIT extends TransformRestTestCase { String transformDest = transformId + "_idx"; String transformSrc = "reviews_cont_pivot_test"; createReviewsIndex(transformSrc); - final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, null); + final Request createTransformRequest = createRequestWithAuth("PUT", getTransformEndpoint() + transformId, null); String config = "{ \"dest\": {\"index\":\"" + transformDest + "\"}," + " \"source\": {\"index\":\"" + transformSrc + "\"}," + " \"frequency\": \"1s\"," @@ -242,7 +242,7 @@ public class TransformGetAndGetStatsIT extends TransformRestTestCase { assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); startAndWaitForContinuousTransform(transformId, transformDest, null); - Request getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + transformId + "/_stats", null); + Request getRequest = createRequestWithAuth("GET", getTransformEndpoint() + transformId + "/_stats", null); Map stats = entityAsMap(client().performRequest(getRequest)); List> transformsStats = (List>)XContentMapValues.extractValue("transforms", stats); assertEquals(1, transformsStats.size()); diff --git a/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformInternalIndexIT.java b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformInternalIndexIT.java index c962befb16ee..9c76c0baf98a 100644 --- a/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformInternalIndexIT.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformInternalIndexIT.java @@ -12,12 +12,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; import 
org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.search.SearchModule; -import org.elasticsearch.xpack.core.transform.TransformField; -import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; -import org.elasticsearch.xpack.core.transform.transforms.persistence.TransformInternalIndexConstants; import org.elasticsearch.client.indices.CreateIndexRequest; import org.elasticsearch.client.transform.GetTransformRequest; import org.elasticsearch.client.transform.GetTransformResponse; @@ -26,17 +20,23 @@ import org.elasticsearch.client.transform.UpdateTransformResponse; import org.elasticsearch.client.transform.transforms.TransformConfigUpdate; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.persistence.TransformInternalIndexConstants; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.Base64; import java.util.Collections; -import static org.hamcrest.Matchers.is; import static org.elasticsearch.xpack.transform.persistence.TransformInternalIndex.addTransformsConfigMappings; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; public class TransformInternalIndexIT extends ESRestTestCase { diff --git a/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformPivotRestIT.java b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformPivotRestIT.java index f590aba2c50a..72e9a621d54c 100644 --- a/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformPivotRestIT.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformPivotRestIT.java @@ -134,7 +134,7 @@ public class TransformPivotRestIT extends TransformRestTestCase { String transformId = "simple_bucket_selector_pivot"; String transformIndex = "bucket_selector_idx"; setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); - final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + final Request createTransformRequest = createRequestWithAuth("PUT", getTransformEndpoint() + transformId, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"}," @@ -179,7 +179,7 @@ public class TransformPivotRestIT extends TransformRestTestCase { String transformId = "simple_continuous_pivot"; String transformIndex = "pivot_reviews_continuous"; setupDataAccessRole(DATA_ACCESS_ROLE, indexName, transformIndex); - final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + final Request createTransformRequest = createRequestWithAuth("PUT", getTransformEndpoint() 
+ transformId, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" + " \"source\": {\"index\":\"" + indexName + "\"}," @@ -293,7 +293,7 @@ public class TransformPivotRestIT extends TransformRestTestCase { String transformIndex = "pivot_reviews_via_histogram"; setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); - final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + final Request createTransformRequest = createRequestWithAuth("PUT", getTransformEndpoint() + transformId, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" @@ -331,7 +331,7 @@ public class TransformPivotRestIT extends TransformRestTestCase { String transformIndex = "bigger_pivot_reviews"; setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); - final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + final Request createTransformRequest = createRequestWithAuth("PUT", getTransformEndpoint() + transformId, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" @@ -406,7 +406,7 @@ public class TransformPivotRestIT extends TransformRestTestCase { String transformIndex = "pivot_reviews_via_date_histogram"; setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); - final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + final Request createTransformRequest = createRequestWithAuth("PUT", getTransformEndpoint() + transformId, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" @@ -442,7 +442,7 @@ public class TransformPivotRestIT extends TransformRestTestCase { @SuppressWarnings("unchecked") public void testPreviewTransform() throws Exception { setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME); - final Request createPreviewRequest = createRequestWithAuth("POST", TRANSFORM_ENDPOINT + "_preview", + final Request createPreviewRequest = createRequestWithAuth("POST", getTransformEndpoint() + "_preview", BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" @@ -494,7 +494,7 @@ public class TransformPivotRestIT extends TransformRestTestCase { client().performRequest(pipelineRequest); setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME); - final Request createPreviewRequest = createRequestWithAuth("POST", TRANSFORM_ENDPOINT + "_preview", null); + final Request createPreviewRequest = createRequestWithAuth("POST", getTransformEndpoint() + "_preview", null); String config = "{ \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"} ," + "\"dest\": {\"pipeline\": \"" + pipelineId + "\"}," @@ -531,7 +531,7 @@ public class TransformPivotRestIT extends TransformRestTestCase { String transformIndex = "pivot_reviews_via_date_histogram_with_max_time"; setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); - final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + final Request createTransformRequest = createRequestWithAuth("PUT", getTransformEndpoint() + transformId, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" @@ -578,7 +578,7 @@ public class TransformPivotRestIT extends TransformRestTestCase { String transformIndex = "scripted_metric_pivot_reviews"; setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); - final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + 
transformId, + final Request createTransformRequest = createRequestWithAuth("PUT", getTransformEndpoint() + transformId, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" @@ -631,7 +631,7 @@ public class TransformPivotRestIT extends TransformRestTestCase { String transformIndex = "bucket_script_pivot_reviews"; setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); - final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + final Request createTransformRequest = createRequestWithAuth("PUT", getTransformEndpoint() + transformId, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" @@ -683,7 +683,7 @@ public class TransformPivotRestIT extends TransformRestTestCase { String transformIndex = "geo_bounds_pivot_reviews"; setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); - final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + final Request createTransformRequest = createRequestWithAuth("PUT", getTransformEndpoint() + transformId, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" @@ -736,7 +736,7 @@ public class TransformPivotRestIT extends TransformRestTestCase { String transformIndex = "geo_centroid_pivot_reviews"; setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); - final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + final Request createTransformRequest = createRequestWithAuth("PUT", getTransformEndpoint() + transformId, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" @@ -786,7 +786,7 @@ public class TransformPivotRestIT extends TransformRestTestCase { String transformIndex = "weighted_avg_pivot_reviews"; setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); - final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + final Request createTransformRequest = createRequestWithAuth("PUT", getTransformEndpoint() + transformId, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" @@ -824,7 +824,7 @@ public class TransformPivotRestIT extends TransformRestTestCase { String transformId = "test_with_many_buckets"; String transformIndex = transformId + "-idx"; setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); - final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + final Request createTransformRequest = createRequestWithAuth("PUT", getTransformEndpoint() + transformId, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" @@ -853,7 +853,7 @@ public class TransformPivotRestIT extends TransformRestTestCase { startAndWaitForTransform(transformId, transformIndex, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); assertTrue(indexExists(transformIndex)); - Map stats = getAsMap(TRANSFORM_ENDPOINT + transformId + "/_stats"); + Map stats = getAsMap(getTransformEndpoint() + transformId + "/_stats"); assertEquals(101, ((List)XContentMapValues.extractValue("transforms.stats.pages_processed", stats)).get(0)); } diff --git a/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java index 
59d5fe1c1cea..c98ea5c57b7f 100644 --- a/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java @@ -6,12 +6,15 @@ package org.elasticsearch.xpack.transform.integration; +import org.apache.http.HttpHost; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.RestClientBuilder; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; @@ -23,6 +26,7 @@ import org.elasticsearch.xpack.core.transform.TransformField; import org.elasticsearch.xpack.core.transform.transforms.persistence.TransformInternalIndexConstants; import org.junit.After; import org.junit.AfterClass; +import org.junit.BeforeClass; import java.io.IOException; import java.util.Arrays; @@ -44,13 +48,30 @@ public abstract class TransformRestTestCase extends ESRestTestCase { protected static final String REVIEWS_INDEX_NAME = "reviews"; - protected static final String TRANSFORM_ENDPOINT = TransformField.REST_BASE_PATH + "transforms/"; + private static boolean useDeprecatedEndpoints; + + @BeforeClass + public static void init() { + // randomly return the old or the new endpoints, old endpoints to be removed for 8.0.0 + useDeprecatedEndpoints = randomBoolean(); + } @Override protected Settings restClientSettings() { return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE_SUPER_USER).build(); } + @Override + protected RestClient buildClient(Settings settings, HttpHost[] hosts) throws IOException { + if (useDeprecatedEndpoints) { + RestClientBuilder builder = RestClient.builder(hosts); + configureClient(builder, settings); + builder.setStrictDeprecationMode(false); + return builder.build(); + } + return super.buildClient(settings, hosts); + } + protected void createReviewsIndex(String indexName, int numDocs) throws IOException { int[] distributionTable = {5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 3, 3, 2, 1, 1, 1}; @@ -159,7 +180,7 @@ public abstract class TransformRestTestCase extends ESRestTestCase { protected void createContinuousPivotReviewsTransform(String transformId, String dataFrameIndex, String authHeader) throws IOException { - final Request createDataframeTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, authHeader); + final Request createDataframeTransformRequest = createRequestWithAuth("PUT", getTransformEndpoint() + transformId, authHeader); String config = "{ \"dest\": {\"index\":\"" + dataFrameIndex + "\"}," + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"}," @@ -188,7 +209,7 @@ public abstract class TransformRestTestCase extends ESRestTestCase { protected void createPivotReviewsTransform(String transformId, String dataFrameIndex, String query, String pipeline, String authHeader) throws IOException { - final Request createDataframeTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, authHeader); + final Request createDataframeTransformRequest = createRequestWithAuth("PUT", getTransformEndpoint() + transformId, authHeader); String config 
= "{"; @@ -230,7 +251,7 @@ public abstract class TransformRestTestCase extends ESRestTestCase { protected void startDataframeTransform(String transformId, String authHeader, String... warnings) throws IOException { // start the transform - final Request startTransformRequest = createRequestWithAuth("POST", TRANSFORM_ENDPOINT + transformId + "/_start", authHeader); + final Request startTransformRequest = createRequestWithAuth("POST", getTransformEndpoint() + transformId + "/_start", authHeader); if (warnings.length > 0) { startTransformRequest.setOptions(expectWarnings(warnings)); } @@ -240,7 +261,7 @@ public abstract class TransformRestTestCase extends ESRestTestCase { protected void stopTransform(String transformId, boolean force) throws Exception { // start the transform - final Request stopTransformRequest = createRequestWithAuth("POST", TRANSFORM_ENDPOINT + transformId + "/_stop", null); + final Request stopTransformRequest = createRequestWithAuth("POST", getTransformEndpoint() + transformId + "/_stop", null); stopTransformRequest.addParameter(TransformField.FORCE.getPreferredName(), Boolean.toString(force)); stopTransformRequest.addParameter(TransformField.WAIT_FOR_COMPLETION.getPreferredName(), Boolean.toString(true)); Map stopTransformResponse = entityAsMap(client().performRequest(stopTransformRequest)); @@ -317,7 +338,7 @@ public abstract class TransformRestTestCase extends ESRestTestCase { @SuppressWarnings("unchecked") private static List> getDataFrameTransforms() throws IOException { - Response response = adminClient().performRequest(new Request("GET", TRANSFORM_ENDPOINT + "_all")); + Response response = adminClient().performRequest(new Request("GET", getTransformEndpoint() + "_all")); Map transforms = entityAsMap(response); List> transformConfigs = (List>) XContentMapValues.extractValue("transforms", transforms); @@ -330,7 +351,7 @@ public abstract class TransformRestTestCase extends ESRestTestCase { } protected static Map getDataFrameState(String transformId) throws IOException { - Response statsResponse = client().performRequest(new Request("GET", TRANSFORM_ENDPOINT + transformId + "/_stats")); + Response statsResponse = client().performRequest(new Request("GET", getTransformEndpoint() + transformId + "/_stats")); List transforms = ((List) entityAsMap(statsResponse).get("transforms")); if (transforms.isEmpty()) { return null; @@ -339,7 +360,7 @@ public abstract class TransformRestTestCase extends ESRestTestCase { } protected static void deleteTransform(String transformId) throws IOException { - Request request = new Request("DELETE", TRANSFORM_ENDPOINT + transformId); + Request request = new Request("DELETE", getTransformEndpoint() + transformId); request.addParameter("ignore", "404"); // Ignore 404s because they imply someone was racing us to delete this adminClient().performRequest(request); } @@ -361,7 +382,7 @@ public abstract class TransformRestTestCase extends ESRestTestCase { List> transformConfigs = getDataFrameTransforms(); for (Map transformConfig : transformConfigs) { String transformId = (String) transformConfig.get("id"); - Request request = new Request("POST", TRANSFORM_ENDPOINT + transformId + "/_stop"); + Request request = new Request("POST", getTransformEndpoint() + transformId + "/_stop"); request.addParameter("wait_for_completion", "true"); request.addParameter("timeout", "10s"); request.addParameter("ignore", "404"); @@ -403,7 +424,7 @@ public abstract class TransformRestTestCase extends ESRestTestCase { } static int getDataFrameCheckpoint(String 
transformId) throws IOException { - Response statsResponse = client().performRequest(new Request("GET", TRANSFORM_ENDPOINT + transformId + "/_stats")); + Response statsResponse = client().performRequest(new Request("GET", getTransformEndpoint() + transformId + "/_stats")); Map transformStatsAsMap = (Map) ((List) entityAsMap(statsResponse).get("transforms")).get(0); return (int) XContentMapValues.extractValue("checkpointing.last.checkpoint", transformStatsAsMap); @@ -431,4 +452,8 @@ public abstract class TransformRestTestCase extends ESRestTestCase { + "}"); client().performRequest(request); } + + protected static String getTransformEndpoint() { + return useDeprecatedEndpoints ? TransformField.REST_BASE_PATH_TRANSFORMS_DEPRECATED : TransformField.REST_BASE_PATH_TRANSFORMS; + } } diff --git a/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformUsageIT.java b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformUsageIT.java index f46c480e53c5..128eb64e9a23 100644 --- a/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformUsageIT.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformUsageIT.java @@ -64,7 +64,7 @@ public class TransformUsageIT extends TransformRestTestCase { startAndWaitForContinuousTransform("test_usage_continuous", "pivot_reviews_continuous", null); - Request getRequest = new Request("GET", TRANSFORM_ENDPOINT + "test_usage/_stats"); + Request getRequest = new Request("GET", getTransformEndpoint() + "test_usage/_stats"); Map stats = entityAsMap(client().performRequest(getRequest)); Map expectedStats = new HashMap<>(); for(String statName : PROVIDED_STATS) { diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java index 70c33c373480..8c1e4fdf9348 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java @@ -53,6 +53,14 @@ import org.elasticsearch.xpack.core.transform.action.PutTransformAction; import org.elasticsearch.xpack.core.transform.action.StartTransformAction; import org.elasticsearch.xpack.core.transform.action.StopTransformAction; import org.elasticsearch.xpack.core.transform.action.UpdateTransformAction; +import org.elasticsearch.xpack.core.transform.action.compat.DeleteTransformActionDeprecated; +import org.elasticsearch.xpack.core.transform.action.compat.GetTransformActionDeprecated; +import org.elasticsearch.xpack.core.transform.action.compat.GetTransformStatsActionDeprecated; +import org.elasticsearch.xpack.core.transform.action.compat.PreviewTransformActionDeprecated; +import org.elasticsearch.xpack.core.transform.action.compat.PutTransformActionDeprecated; +import org.elasticsearch.xpack.core.transform.action.compat.StartTransformActionDeprecated; +import org.elasticsearch.xpack.core.transform.action.compat.StopTransformActionDeprecated; +import org.elasticsearch.xpack.core.transform.action.compat.UpdateTransformActionDeprecated; import org.elasticsearch.xpack.core.transform.transforms.persistence.TransformInternalIndexConstants; import org.elasticsearch.xpack.transform.action.TransportDeleteTransformAction; import 
org.elasticsearch.xpack.transform.action.TransportGetTransformAction; @@ -62,6 +70,14 @@ import org.elasticsearch.xpack.transform.action.TransportPutTransformAction; import org.elasticsearch.xpack.transform.action.TransportStartTransformAction; import org.elasticsearch.xpack.transform.action.TransportStopTransformAction; import org.elasticsearch.xpack.transform.action.TransportUpdateTransformAction; +import org.elasticsearch.xpack.transform.action.compat.TransportDeleteTransformActionDeprecated; +import org.elasticsearch.xpack.transform.action.compat.TransportGetTransformActionDeprecated; +import org.elasticsearch.xpack.transform.action.compat.TransportGetTransformStatsActionDeprecated; +import org.elasticsearch.xpack.transform.action.compat.TransportPreviewTransformActionDeprecated; +import org.elasticsearch.xpack.transform.action.compat.TransportPutTransformActionDeprecated; +import org.elasticsearch.xpack.transform.action.compat.TransportStartTransformActionDeprecated; +import org.elasticsearch.xpack.transform.action.compat.TransportStopTransformActionDeprecated; +import org.elasticsearch.xpack.transform.action.compat.TransportUpdateTransformActionDeprecated; import org.elasticsearch.xpack.transform.checkpoint.TransformCheckpointService; import org.elasticsearch.xpack.transform.notifications.TransformAuditor; import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; @@ -74,6 +90,14 @@ import org.elasticsearch.xpack.transform.rest.action.RestPutTransformAction; import org.elasticsearch.xpack.transform.rest.action.RestStartTransformAction; import org.elasticsearch.xpack.transform.rest.action.RestStopTransformAction; import org.elasticsearch.xpack.transform.rest.action.RestUpdateTransformAction; +import org.elasticsearch.xpack.transform.rest.action.compat.RestDeleteTransformActionDeprecated; +import org.elasticsearch.xpack.transform.rest.action.compat.RestGetTransformActionDeprecated; +import org.elasticsearch.xpack.transform.rest.action.compat.RestGetTransformStatsActionDeprecated; +import org.elasticsearch.xpack.transform.rest.action.compat.RestPreviewTransformActionDeprecated; +import org.elasticsearch.xpack.transform.rest.action.compat.RestPutTransformActionDeprecated; +import org.elasticsearch.xpack.transform.rest.action.compat.RestStartTransformActionDeprecated; +import org.elasticsearch.xpack.transform.rest.action.compat.RestStopTransformActionDeprecated; +import org.elasticsearch.xpack.transform.rest.action.compat.RestUpdateTransformActionDeprecated; import org.elasticsearch.xpack.transform.transforms.TransformPersistentTasksExecutor; import org.elasticsearch.xpack.transform.transforms.TransformTask; @@ -98,7 +122,7 @@ public class Transform extends Plugin implements ActionPlugin, PersistentTaskPlu private final boolean enabled; private final Settings settings; - private final SetOnce transformsConfigManager = new SetOnce<>(); + private final SetOnce transformConfigManager = new SetOnce<>(); private final SetOnce transformAuditor = new SetOnce<>(); private final SetOnce transformCheckpointService = new SetOnce<>(); private final SetOnce schedulerEngine = new SetOnce<>(); @@ -127,7 +151,17 @@ public class Transform extends Plugin implements ActionPlugin, PersistentTaskPlu new RestGetTransformAction(restController), new RestGetTransformStatsAction(restController), new RestPreviewTransformAction(restController), - new RestUpdateTransformAction(restController) + new RestUpdateTransformAction(restController), + + // deprecated endpoints, to be removed for 8.0.0 + new 
RestPutTransformActionDeprecated(restController), + new RestStartTransformActionDeprecated(restController), + new RestStopTransformActionDeprecated(restController), + new RestDeleteTransformActionDeprecated(restController), + new RestGetTransformActionDeprecated(restController), + new RestGetTransformStatsActionDeprecated(restController), + new RestPreviewTransformActionDeprecated(restController), + new RestUpdateTransformActionDeprecated(restController) ); } @@ -148,6 +182,17 @@ public class Transform extends Plugin implements ActionPlugin, PersistentTaskPlu new ActionHandler<>(GetTransformStatsAction.INSTANCE, TransportGetTransformStatsAction.class), new ActionHandler<>(PreviewTransformAction.INSTANCE, TransportPreviewTransformAction.class), new ActionHandler<>(UpdateTransformAction.INSTANCE, TransportUpdateTransformAction.class), + + // deprecated actions, to be removed for 8.0.0 + new ActionHandler<>(PutTransformActionDeprecated.INSTANCE, TransportPutTransformActionDeprecated.class), + new ActionHandler<>(StartTransformActionDeprecated.INSTANCE, TransportStartTransformActionDeprecated.class), + new ActionHandler<>(StopTransformActionDeprecated.INSTANCE, TransportStopTransformActionDeprecated.class), + new ActionHandler<>(DeleteTransformActionDeprecated.INSTANCE, TransportDeleteTransformActionDeprecated.class), + new ActionHandler<>(GetTransformActionDeprecated.INSTANCE, TransportGetTransformActionDeprecated.class), + new ActionHandler<>(GetTransformStatsActionDeprecated.INSTANCE, TransportGetTransformStatsActionDeprecated.class), + new ActionHandler<>(PreviewTransformActionDeprecated.INSTANCE, TransportPreviewTransformActionDeprecated.class), + new ActionHandler<>(UpdateTransformActionDeprecated.INSTANCE, TransportUpdateTransformActionDeprecated.class), + usageAction, infoAction); } @@ -159,7 +204,7 @@ public class Transform extends Plugin implements ActionPlugin, PersistentTaskPlu } FixedExecutorBuilder indexing = new FixedExecutorBuilder(settings, TASK_THREAD_POOL_NAME, 4, 4, - "data_frame.task_thread_pool"); + "transform.task_thread_pool"); return Collections.singletonList(indexing); } @@ -172,12 +217,12 @@ public class Transform extends Plugin implements ActionPlugin, PersistentTaskPlu return emptyList(); } transformAuditor.set(new TransformAuditor(client, clusterService.getNodeName())); - transformsConfigManager.set(new TransformConfigManager(client, xContentRegistry)); + transformConfigManager.set(new TransformConfigManager(client, xContentRegistry)); transformCheckpointService.set(new TransformCheckpointService(client, - transformsConfigManager.get(), + transformConfigManager.get(), transformAuditor.get())); - return Arrays.asList(transformsConfigManager.get(), transformAuditor.get(), transformCheckpointService.get()); + return Arrays.asList(transformConfigManager.get(), transformAuditor.get(), transformCheckpointService.get()); } @Override @@ -208,14 +253,14 @@ public class Transform extends Plugin implements ActionPlugin, PersistentTaskPlu schedulerEngine.set(new SchedulerEngine(settings, Clock.systemUTC())); // the transforms config manager should have been created - assert transformsConfigManager.get() != null; + assert transformConfigManager.get() != null; // the auditor should have been created assert transformAuditor.get() != null; assert transformCheckpointService.get() != null; return Collections.singletonList( new TransformPersistentTasksExecutor(client, - transformsConfigManager.get(), + transformConfigManager.get(), transformCheckpointService.get(), 
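// The Rest*ActionDeprecated handlers registered above are not shown in this patch; presumably each
// binds the old "/_data_frame/transforms" route and emits the same message as the deprecated REST
// specs. A sketch, assuming the 7.x RestController deprecation hook, e.g. for the put handler:
//   controller.registerAsDeprecatedHandler(RestRequest.Method.PUT,
//       TransformField.REST_BASE_PATH_TRANSFORMS_DEPRECATED + "{id}", this,
//       "[_data_frame/transforms/] is deprecated, use [_transform/] in the future.", deprecationLogger);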
schedulerEngine.get(), transformAuditor.get(), diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportDeleteTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportDeleteTransformAction.java index 7ec128a3fccd..c9d97213f540 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportDeleteTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportDeleteTransformAction.java @@ -24,8 +24,8 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.transform.action.DeleteTransformAction; -import org.elasticsearch.xpack.core.transform.action.StopTransformAction; import org.elasticsearch.xpack.core.transform.action.DeleteTransformAction.Request; +import org.elasticsearch.xpack.core.transform.action.StopTransformAction; import org.elasticsearch.xpack.transform.notifications.TransformAuditor; import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; @@ -36,18 +36,25 @@ import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; public class TransportDeleteTransformAction extends TransportMasterNodeAction { - private final TransformConfigManager transformsConfigManager; + private final TransformConfigManager transformConfigManager; private final TransformAuditor auditor; private final Client client; @Inject public TransportDeleteTransformAction(TransportService transportService, ActionFilters actionFilters, ThreadPool threadPool, - ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, - TransformConfigManager transformsConfigManager, TransformAuditor auditor, - Client client) { - super(DeleteTransformAction.NAME, transportService, clusterService, threadPool, actionFilters, - Request::new, indexNameExpressionResolver); - this.transformsConfigManager = transformsConfigManager; + ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, + TransformConfigManager transformsConfigManager, TransformAuditor auditor, + Client client) { + this(DeleteTransformAction.NAME, transportService, actionFilters, threadPool, clusterService, indexNameExpressionResolver, + transformsConfigManager, auditor, client); + } + + protected TransportDeleteTransformAction(String name, TransportService transportService, ActionFilters actionFilters, + ThreadPool threadPool, ClusterService clusterService, + IndexNameExpressionResolver indexNameExpressionResolver, + TransformConfigManager transformConfigManager, TransformAuditor auditor, Client client) { + super(name, transportService, clusterService, threadPool, actionFilters, Request::new, indexNameExpressionResolver); + this.transformConfigManager = transformConfigManager; this.auditor = auditor; this.client = client; } @@ -71,7 +78,7 @@ public class TransportDeleteTransformAction extends TransportMasterNodeAction stopTransformActionListener = ActionListener.wrap( - stopResponse -> transformsConfigManager.deleteTransform(request.getId(), + stopResponse -> transformConfigManager.deleteTransform(request.getId(), ActionListener.wrap( r -> { auditor.info(request.getId(), "Deleted transform."); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformAction.java 
b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformAction.java
index 6e6813aa22c6..7e65d0d7f49d 100644
--- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformAction.java
+++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformAction.java
@@ -38,9 +38,14 @@ public class TransportGetTransformAction extends AbstractTransportGetResourcesAction<TransformConfig, Request,
         Response> {
 
     @Inject
-    public TransportGetTransformAction(TransportService transportService, ActionFilters actionFilters,
-                                       Client client, NamedXContentRegistry xContentRegistry) {
-        super(GetTransformAction.NAME, transportService, actionFilters, Request::new, client, xContentRegistry);
+    public TransportGetTransformAction(TransportService transportService, ActionFilters actionFilters, Client client,
+                                       NamedXContentRegistry xContentRegistry) {
+        this(GetTransformAction.NAME, transportService, actionFilters, client, xContentRegistry);
+    }
+
+    protected TransportGetTransformAction(String name, TransportService transportService, ActionFilters actionFilters, Client client,
+                                          NamedXContentRegistry xContentRegistry) {
+        super(name, transportService, actionFilters, Request::new, client, xContentRegistry);
     }
 
     @Override
diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformStatsAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformStatsAction.java
index 3941fb62075e..57397cf2c0b3 100644
--- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformStatsAction.java
+++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformStatsAction.java
@@ -59,14 +59,22 @@ public class TransportGetTransformStatsAction extends
 
     @Inject
     public TransportGetTransformStatsAction(TransportService transportService, ActionFilters actionFilters, ClusterService clusterService,
-                                            TransformConfigManager transformsConfigManager,
+                                            TransformConfigManager transformConfigManager,
                                             TransformCheckpointService transformsCheckpointService) {
-        super(GetTransformStatsAction.NAME, clusterService, transportService, actionFilters, Request::new, Response::new,
+        this(GetTransformStatsAction.NAME, transportService, actionFilters, clusterService, transformConfigManager,
+            transformsCheckpointService);
+    }
+
+    protected TransportGetTransformStatsAction(String name, TransportService transportService, ActionFilters actionFilters,
+                                               ClusterService clusterService, TransformConfigManager transformsConfigManager,
+                                               TransformCheckpointService transformsCheckpointService) {
+        super(name, clusterService, transportService, actionFilters, Request::new, Response::new,
             Response::new, ThreadPool.Names.SAME);
         this.transformConfigManager = transformsConfigManager;
         this.transformCheckpointService = transformsCheckpointService;
     }
 
+    @Override
     protected Response newResponse(Request request, List<Response> tasks, List<TaskOperationFailure> taskOperationFailures,
                                    List<FailedNodeException> failedNodeExceptions) {
diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java
index 8780f1baa0a0..7ee6ab5a05e0 100644
--- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java
+++
b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java @@ -42,9 +42,9 @@ import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.transform.TransformField; import org.elasticsearch.xpack.core.transform.TransformMessages; import org.elasticsearch.xpack.core.transform.action.PreviewTransformAction; -import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; -import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; import org.elasticsearch.xpack.core.transform.transforms.SourceConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; import org.elasticsearch.xpack.transform.transforms.pivot.AggregationResultUtils; import org.elasticsearch.xpack.transform.transforms.pivot.Pivot; @@ -73,7 +73,15 @@ public class TransportPreviewTransformAction extends Client client, ThreadPool threadPool, XPackLicenseState licenseState, IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService) { - super(PreviewTransformAction.NAME,transportService, actionFilters, PreviewTransformAction.Request::new); + this(PreviewTransformAction.NAME,transportService, actionFilters, client, threadPool, licenseState, indexNameExpressionResolver, + clusterService); + } + + protected TransportPreviewTransformAction(String name, TransportService transportService, ActionFilters actionFilters, + Client client, ThreadPool threadPool, XPackLicenseState licenseState, + IndexNameExpressionResolver indexNameExpressionResolver, + ClusterService clusterService) { + super(name, transportService, actionFilters, PreviewTransformAction.Request::new); this.licenseState = licenseState; this.client = client; this.threadPool = threadPool; diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java index 36309526f17e..2d3d6c76e46f 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java @@ -62,7 +62,7 @@ public class TransportPutTransformAction extends TransportMasterNodeAction Put our transform ActionListener pivotValidationListener = ActionListener.wrap( - validationResult -> transformsConfigManager.putTransformConfiguration(config, putTransformConfigurationListener), + validationResult -> transformConfigManager.putTransformConfiguration(config, putTransformConfigurationListener), validationException -> { if (validationException instanceof ElasticsearchStatusException) { listener.onFailure(new ElasticsearchStatusException( diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java index af35d3704f27..0971814427b7 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java @@ -37,9 +37,9 @@ import org.elasticsearch.xpack.core.ClientHelper; import 
org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.transform.TransformMessages; import org.elasticsearch.xpack.core.transform.action.StartTransformAction; -import org.elasticsearch.xpack.core.transform.transforms.TransformTaskParams; import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; import org.elasticsearch.xpack.core.transform.transforms.TransformState; +import org.elasticsearch.xpack.core.transform.transforms.TransformTaskParams; import org.elasticsearch.xpack.core.transform.transforms.TransformTaskState; import org.elasticsearch.xpack.transform.notifications.TransformAuditor; import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; @@ -62,7 +62,7 @@ public class TransportStartTransformAction extends private static final Logger logger = LogManager.getLogger(TransportStartTransformAction.class); private final XPackLicenseState licenseState; - private final TransformConfigManager transformsConfigManager; + private final TransformConfigManager transformConfigManager; private final PersistentTasksService persistentTasksService; private final Client client; private final TransformAuditor auditor; @@ -71,13 +71,23 @@ public class TransportStartTransformAction extends public TransportStartTransformAction(TransportService transportService, ActionFilters actionFilters, ClusterService clusterService, XPackLicenseState licenseState, ThreadPool threadPool, IndexNameExpressionResolver indexNameExpressionResolver, - TransformConfigManager transformsConfigManager, + TransformConfigManager transformConfigManager, PersistentTasksService persistentTasksService, Client client, TransformAuditor auditor) { - super(StartTransformAction.NAME, transportService, clusterService, threadPool, actionFilters, - StartTransformAction.Request::new, indexNameExpressionResolver); + this(StartTransformAction.NAME, transportService, actionFilters, clusterService, licenseState, threadPool, + indexNameExpressionResolver, transformConfigManager, persistentTasksService, client, auditor); + } + + protected TransportStartTransformAction(String name, TransportService transportService, ActionFilters actionFilters, + ClusterService clusterService, XPackLicenseState licenseState, + ThreadPool threadPool, IndexNameExpressionResolver indexNameExpressionResolver, + TransformConfigManager transformConfigManager, + PersistentTasksService persistentTasksService, Client client, + TransformAuditor auditor) { + super(name, transportService, clusterService, threadPool, actionFilters, StartTransformAction.Request::new, + indexNameExpressionResolver); this.licenseState = licenseState; - this.transformsConfigManager = transformsConfigManager; + this.transformConfigManager = transformConfigManager; this.persistentTasksService = persistentTasksService; this.client = client; this.auditor = auditor; @@ -209,7 +219,7 @@ public class TransportStartTransformAction extends ); // <1> Get the config to verify it exists and is valid - transformsConfigManager.getTransformConfiguration(request.getId(), getTransformListener); + transformConfigManager.getTransformConfiguration(request.getId(), getTransformListener); } private void createDestinationIndex(final TransformConfig config, final ActionListener listener) { diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java index 5526dd0c9f05..088e8026153a 100644 
--- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java @@ -54,7 +54,7 @@ public class TransportStopTransformAction extends TransportTasksAction { diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpdateTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpdateTransformAction.java index 815910efe8bf..ddd6b7c26cf5 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpdateTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpdateTransformAction.java @@ -47,9 +47,9 @@ import org.elasticsearch.xpack.core.transform.action.UpdateTransformAction.Respo import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; import org.elasticsearch.xpack.core.transform.transforms.TransformConfigUpdate; import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import org.elasticsearch.xpack.transform.persistence.SeqNoPrimaryTermAndIndex; import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; import org.elasticsearch.xpack.transform.persistence.TransformIndex; -import org.elasticsearch.xpack.transform.persistence.SeqNoPrimaryTermAndIndex; import org.elasticsearch.xpack.transform.transforms.SourceDestValidator; import org.elasticsearch.xpack.transform.transforms.pivot.Pivot; @@ -66,7 +66,7 @@ public class TransportUpdateTransformAction extends TransportMasterNodeAction { final TransformConfig config = configAndVersion.v1(); // If it is a noop don't bother even writing the doc, save the cycles, just return here. @@ -197,7 +206,7 @@ public class TransportUpdateTransformAction extends TransportMasterNodeAction putTransformConfigurationListener = ActionListener.wrap( putTransformConfigurationResult -> { auditor.info(config.getId(), "updated transform."); - transformsConfigManager.deleteOldTransformConfigurations(request.getId(), ActionListener.wrap( + transformConfigManager.deleteOldTransformConfigurations(request.getId(), ActionListener.wrap( r -> { logger.trace("[{}] successfully deleted old transform configurations", request.getId()); listener.onResponse(new Response(config)); @@ -217,7 +226,7 @@ public class TransportUpdateTransformAction extends TransportMasterNodeAction Update our transform ActionListener createDestinationListener = ActionListener.wrap( - createDestResponse -> transformsConfigManager.updateTransformConfiguration(config, + createDestResponse -> transformConfigManager.updateTransformConfiguration(config, seqNoPrimaryTermAndIndex, putTransformConfigurationListener), listener::onFailure diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportDeleteTransformActionDeprecated.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportDeleteTransformActionDeprecated.java new file mode 100644 index 000000000000..5b6128a629d6 --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportDeleteTransformActionDeprecated.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.transform.action.compat; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.transform.action.compat.DeleteTransformActionDeprecated; +import org.elasticsearch.xpack.transform.action.TransportDeleteTransformAction; +import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; + +public class TransportDeleteTransformActionDeprecated extends TransportDeleteTransformAction{ + + @Inject + public TransportDeleteTransformActionDeprecated(TransportService transportService, ActionFilters actionFilters, ThreadPool threadPool, + ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, + TransformConfigManager transformsConfigManager, TransformAuditor auditor, + Client client) { + super(DeleteTransformActionDeprecated.NAME, transportService, actionFilters, threadPool, clusterService, + indexNameExpressionResolver, transformsConfigManager, auditor, client); + } +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportGetTransformActionDeprecated.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportGetTransformActionDeprecated.java new file mode 100644 index 000000000000..581fa92e7180 --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportGetTransformActionDeprecated.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.transform.action.compat; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.transform.action.compat.GetTransformActionDeprecated; +import org.elasticsearch.xpack.transform.action.TransportGetTransformAction; + +public class TransportGetTransformActionDeprecated extends TransportGetTransformAction { + + @Inject + public TransportGetTransformActionDeprecated(TransportService transportService, ActionFilters actionFilters, Client client, + NamedXContentRegistry xContentRegistry) { + super(GetTransformActionDeprecated.NAME, transportService, actionFilters, client, xContentRegistry); + } + +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportGetTransformStatsActionDeprecated.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportGetTransformStatsActionDeprecated.java new file mode 100644 index 000000000000..886f3c76e1fe --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportGetTransformStatsActionDeprecated.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.transform.action.compat; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.transform.action.compat.GetTransformStatsActionDeprecated; +import org.elasticsearch.xpack.transform.action.TransportGetTransformStatsAction; +import org.elasticsearch.xpack.transform.checkpoint.TransformCheckpointService; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; + +public class TransportGetTransformStatsActionDeprecated extends TransportGetTransformStatsAction { + + @Inject + public TransportGetTransformStatsActionDeprecated(TransportService transportService, ActionFilters actionFilters, + ClusterService clusterService, + TransformConfigManager transformsConfigManager, + TransformCheckpointService transformsCheckpointService) { + super(GetTransformStatsActionDeprecated.NAME, transportService, actionFilters, clusterService, transformsConfigManager, + transformsCheckpointService); + } +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportPreviewTransformActionDeprecated.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportPreviewTransformActionDeprecated.java new file mode 100644 index 000000000000..8882fcf505b8 --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportPreviewTransformActionDeprecated.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.transform.action.compat; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.transform.action.compat.PreviewTransformActionDeprecated; +import org.elasticsearch.xpack.transform.action.TransportPreviewTransformAction; + +public class TransportPreviewTransformActionDeprecated extends TransportPreviewTransformAction { + + @Inject + public TransportPreviewTransformActionDeprecated(TransportService transportService, ActionFilters actionFilters, + Client client, ThreadPool threadPool, XPackLicenseState licenseState, + IndexNameExpressionResolver indexNameExpressionResolver, + ClusterService clusterService) { + super(PreviewTransformActionDeprecated.NAME, transportService, actionFilters, client, threadPool, licenseState, + indexNameExpressionResolver, clusterService); + } + +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportPutTransformActionDeprecated.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportPutTransformActionDeprecated.java new file mode 100644 index 000000000000..69d8a427d66c --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportPutTransformActionDeprecated.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.transform.action.compat; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.transform.action.compat.PutTransformActionDeprecated; +import org.elasticsearch.xpack.transform.action.TransportPutTransformAction; +import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; + +public class TransportPutTransformActionDeprecated extends TransportPutTransformAction { + + @Inject + public TransportPutTransformActionDeprecated(Settings settings, TransportService transportService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + ClusterService clusterService, XPackLicenseState licenseState, + TransformConfigManager transformConfigManager, Client client, + TransformAuditor auditor) { + super(PutTransformActionDeprecated.NAME, settings, transportService, threadPool, actionFilters, indexNameExpressionResolver, + clusterService, licenseState, transformConfigManager, client, auditor); + } + +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportStartTransformActionDeprecated.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportStartTransformActionDeprecated.java new file mode 100644 index 000000000000..fa67e84fd6d9 --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportStartTransformActionDeprecated.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.transform.action.compat; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.persistent.PersistentTasksService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.transform.action.compat.StartTransformActionDeprecated; +import org.elasticsearch.xpack.transform.action.TransportStartTransformAction; +import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; + +public class TransportStartTransformActionDeprecated extends TransportStartTransformAction { + + @Inject + public TransportStartTransformActionDeprecated(TransportService transportService, ActionFilters actionFilters, + ClusterService clusterService, XPackLicenseState licenseState, + ThreadPool threadPool, IndexNameExpressionResolver indexNameExpressionResolver, + TransformConfigManager transformConfigManager, + PersistentTasksService persistentTasksService, Client client, + TransformAuditor auditor) { + super(StartTransformActionDeprecated.NAME, transportService, actionFilters, clusterService, licenseState, threadPool, + indexNameExpressionResolver, transformConfigManager, persistentTasksService, client, auditor); + } +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportStopTransformActionDeprecated.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportStopTransformActionDeprecated.java new file mode 100644 index 000000000000..ac2229d611ee --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportStopTransformActionDeprecated.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.transform.action.compat; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.persistent.PersistentTasksService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.transform.action.compat.StopTransformActionDeprecated; +import org.elasticsearch.xpack.transform.action.TransportStopTransformAction; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; + +public class TransportStopTransformActionDeprecated extends TransportStopTransformAction { + + @Inject + public TransportStopTransformActionDeprecated(TransportService transportService, ActionFilters actionFilters, + ClusterService clusterService, ThreadPool threadPool, + PersistentTasksService persistentTasksService, + TransformConfigManager transformConfigManager, + Client client) { + super(StopTransformActionDeprecated.NAME, transportService, actionFilters, clusterService, threadPool, persistentTasksService, + transformConfigManager, client); + } + +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportUpdateTransformActionDeprecated.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportUpdateTransformActionDeprecated.java new file mode 100644 index 000000000000..c3575d599107 --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/compat/TransportUpdateTransformActionDeprecated.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.transform.action.compat; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.transform.action.compat.UpdateTransformActionDeprecated; +import org.elasticsearch.xpack.transform.action.TransportUpdateTransformAction; +import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; + +public class TransportUpdateTransformActionDeprecated extends TransportUpdateTransformAction { + + @Inject + public TransportUpdateTransformActionDeprecated(Settings settings, TransportService transportService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + ClusterService clusterService, XPackLicenseState licenseState, + TransformConfigManager transformConfigManager, Client client, + TransformAuditor auditor) { + super(UpdateTransformActionDeprecated.NAME, settings, transportService, threadPool, actionFilters, indexNameExpressionResolver, + clusterService, licenseState, transformConfigManager, client, auditor); + } + +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestDeleteTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestDeleteTransformAction.java index a240801420a5..d6dbe7e468a3 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestDeleteTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestDeleteTransformAction.java @@ -36,6 +36,6 @@ public class RestDeleteTransformAction extends BaseRestHandler { @Override public String getName() { - return "data_frame_delete_transform_action"; + return "transform_delete_transform_action"; } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestGetTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestGetTransformAction.java index c329500b2066..23484f0e9408 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestGetTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestGetTransformAction.java @@ -41,6 +41,6 @@ public class RestGetTransformAction extends BaseRestHandler { @Override public String getName() { - return "data_frame_get_transforms_action"; + return "transform_get_transform_action"; } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestGetTransformStatsAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestGetTransformStatsAction.java index 61b8d60b3cc4..662f9840d026 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestGetTransformStatsAction.java +++ 
b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestGetTransformStatsAction.java @@ -40,6 +40,6 @@ public class RestGetTransformStatsAction extends BaseRestHandler { @Override public String getName() { - return "data_frame_get_transforms_stats_action"; + return "transform_get_transform_stats_action"; } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestPreviewTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestPreviewTransformAction.java index 3bba1574a453..114c88b761c6 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestPreviewTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestPreviewTransformAction.java @@ -20,12 +20,12 @@ import java.io.IOException; public class RestPreviewTransformAction extends BaseRestHandler { public RestPreviewTransformAction(RestController controller) { - controller.registerHandler(RestRequest.Method.POST, TransformField.REST_BASE_PATH + "transforms/_preview", this); + controller.registerHandler(RestRequest.Method.POST, TransformField.REST_BASE_PATH_TRANSFORMS + "_preview", this); } @Override public String getName() { - return "data_frame_preview_transform_action"; + return "transform_preview_transform_action"; } @Override diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestPutTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestPutTransformAction.java index 3b87c13f5d25..b61d81fb6e13 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestPutTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestPutTransformAction.java @@ -25,7 +25,7 @@ public class RestPutTransformAction extends BaseRestHandler { @Override public String getName() { - return "data_frame_put_transform_action"; + return "transform_put_transform_action"; } @Override diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestStartTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestStartTransformAction.java index d016f58bb798..ac6bb368cffa 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestStartTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestStartTransformAction.java @@ -32,6 +32,6 @@ public class RestStartTransformAction extends BaseRestHandler { @Override public String getName() { - return "data_frame_start_transform_action"; + return "transform_start_transform_action"; } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestStopTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestStopTransformAction.java index 49ba802e5b6f..b038ea79d991 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestStopTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestStopTransformAction.java @@ -42,6 +42,6 @@ public class RestStopTransformAction extends BaseRestHandler { @Override public String getName() { - return 
"data_frame_stop_transform_action"; + return "transform_stop_transform_action"; } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestUpdateTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestUpdateTransformAction.java index 1e67b8b912e6..563c97864130 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestUpdateTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestUpdateTransformAction.java @@ -25,7 +25,7 @@ public class RestUpdateTransformAction extends BaseRestHandler { @Override public String getName() { - return "data_frame_update_transform_action"; + return "transform_update_transform_action"; } @Override diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestDeleteTransformActionDeprecated.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestDeleteTransformActionDeprecated.java new file mode 100644 index 000000000000..94d4b5c65433 --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestDeleteTransformActionDeprecated.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.transform.rest.action.compat; + + +import org.apache.logging.log4j.LogManager; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.action.DeleteTransformAction; +import org.elasticsearch.xpack.core.transform.action.compat.DeleteTransformActionDeprecated; + +public class RestDeleteTransformActionDeprecated extends BaseRestHandler { + + private static final DeprecationLogger deprecationLogger = new DeprecationLogger( + LogManager.getLogger(RestDeleteTransformActionDeprecated.class)); + + public RestDeleteTransformActionDeprecated(RestController controller) { + controller.registerAsDeprecatedHandler(RestRequest.Method.DELETE, TransformField.REST_BASE_PATH_TRANSFORMS_BY_ID_DEPRECATED, this, + TransformMessages.REST_DEPRECATED_ENDPOINT, deprecationLogger); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + if (restRequest.hasContent()) { + throw new IllegalArgumentException("delete transform requests can not have a request body"); + } + + String id = restRequest.param(TransformField.ID.getPreferredName()); + boolean force = restRequest.paramAsBoolean(TransformField.FORCE.getPreferredName(), false); + DeleteTransformAction.Request request = new DeleteTransformAction.Request(id, force); + + return channel -> client.execute(DeleteTransformActionDeprecated.INSTANCE, request, + new RestToXContentListener<>(channel)); + } + + @Override + public String getName() { + return "data_frame_delete_transform_action"; + } +} diff --git 
a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestGetTransformActionDeprecated.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestGetTransformActionDeprecated.java new file mode 100644 index 000000000000..ca92942a52dd --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestGetTransformActionDeprecated.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.transform.rest.action.compat; + +import org.apache.logging.log4j.LogManager; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.action.util.PageParams; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.action.GetTransformAction; +import org.elasticsearch.xpack.core.transform.action.compat.GetTransformActionDeprecated; + +import static org.elasticsearch.xpack.core.transform.TransformField.ALLOW_NO_MATCH; + +public class RestGetTransformActionDeprecated extends BaseRestHandler { + + private static final DeprecationLogger deprecationLogger = new DeprecationLogger( + LogManager.getLogger(RestGetTransformActionDeprecated.class)); + + public RestGetTransformActionDeprecated(RestController controller) { + controller.registerAsDeprecatedHandler(RestRequest.Method.GET, TransformField.REST_BASE_PATH_TRANSFORMS_DEPRECATED, this, + TransformMessages.REST_DEPRECATED_ENDPOINT, deprecationLogger); + controller.registerAsDeprecatedHandler(RestRequest.Method.GET, TransformField.REST_BASE_PATH_TRANSFORMS_BY_ID_DEPRECATED, this, + TransformMessages.REST_DEPRECATED_ENDPOINT, deprecationLogger); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + GetTransformAction.Request request = new GetTransformAction.Request(); + + String id = restRequest.param(TransformField.ID.getPreferredName()); + request.setResourceId(id); + request.setAllowNoResources(restRequest.paramAsBoolean(ALLOW_NO_MATCH.getPreferredName(), true)); + if (restRequest.hasParam(PageParams.FROM.getPreferredName()) || restRequest.hasParam(PageParams.SIZE.getPreferredName())) { + request.setPageParams( + new PageParams(restRequest.paramAsInt(PageParams.FROM.getPreferredName(), PageParams.DEFAULT_FROM), + restRequest.paramAsInt(PageParams.SIZE.getPreferredName(), PageParams.DEFAULT_SIZE))); + } + return channel -> client.execute(GetTransformActionDeprecated.INSTANCE, request, new RestToXContentListener<>(channel)); + } + + @Override + public String getName() { + return "data_frame_get_transforms_action"; + } +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestGetTransformStatsActionDeprecated.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestGetTransformStatsActionDeprecated.java new file mode 100644 index 
000000000000..b444ea3beef1 --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestGetTransformStatsActionDeprecated.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.transform.rest.action.compat; + +import org.apache.logging.log4j.LogManager; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.action.util.PageParams; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.action.GetTransformStatsAction; +import org.elasticsearch.xpack.core.transform.action.compat.GetTransformStatsActionDeprecated; + +import static org.elasticsearch.xpack.core.transform.TransformField.ALLOW_NO_MATCH; + +public class RestGetTransformStatsActionDeprecated extends BaseRestHandler { + + private static final DeprecationLogger deprecationLogger = new DeprecationLogger( + LogManager.getLogger(RestGetTransformStatsActionDeprecated.class)); + + public RestGetTransformStatsActionDeprecated(RestController controller) { + controller.registerAsDeprecatedHandler(RestRequest.Method.GET, TransformField.REST_BASE_PATH_TRANSFORMS_DEPRECATED + "_stats", + this, TransformMessages.REST_DEPRECATED_ENDPOINT, deprecationLogger); + controller.registerAsDeprecatedHandler(RestRequest.Method.GET, + TransformField.REST_BASE_PATH_TRANSFORMS_BY_ID_DEPRECATED + "_stats", this, + TransformMessages.REST_DEPRECATED_ENDPOINT, deprecationLogger); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + String id = restRequest.param(TransformField.ID.getPreferredName()); + GetTransformStatsAction.Request request = new GetTransformStatsAction.Request(id); + request.setAllowNoMatch(restRequest.paramAsBoolean(ALLOW_NO_MATCH.getPreferredName(), true)); + if (restRequest.hasParam(PageParams.FROM.getPreferredName()) || restRequest.hasParam(PageParams.SIZE.getPreferredName())) { + request.setPageParams( + new PageParams(restRequest.paramAsInt(PageParams.FROM.getPreferredName(), PageParams.DEFAULT_FROM), + restRequest.paramAsInt(PageParams.SIZE.getPreferredName(), PageParams.DEFAULT_SIZE))); + } + return channel -> client.execute(GetTransformStatsActionDeprecated.INSTANCE, request, + new RestToXContentListener<>(channel)); + } + + @Override + public String getName() { + return "data_frame_get_transforms_stats_action"; + } +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestPreviewTransformActionDeprecated.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestPreviewTransformActionDeprecated.java new file mode 100644 index 000000000000..8c0a994ebbef --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestPreviewTransformActionDeprecated.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.transform.rest.action.compat; + +import org.apache.logging.log4j.LogManager; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.action.PreviewTransformAction; +import org.elasticsearch.xpack.core.transform.action.compat.PreviewTransformActionDeprecated; + +import java.io.IOException; + +public class RestPreviewTransformActionDeprecated extends BaseRestHandler { + + private static final DeprecationLogger deprecationLogger = new DeprecationLogger( + LogManager.getLogger(RestPreviewTransformActionDeprecated.class)); + + public RestPreviewTransformActionDeprecated(RestController controller) { + controller.registerAsDeprecatedHandler(RestRequest.Method.POST, TransformField.REST_BASE_PATH_TRANSFORMS_DEPRECATED + "_preview", + this, TransformMessages.REST_DEPRECATED_ENDPOINT, deprecationLogger); + } + + @Override + public String getName() { + return "data_frame_preview_transform_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + XContentParser parser = restRequest.contentParser(); + + PreviewTransformAction.Request request = PreviewTransformAction.Request.fromXContent(parser); + return channel -> client.execute(PreviewTransformActionDeprecated.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestPutTransformActionDeprecated.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestPutTransformActionDeprecated.java new file mode 100644 index 000000000000..aeac2c91be2f --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestPutTransformActionDeprecated.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.transform.rest.action.compat; + +import org.apache.logging.log4j.LogManager; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.action.PutTransformAction; +import org.elasticsearch.xpack.core.transform.action.compat.PutTransformActionDeprecated; + +import java.io.IOException; + +public class RestPutTransformActionDeprecated extends BaseRestHandler { + + private static final DeprecationLogger deprecationLogger = new DeprecationLogger( + LogManager.getLogger(RestPutTransformActionDeprecated.class)); + + public RestPutTransformActionDeprecated(RestController controller) { + controller.registerAsDeprecatedHandler(RestRequest.Method.PUT, TransformField.REST_BASE_PATH_TRANSFORMS_BY_ID_DEPRECATED, this, + TransformMessages.REST_DEPRECATED_ENDPOINT, deprecationLogger); + } + + @Override + public String getName() { + return "data_frame_put_transform_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String id = restRequest.param(TransformField.ID.getPreferredName()); + XContentParser parser = restRequest.contentParser(); + + boolean deferValidation = restRequest.paramAsBoolean(TransformField.DEFER_VALIDATION.getPreferredName(), false); + PutTransformAction.Request request = PutTransformAction.Request.fromXContent(parser, id, deferValidation); + + return channel -> client.execute(PutTransformActionDeprecated.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestStartTransformActionDeprecated.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestStartTransformActionDeprecated.java new file mode 100644 index 000000000000..796c9833222a --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestStartTransformActionDeprecated.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.transform.rest.action.compat; + +import org.apache.logging.log4j.LogManager; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.action.StartTransformAction; +import org.elasticsearch.xpack.core.transform.action.compat.StartTransformActionDeprecated; + +public class RestStartTransformActionDeprecated extends BaseRestHandler { + + private static final DeprecationLogger deprecationLogger = new DeprecationLogger( + LogManager.getLogger(RestStartTransformActionDeprecated.class)); + + public RestStartTransformActionDeprecated(RestController controller) { + controller.registerAsDeprecatedHandler(RestRequest.Method.POST, + TransformField.REST_BASE_PATH_TRANSFORMS_BY_ID_DEPRECATED + "_start", this, TransformMessages.REST_DEPRECATED_ENDPOINT, + deprecationLogger); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + String id = restRequest.param(TransformField.ID.getPreferredName()); + StartTransformAction.Request request = new StartTransformAction.Request(id); + request.timeout(restRequest.paramAsTime(TransformField.TIMEOUT.getPreferredName(), AcknowledgedRequest.DEFAULT_ACK_TIMEOUT)); + return channel -> client.execute(StartTransformActionDeprecated.INSTANCE, request, + new RestToXContentListener<>(channel)); + } + + @Override + public String getName() { + return "data_frame_start_transform_action"; + } +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestStopTransformActionDeprecated.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestStopTransformActionDeprecated.java new file mode 100644 index 000000000000..29f59397b8a3 --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestStopTransformActionDeprecated.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.transform.rest.action.compat; + +import org.apache.logging.log4j.LogManager; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.action.StopTransformAction; +import org.elasticsearch.xpack.core.transform.action.compat.StopTransformActionDeprecated; + +public class RestStopTransformActionDeprecated extends BaseRestHandler { + + private static final DeprecationLogger deprecationLogger = new DeprecationLogger( + LogManager.getLogger(RestStopTransformActionDeprecated.class)); + + public RestStopTransformActionDeprecated(RestController controller) { + controller.registerAsDeprecatedHandler(RestRequest.Method.POST, TransformField.REST_BASE_PATH_TRANSFORMS_BY_ID_DEPRECATED + "_stop", + this, TransformMessages.REST_DEPRECATED_ENDPOINT, deprecationLogger); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + String id = restRequest.param(TransformField.ID.getPreferredName()); + TimeValue timeout = restRequest.paramAsTime(TransformField.TIMEOUT.getPreferredName(), + StopTransformAction.DEFAULT_TIMEOUT); + boolean waitForCompletion = restRequest.paramAsBoolean(TransformField.WAIT_FOR_COMPLETION.getPreferredName(), false); + boolean force = restRequest.paramAsBoolean(TransformField.FORCE.getPreferredName(), false); + boolean allowNoMatch = restRequest.paramAsBoolean(TransformField.ALLOW_NO_MATCH.getPreferredName(), false); + + + StopTransformAction.Request request = new StopTransformAction.Request(id, + waitForCompletion, + force, + timeout, + allowNoMatch); + + return channel -> client.execute(StopTransformActionDeprecated.INSTANCE, request, + new RestToXContentListener<>(channel)); + } + + @Override + public String getName() { + return "data_frame_stop_transform_action"; + } +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestUpdateTransformActionDeprecated.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestUpdateTransformActionDeprecated.java new file mode 100644 index 000000000000..5be2586f955e --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/compat/RestUpdateTransformActionDeprecated.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.transform.rest.action.compat; + +import org.apache.logging.log4j.LogManager; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.action.UpdateTransformAction; +import org.elasticsearch.xpack.core.transform.action.compat.UpdateTransformActionDeprecated; + +import java.io.IOException; + +public class RestUpdateTransformActionDeprecated extends BaseRestHandler { + + private static final DeprecationLogger deprecationLogger = new DeprecationLogger( + LogManager.getLogger(RestUpdateTransformActionDeprecated.class)); + + public RestUpdateTransformActionDeprecated(RestController controller) { + controller.registerAsDeprecatedHandler(RestRequest.Method.POST, + TransformField.REST_BASE_PATH_TRANSFORMS_BY_ID_DEPRECATED + "_update", this, TransformMessages.REST_DEPRECATED_ENDPOINT, + deprecationLogger); + } + + @Override + public String getName() { + return "data_frame_update_transform_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String id = restRequest.param(TransformField.ID.getPreferredName()); + boolean deferValidation = restRequest.paramAsBoolean(TransformField.DEFER_VALIDATION.getPreferredName(), false); + XContentParser parser = restRequest.contentParser(); + UpdateTransformAction.Request request = UpdateTransformAction.Request.fromXContent(parser, id, deferValidation); + + return channel -> client.execute(UpdateTransformActionDeprecated.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataFrameSurvivesUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TransformSurvivesUpgradeIT.java similarity index 85% rename from x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataFrameSurvivesUpgradeIT.java rename to x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TransformSurvivesUpgradeIT.java index a41cba32058e..74993783a294 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataFrameSurvivesUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TransformSurvivesUpgradeIT.java @@ -5,23 +5,27 @@ */ package org.elasticsearch.upgrades; +import org.apache.http.HttpHost; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.RestClientBuilder; import org.elasticsearch.client.core.IndexerState; import org.elasticsearch.client.transform.GetTransformStatsResponse; -import org.elasticsearch.client.transform.transforms.TransformConfig; -import org.elasticsearch.client.transform.transforms.TransformStats; import org.elasticsearch.client.transform.transforms.DestConfig; import org.elasticsearch.client.transform.transforms.SourceConfig; import 
org.elasticsearch.client.transform.transforms.TimeSyncConfig; +import org.elasticsearch.client.transform.transforms.TransformConfig; +import org.elasticsearch.client.transform.transforms.TransformStats; import org.elasticsearch.client.transform.transforms.pivot.GroupConfig; import org.elasticsearch.client.transform.transforms.pivot.PivotConfig; import org.elasticsearch.client.transform.transforms.pivot.TermsGroupSource; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -51,12 +55,13 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.oneOf; -public class DataFrameSurvivesUpgradeIT extends AbstractUpgradeTestCase { +public class TransformSurvivesUpgradeIT extends AbstractUpgradeTestCase { private static final Version UPGRADE_FROM_VERSION = Version.fromString(System.getProperty("tests.upgrade_from_version")); - private static final String DATAFRAME_ENDPOINT = "/_data_frame/transforms/"; - private static final String CONTINUOUS_DATA_FRAME_ID = "continuous-data-frame-upgrade-job"; - private static final String CONTINUOUS_DATA_FRAME_SOURCE = "data-frame-upgrade-continuous-source"; + private static final String DATAFRAME_ENDPOINT = "/_transform/"; + private static final String DATAFRAME_ENDPOINT_DEPRECATED = "/_data_frame/transforms/"; + private static final String CONTINUOUS_TRANSFORM_ID = "continuous-transform-upgrade-job"; + private static final String CONTINUOUS_TRANSFORM_SOURCE = "transform-upgrade-continuous-source"; private static final List ENTITIES = Stream.iterate(1, n -> n + 1) .limit(5) .map(v -> "user_" + v) @@ -76,6 +81,14 @@ public class DataFrameSurvivesUpgradeIT extends AbstractUpgradeTestCase { waitForPendingTasks(adminClient(), taskName -> taskName.startsWith("data_frame/transforms") == false); } + @Override + protected RestClient buildClient(Settings settings, HttpHost[] hosts) throws IOException { + RestClientBuilder builder = RestClient.builder(hosts); + configureClient(builder, settings); + builder.setStrictDeprecationMode(false); + return builder.build(); + } + /** * The purpose of this test is to ensure that when a job is open through a rolling upgrade we upgrade the results * index mappings when it is assigned to an upgraded node even if no other ML endpoint is called after the upgrade @@ -86,7 +99,9 @@ public class DataFrameSurvivesUpgradeIT extends AbstractUpgradeTestCase { adjustLoggingLevels.setJsonEntity( "{\"transient\": {" + "\"logger.org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer\": \"trace\"," + - "\"logger.org.elasticsearch.xpack.dataframe\": \"trace\"}}"); + "\"logger.org.elasticsearch.xpack.dataframe\": \"trace\"," + + "\"logger.org.elasticsearch.xpack.transform\": \"trace\"" + + "}}"); client().performRequest(adjustLoggingLevels); Request waitForYellow = new Request("GET", "/_cluster/health"); waitForYellow.addParameter("wait_for_nodes", "3"); @@ -115,17 +130,17 @@ public class DataFrameSurvivesUpgradeIT extends AbstractUpgradeTestCase { } private void cleanUpTransforms() throws Exception { - stopTransform(CONTINUOUS_DATA_FRAME_ID); - deleteTransform(CONTINUOUS_DATA_FRAME_ID); + stopTransform(CONTINUOUS_TRANSFORM_ID); + deleteTransform(CONTINUOUS_TRANSFORM_ID); waitForPendingDataFrameTasks(); } 
private void createAndStartContinuousDataFrame() throws Exception { - createIndex(CONTINUOUS_DATA_FRAME_SOURCE); + createIndex(CONTINUOUS_TRANSFORM_SOURCE); long totalDocsWrittenSum = 0; for (TimeValue bucket : BUCKETS) { int docs = randomIntBetween(1, 25); - putData(CONTINUOUS_DATA_FRAME_SOURCE, docs, bucket, ENTITIES); + putData(CONTINUOUS_TRANSFORM_SOURCE, docs, bucket, ENTITIES); totalDocsWrittenSum += docs * ENTITIES.size(); } long totalDocsWritten = totalDocsWrittenSum; @@ -135,18 +150,18 @@ public class DataFrameSurvivesUpgradeIT extends AbstractUpgradeTestCase { .setAggregations(new AggregatorFactories.Builder().addAggregator(AggregationBuilders.avg("stars").field("stars"))) .setGroups(GroupConfig.builder().groupBy("user_id", TermsGroupSource.builder().setField("user_id").build()).build()) .build()) - .setDest(DestConfig.builder().setIndex(CONTINUOUS_DATA_FRAME_ID + "_idx").build()) - .setSource(SourceConfig.builder().setIndex(CONTINUOUS_DATA_FRAME_SOURCE).build()) - .setId(CONTINUOUS_DATA_FRAME_ID) + .setDest(DestConfig.builder().setIndex(CONTINUOUS_TRANSFORM_ID + "_idx").build()) + .setSource(SourceConfig.builder().setIndex(CONTINUOUS_TRANSFORM_SOURCE).build()) + .setId(CONTINUOUS_TRANSFORM_ID) .setFrequency(TimeValue.timeValueSeconds(1)) .build(); - putTransform(CONTINUOUS_DATA_FRAME_ID, config); + putTransform(CONTINUOUS_TRANSFORM_ID, config); - startTransform(CONTINUOUS_DATA_FRAME_ID); - waitUntilAfterCheckpoint(CONTINUOUS_DATA_FRAME_ID, 0L); + startTransform(CONTINUOUS_TRANSFORM_ID); + waitUntilAfterCheckpoint(CONTINUOUS_TRANSFORM_ID, 0L); assertBusy(() -> { - TransformStats stateAndStats = getTransformStats(CONTINUOUS_DATA_FRAME_ID); + TransformStats stateAndStats = getTransformStats(CONTINUOUS_TRANSFORM_ID); assertThat(stateAndStats.getIndexerStats().getOutputDocuments(), equalTo((long)ENTITIES.size())); assertThat(stateAndStats.getIndexerStats().getNumDocuments(), equalTo(totalDocsWritten)); // Even if we get back to started, we may periodically get set back to `indexing` when triggered. 
@@ -156,7 +171,7 @@ public class DataFrameSurvivesUpgradeIT extends AbstractUpgradeTestCase { // We want to make sure our latest state is written before we turn the node off, this makes the testing more reliable - awaitWrittenIndexerState(CONTINUOUS_DATA_FRAME_ID, IndexerState.STARTED.value()); + awaitWrittenIndexerState(CONTINUOUS_TRANSFORM_ID, IndexerState.STARTED.value()); } @SuppressWarnings("unchecked") @@ -165,13 +180,13 @@ public class DataFrameSurvivesUpgradeIT extends AbstractUpgradeTestCase { // A continuous data frame should automatically become started when it gets assigned to a node // if it was assigned to the node that was removed from the cluster assertBusy(() -> { - TransformStats stateAndStats = getTransformStats(CONTINUOUS_DATA_FRAME_ID); + TransformStats stateAndStats = getTransformStats(CONTINUOUS_TRANSFORM_ID); assertThat(stateAndStats.getState(), oneOf(TransformStats.State.STARTED, TransformStats.State.INDEXING)); }, 120, TimeUnit.SECONDS); - TransformStats previousStateAndStats = getTransformStats(CONTINUOUS_DATA_FRAME_ID); + TransformStats previousStateAndStats = getTransformStats(CONTINUOUS_TRANSFORM_ID); // Add a new user and write data to it // This is so we can have more reliable data counts, as writing to existing entities requires @@ -181,20 +196,20 @@ public class DataFrameSurvivesUpgradeIT extends AbstractUpgradeTestCase { int docs = 5; // Index the data // The frequency and delay should see the data once its indexed - putData(CONTINUOUS_DATA_FRAME_SOURCE, docs, TimeValue.timeValueSeconds(0), entities); + putData(CONTINUOUS_TRANSFORM_SOURCE, docs, TimeValue.timeValueSeconds(0), entities); - waitUntilAfterCheckpoint(CONTINUOUS_DATA_FRAME_ID, expectedLastCheckpoint); + waitUntilAfterCheckpoint(CONTINUOUS_TRANSFORM_ID, expectedLastCheckpoint); assertBusy(() -> assertThat( - getTransformStats(CONTINUOUS_DATA_FRAME_ID).getIndexerStats().getNumDocuments(), + getTransformStats(CONTINUOUS_TRANSFORM_ID).getIndexerStats().getNumDocuments(), greaterThanOrEqualTo(docs + previousStateAndStats.getIndexerStats().getNumDocuments())), 120, TimeUnit.SECONDS); - TransformStats stateAndStats = getTransformStats(CONTINUOUS_DATA_FRAME_ID); + TransformStats stateAndStats = getTransformStats(CONTINUOUS_TRANSFORM_ID); assertThat(stateAndStats.getState(), oneOf(TransformStats.State.STARTED, TransformStats.State.INDEXING)); - awaitWrittenIndexerState(CONTINUOUS_DATA_FRAME_ID, (responseBody) -> { + awaitWrittenIndexerState(CONTINUOUS_TRANSFORM_ID, (responseBody) -> { Map indexerStats = (Map)((List)XContentMapValues.extractValue("hits.hits._source.stats", responseBody)) .get(0); @@ -245,33 +260,37 @@ public class DataFrameSurvivesUpgradeIT extends AbstractUpgradeTestCase { }); } + private String getTransformEndpoint() { + return CLUSTER_TYPE == ClusterType.UPGRADED ? 
DATAFRAME_ENDPOINT : DATAFRAME_ENDPOINT_DEPRECATED; + } + private void putTransform(String id, TransformConfig config) throws IOException { - final Request createDataframeTransformRequest = new Request("PUT", DATAFRAME_ENDPOINT + id); + final Request createDataframeTransformRequest = new Request("PUT", getTransformEndpoint() + id); createDataframeTransformRequest.setJsonEntity(Strings.toString(config)); Response response = client().performRequest(createDataframeTransformRequest); assertEquals(200, response.getStatusLine().getStatusCode()); } private void deleteTransform(String id) throws IOException { - Response response = client().performRequest(new Request("DELETE", DATAFRAME_ENDPOINT + id)); + Response response = client().performRequest(new Request("DELETE", getTransformEndpoint() + id)); assertEquals(200, response.getStatusLine().getStatusCode()); } private void startTransform(String id) throws IOException { - final Request startDataframeTransformRequest = new Request("POST", DATAFRAME_ENDPOINT + id + "/_start"); + final Request startDataframeTransformRequest = new Request("POST", getTransformEndpoint() + id + "/_start"); Response response = client().performRequest(startDataframeTransformRequest); assertEquals(200, response.getStatusLine().getStatusCode()); } private void stopTransform(String id) throws IOException { final Request stopDataframeTransformRequest = new Request("POST", - DATAFRAME_ENDPOINT + id + "/_stop?wait_for_completion=true"); + getTransformEndpoint() + id + "/_stop?wait_for_completion=true"); Response response = client().performRequest(stopDataframeTransformRequest); assertEquals(200, response.getStatusLine().getStatusCode()); } private TransformStats getTransformStats(String id) throws IOException { - final Request getStats = new Request("GET", DATAFRAME_ENDPOINT + id + "/_stats"); + final Request getStats = new Request("GET", getTransformEndpoint() + id + "/_stats"); Response response = client().performRequest(getStats); assertEquals(200, response.getStatusLine().getStatusCode()); XContentType xContentType = XContentType.fromMediaTypeOrFormat(response.getEntity().getContentType().getValue()); diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_data_frame_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_data_frame_jobs_crud.yml index 713a66a5ff60..8d7b6d09ec4c 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_data_frame_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_data_frame_jobs_crud.yml @@ -7,7 +7,7 @@ timeout: 70s - do: - transform.put_transform: + data_frame_transform_deprecated.put_transform: transform_id: "mixed-simple-transform" body: > { @@ -21,31 +21,31 @@ - match: { acknowledged: true } - do: - transform.start_transform: + data_frame_transform_deprecated.start_transform: transform_id: "mixed-simple-transform" - match: { acknowledged: true } - do: - transform.get_transform_stats: + data_frame_transform_deprecated.get_transform_stats: transform_id: "mixed-simple-transform" - match: { count: 1 } - match: { transforms.0.id: "mixed-simple-transform" } - match: { transforms.0.state: "/started|indexing|stopping|stopped/" } - do: - transform.stop_transform: + data_frame_transform_deprecated.stop_transform: transform_id: "mixed-simple-transform" wait_for_completion: true - match: { acknowledged: true } - do: - transform.get_transform_stats: + 
data_frame_transform_deprecated.get_transform_stats: transform_id: "mixed-simple-transform" - match: { count: 1 } - match: { transforms.0.id: "mixed-simple-transform" } - match: { transforms.0.state: "stopped" } - do: - transform.put_transform: + data_frame_transform_deprecated.put_transform: transform_id: "mixed-complex-transform" body: > { @@ -73,7 +73,7 @@ - match: { acknowledged: true } - do: - transform.get_transform: + data_frame_transform_deprecated.get_transform: transform_id: "mixed-complex-transform" - match: { count: 1 } - match: { transforms.0.id: "mixed-complex-transform" } @@ -81,24 +81,24 @@ - is_true: transforms.0.create_time - do: - transform.start_transform: + data_frame_transform_deprecated.start_transform: transform_id: "mixed-complex-transform" - match: { acknowledged: true } - do: - transform.get_transform_stats: + data_frame_transform_deprecated.get_transform_stats: transform_id: "mixed-complex-transform" - match: { count: 1 } - match: { transforms.0.id: "mixed-complex-transform" } - match: { transforms.0.state: "/started|indexing|stopping|stopped/" } - do: - transform.stop_transform: + data_frame_transform_deprecated.stop_transform: transform_id: "mixed-complex-transform" wait_for_completion: true - match: { acknowledged: true } - do: - transform.get_transform_stats: + data_frame_transform_deprecated.get_transform_stats: transform_id: "mixed-complex-transform" - match: { count: 1 } - match: { transforms.0.id: "mixed-complex-transform" } @@ -113,7 +113,7 @@ timeout: 70s - do: - transform.put_transform: + data_frame_transform_deprecated.put_transform: transform_id: "mixed-simple-continuous-transform" body: > { @@ -133,7 +133,7 @@ - match: { acknowledged: true } - do: - transform.get_transform: + data_frame_transform_deprecated.get_transform: transform_id: "mixed-simple-continuous-transform" - match: { count: 1 } - match: { transforms.0.id: "mixed-simple-continuous-transform" } @@ -143,24 +143,24 @@ - is_true: transforms.0.create_time - do: - transform.start_transform: + data_frame_transform_deprecated.start_transform: transform_id: "mixed-simple-continuous-transform" - match: { acknowledged: true } - do: - transform.get_transform_stats: + data_frame_transform_deprecated.get_transform_stats: transform_id: "mixed-simple-continuous-transform" - match: { count: 1 } - match: { transforms.0.id: "mixed-simple-continuous-transform" } - match: { transforms.0.state: "/started|indexing/" } - do: - transform.stop_transform: + data_frame_transform_deprecated.stop_transform: transform_id: "mixed-simple-continuous-transform" wait_for_completion: true - match: { acknowledged: true } - do: - transform.get_transform_stats: + data_frame_transform_deprecated.get_transform_stats: transform_id: "mixed-simple-continuous-transform" - match: { count: 1 } - match: { transforms.0.id: "mixed-simple-continuous-transform" } @@ -175,7 +175,7 @@ timeout: 70s - do: - transform.get_transform: + data_frame_transform_deprecated.get_transform: transform_id: "old-simple-transform" - match: { count: 1 } - match: { transforms.0.id: "old-simple-transform" } @@ -185,30 +185,30 @@ - match: { transforms.0.pivot.aggregations.avg_response.avg.field: "responsetime" } - do: - transform.start_transform: + data_frame_transform_deprecated.start_transform: transform_id: "old-simple-transform" - match: { acknowledged: true } - do: - transform.get_transform_stats: + data_frame_transform_deprecated.get_transform_stats: transform_id: "old-simple-transform" - match: { count: 1 } - match: { transforms.0.id: 
"old-simple-transform" } - match: { transforms.0.state: "/started|indexing|stopping|stopped/" } - do: - transform.stop_transform: + data_frame_transform_deprecated.stop_transform: transform_id: "old-simple-transform" wait_for_completion: true - match: { acknowledged: true } - do: - transform.get_transform_stats: + data_frame_transform_deprecated.get_transform_stats: transform_id: "old-simple-transform" - match: { count: 1 } - match: { transforms.0.id: "old-simple-transform" } - match: { transforms.0.state: "stopped" } - do: - transform.get_transform: + data_frame_transform_deprecated.get_transform: transform_id: "old-complex-transform" - match: { count: 1 } - match: { transforms.0.id: "old-complex-transform" } @@ -221,23 +221,23 @@ - match: { transforms.0.pivot.aggregations.avg_response.avg.field: "responsetime" } - do: - transform.start_transform: + data_frame_transform_deprecated.start_transform: transform_id: "old-complex-transform" - match: { acknowledged: true } - do: - transform.get_transform_stats: + data_frame_transform_deprecated.get_transform_stats: transform_id: "old-complex-transform" - match: { count: 1 } - match: { transforms.0.id: "old-complex-transform" } - match: { transforms.0.state: "/started|indexing|stopping|stopped/" } - do: - transform.stop_transform: + data_frame_transform_deprecated.stop_transform: transform_id: "old-complex-transform" wait_for_completion: true - match: { acknowledged: true } - do: - transform.get_transform_stats: + data_frame_transform_deprecated.get_transform_stats: transform_id: "old-complex-transform" - match: { count: 1 } - match: { transforms.0.id: "old-complex-transform" } @@ -252,7 +252,7 @@ timeout: 70s - do: - transform.get_transform: + data_frame_transform_deprecated.get_transform: transform_id: "old-simple-continuous-transform" - match: { count: 1 } - match: { transforms.0.id: "old-simple-continuous-transform" } @@ -262,24 +262,24 @@ - is_true: transforms.0.create_time - do: - transform.start_transform: + data_frame_transform_deprecated.start_transform: transform_id: "old-simple-continuous-transform" - match: { acknowledged: true } - do: - transform.get_transform_stats: + data_frame_transform_deprecated.get_transform_stats: transform_id: "old-simple-continuous-transform" - match: { count: 1 } - match: { transforms.0.id: "old-simple-continuous-transform" } - match: { transforms.0.state: "/started|indexing/" } - do: - transform.stop_transform: + data_frame_transform_deprecated.stop_transform: transform_id: "old-simple-continuous-transform" wait_for_completion: true - match: { acknowledged: true } - do: - transform.get_transform_stats: + data_frame_transform_deprecated.get_transform_stats: transform_id: "old-simple-continuous-transform" - match: { count: 1 } - match: { transforms.0.id: "old-simple-continuous-transform" } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_data_frame_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_data_frame_jobs_crud.yml index bc50362c0d8d..dd2e18b77973 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_data_frame_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_data_frame_jobs_crud.yml @@ -35,7 +35,7 @@ ] } - do: - transform.put_transform: + data_frame_transform_deprecated.put_transform: transform_id: "old-simple-transform" body: > { @@ -49,7 +49,7 @@ - match: { acknowledged: true } - do: - transform.get_transform: + 
data_frame_transform_deprecated.get_transform: transform_id: "old-simple-transform" - match: { count: 1 } - match: { transforms.0.id: "old-simple-transform" } @@ -57,29 +57,29 @@ - is_true: transforms.0.create_time - do: - transform.start_transform: + data_frame_transform_deprecated.start_transform: transform_id: "old-simple-transform" - match: { acknowledged: true } - do: - transform.get_transform_stats: + data_frame_transform_deprecated.get_transform_stats: transform_id: "old-simple-transform" - match: { count: 1 } - match: { transforms.0.id: "old-simple-transform" } - do: - transform.stop_transform: + data_frame_transform_deprecated.stop_transform: transform_id: "old-simple-transform" wait_for_completion: true - match: { acknowledged: true } - do: - transform.get_transform_stats: + data_frame_transform_deprecated.get_transform_stats: transform_id: "old-simple-transform" - match: { count: 1 } - match: { transforms.0.id: "old-simple-transform" } - do: - transform.put_transform: + data_frame_transform_deprecated.put_transform: transform_id: "old-complex-transform" body: > { @@ -107,29 +107,29 @@ - match: { acknowledged: true } - do: - transform.get_transform: + data_frame_transform_deprecated.get_transform: transform_id: "old-complex-transform" - match: { count: 1 } - match: { transforms.0.id: "old-complex-transform" } - do: - transform.start_transform: + data_frame_transform_deprecated.start_transform: transform_id: "old-complex-transform" - match: { acknowledged: true } - do: - transform.get_transform_stats: + data_frame_transform_deprecated.get_transform_stats: transform_id: "old-complex-transform" - match: { count: 1 } - match: { transforms.0.id: "old-complex-transform" } - do: - transform.stop_transform: + data_frame_transform_deprecated.stop_transform: transform_id: "old-complex-transform" wait_for_completion: true - match: { acknowledged: true } - do: - transform.get_transform_stats: + data_frame_transform_deprecated.get_transform_stats: transform_id: "old-complex-transform" - match: { count: 1 } - match: { transforms.0.id: "old-complex-transform" } @@ -157,7 +157,7 @@ timeout: 70s - do: - transform.put_transform: + data_frame_transform_deprecated.put_transform: transform_id: "old-simple-continuous-transform" body: > { @@ -177,7 +177,7 @@ - match: { acknowledged: true } - do: - transform.get_transform: + data_frame_transform_deprecated.get_transform: transform_id: "old-simple-continuous-transform" - match: { count: 1 } - match: { transforms.0.id: "old-simple-continuous-transform" } @@ -187,23 +187,23 @@ - is_true: transforms.0.create_time - do: - transform.start_transform: + data_frame_transform_deprecated.start_transform: transform_id: "old-simple-continuous-transform" - match: { acknowledged: true } - do: - transform.get_transform_stats: + data_frame_transform_deprecated.get_transform_stats: transform_id: "old-simple-continuous-transform" - match: { count: 1 } - match: { transforms.0.id: "old-simple-continuous-transform" } - do: - transform.stop_transform: + data_frame_transform_deprecated.stop_transform: transform_id: "old-simple-continuous-transform" wait_for_completion: true - match: { acknowledged: true } - do: - transform.get_transform_stats: + data_frame_transform_deprecated.get_transform_stats: transform_id: "old-simple-continuous-transform" - match: { count: 1 } - match: { transforms.0.id: "old-simple-continuous-transform" } From 446dcbe697d634db3ad62300ea531289f477c905 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 7 Oct 2019 07:47:56 -0700 Subject: [PATCH 
30/55] Modify Painless AST to add synthetic functions during semantic pass (#47611)

This has ELambda and ENewArrayFunctionRef add their generated synthetic
methods to the SClass node during the semantic pass and removes this data
from the write pass. This is the first step to remove "Globals" (mutable
state) from the write pass.
---
 .../org/elasticsearch/painless/Globals.java   | 18 -------------
 .../painless/node/AExpression.java            |  2 +-
 .../painless/node/EAssignment.java            |  2 +-
 .../elasticsearch/painless/node/EBinary.java  |  2 +-
 .../painless/node/ECallLocal.java             |  2 +-
 .../painless/node/ECapturingFunctionRef.java  |  2 +-
 .../elasticsearch/painless/node/ECast.java    |  2 +-
 .../elasticsearch/painless/node/EComp.java    |  2 +-
 .../painless/node/EInstanceof.java            |  2 +-
 .../elasticsearch/painless/node/ELambda.java  |  6 ++---
 .../painless/node/EListInit.java              |  2 +-
 .../elasticsearch/painless/node/EMapInit.java |  2 +-
 .../painless/node/ENewArrayFunctionRef.java   |  3 +--
 .../elasticsearch/painless/node/ENewObj.java  |  2 +-
 .../elasticsearch/painless/node/ENull.java    |  2 +-
 .../elasticsearch/painless/node/ERegex.java   |  2 +-
 .../elasticsearch/painless/node/EUnary.java   |  2 +-
 .../elasticsearch/painless/node/PBrace.java   |  2 +-
 .../painless/node/PCallInvoke.java            |  2 +-
 .../elasticsearch/painless/node/PField.java   |  2 +-
 .../painless/node/PSubCallInvoke.java         |  2 +-
 .../painless/node/PSubDefArray.java           |  2 +-
 .../painless/node/PSubDefCall.java            |  2 +-
 .../painless/node/PSubDefField.java           |  2 +-
 .../painless/node/PSubField.java              |  2 +-
 .../painless/node/PSubListShortcut.java       |  2 +-
 .../painless/node/PSubMapShortcut.java        |  2 +-
 .../painless/node/PSubShortcut.java           |  2 +-
 .../elasticsearch/painless/node/SClass.java   | 25 +++++++++----------
 .../elasticsearch/painless/node/SEach.java    |  2 +-
 .../painless/node/SFunction.java              |  2 +-
 .../elasticsearch/painless/node/SReturn.java  |  2 +-
 .../painless/node/SSubEachArray.java          |  2 +-
 .../painless/node/SSubEachIterable.java       |  2 +-
 34 files changed, 45 insertions(+), 67 deletions(-)

diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java
index a6a15b8ce1e6..2adb7618aa4b 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java
@@ -19,8 +19,6 @@
 package org.elasticsearch.painless;
 
-import org.elasticsearch.painless.node.SFunction;
-
 import java.util.BitSet;
 import java.util.HashMap;
 import java.util.Map;
@@ -29,7 +27,6 @@ import java.util.Map;
  * Program-wide globals (initializers, synthetic methods, etc)
  */
 public class Globals {
-    private final Map syntheticMethods = new HashMap<>();
     private final Map constantInitializers = new HashMap<>();
     private final Map> classBindings = new HashMap<>();
     private final Map instanceBindings = new HashMap<>();
@@ -40,16 +37,6 @@ public class Globals {
         this.statements = statements;
     }
 
-    /** Adds a new synthetic method to be written. It must be analyzed!
*/ - public void addSyntheticMethod(SFunction function) { - if (!function.synthetic) { - throw new IllegalStateException("method: " + function.name + " is not synthetic"); - } - if (syntheticMethods.put(function.name, function) != null) { - throw new IllegalStateException("synthetic method: " + function.name + " already exists"); - } - } - /** Adds a new constant initializer to be written */ public void addConstantInitializer(Constant constant) { if (constantInitializers.put(constant.name, constant) != null) { @@ -69,11 +56,6 @@ public class Globals { public String addInstanceBinding(Object instance) { return instanceBindings.computeIfAbsent(instance, key -> "$instance_binding$" + instanceBindings.size()); } - - /** Returns the current synthetic methods */ - public Map getSyntheticMethods() { - return syntheticMethods; - } /** Returns the current initializers */ public Map getConstantInitializers() { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AExpression.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AExpression.java index dd9abfbfd659..d1281b1ea396 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AExpression.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/AExpression.java @@ -22,9 +22,9 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.AnalyzerCaster; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; +import org.elasticsearch.painless.ScriptRoot; import org.elasticsearch.painless.lookup.PainlessCast; import org.elasticsearch.painless.lookup.PainlessLookupUtility; -import org.elasticsearch.painless.ScriptRoot; import java.util.Objects; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EAssignment.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EAssignment.java index 27c84d5e6ec6..655b511471dd 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EAssignment.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EAssignment.java @@ -29,9 +29,9 @@ import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; import org.elasticsearch.painless.Operation; +import org.elasticsearch.painless.ScriptRoot; import org.elasticsearch.painless.lookup.PainlessCast; import org.elasticsearch.painless.lookup.def; -import org.elasticsearch.painless.ScriptRoot; import java.util.ArrayList; import java.util.List; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java index ec7ce5dcdc8b..55f4a4deca4c 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java @@ -28,10 +28,10 @@ import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; import org.elasticsearch.painless.Operation; +import org.elasticsearch.painless.ScriptRoot; import org.elasticsearch.painless.WriterConstants; import org.elasticsearch.painless.lookup.PainlessLookupUtility; import org.elasticsearch.painless.lookup.def; -import org.elasticsearch.painless.ScriptRoot; import java.util.Objects; import java.util.Set; diff --git 
a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java index fe594ec9c249..24c5245f61e9 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java @@ -25,10 +25,10 @@ import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.ScriptRoot; import org.elasticsearch.painless.lookup.PainlessClassBinding; import org.elasticsearch.painless.lookup.PainlessInstanceBinding; import org.elasticsearch.painless.lookup.PainlessMethod; -import org.elasticsearch.painless.ScriptRoot; import org.elasticsearch.painless.symbol.FunctionTable; import org.objectweb.asm.Label; import org.objectweb.asm.Type; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java index 2b787cc1be07..50487e41a6b7 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java @@ -28,9 +28,9 @@ import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Locals.Variable; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.ScriptRoot; import org.elasticsearch.painless.lookup.PainlessLookupUtility; import org.elasticsearch.painless.lookup.def; -import org.elasticsearch.painless.ScriptRoot; import org.objectweb.asm.Opcodes; import org.objectweb.asm.Type; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECast.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECast.java index 148f1c43b74f..2f9df0a5bec2 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECast.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECast.java @@ -25,9 +25,9 @@ import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.ScriptRoot; import org.elasticsearch.painless.lookup.PainlessCast; import org.elasticsearch.painless.lookup.PainlessLookupUtility; -import org.elasticsearch.painless.ScriptRoot; import java.util.Objects; import java.util.Set; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EComp.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EComp.java index 40d57a68f821..79597ac0752a 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EComp.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EComp.java @@ -28,9 +28,9 @@ import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; import org.elasticsearch.painless.Operation; +import org.elasticsearch.painless.ScriptRoot; import org.elasticsearch.painless.lookup.PainlessLookupUtility; import org.elasticsearch.painless.lookup.def; -import org.elasticsearch.painless.ScriptRoot; import 
org.objectweb.asm.Label; import org.objectweb.asm.Type; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EInstanceof.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EInstanceof.java index 06b9d964b419..4e03f59be5e6 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EInstanceof.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EInstanceof.java @@ -25,8 +25,8 @@ import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; -import org.elasticsearch.painless.lookup.PainlessLookupUtility; import org.elasticsearch.painless.ScriptRoot; +import org.elasticsearch.painless.lookup.PainlessLookupUtility; import java.util.Objects; import java.util.Set; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java index 1103bf60f98f..7bd027c149c9 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java @@ -27,10 +27,10 @@ import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Locals.Variable; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.ScriptRoot; import org.elasticsearch.painless.lookup.PainlessLookupUtility; import org.elasticsearch.painless.lookup.PainlessMethod; import org.elasticsearch.painless.lookup.def; -import org.elasticsearch.painless.ScriptRoot; import org.objectweb.asm.Opcodes; import java.util.ArrayList; @@ -185,6 +185,7 @@ public final class ELambda extends AExpression implements ILambda { desugared.analyze(scriptRoot, Locals.newLambdaScope(locals.getProgramScope(), desugared.name, returnType, desugared.parameters, captures.size(), settings.getMaxLoopCounter())); scriptRoot.getFunctionTable().addFunction(desugared.name, desugared.returnType, desugared.typeParameters, true); + scriptRoot.getClassNode().addFunction(desugared); // setup method reference to synthetic method if (expected == null) { @@ -219,9 +220,6 @@ public final class ELambda extends AExpression implements ILambda { methodWriter.visitVarInsn(MethodWriter.getType(capture.clazz).getOpcode(Opcodes.ILOAD), capture.getSlot()); } } - - // add synthetic method to the queue to be written - globals.addSyntheticMethod(desugared); } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EListInit.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EListInit.java index ba20436af69e..75c1e10bb683 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EListInit.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EListInit.java @@ -25,10 +25,10 @@ import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.ScriptRoot; import org.elasticsearch.painless.lookup.PainlessConstructor; import org.elasticsearch.painless.lookup.PainlessMethod; import org.elasticsearch.painless.lookup.def; -import org.elasticsearch.painless.ScriptRoot; import org.objectweb.asm.Type; import org.objectweb.asm.commons.Method; diff --git 
a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EMapInit.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EMapInit.java index 2861577ff8d2..8107f96c77aa 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EMapInit.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EMapInit.java @@ -25,10 +25,10 @@ import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.ScriptRoot; import org.elasticsearch.painless.lookup.PainlessConstructor; import org.elasticsearch.painless.lookup.PainlessMethod; import org.elasticsearch.painless.lookup.def; -import org.elasticsearch.painless.ScriptRoot; import org.objectweb.asm.Type; import org.objectweb.asm.commons.Method; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENewArrayFunctionRef.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENewArrayFunctionRef.java index 4f54d00f7dcd..b85e93c19dab 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENewArrayFunctionRef.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENewArrayFunctionRef.java @@ -75,6 +75,7 @@ public final class ENewArrayFunctionRef extends AExpression implements ILambda { function.analyze(scriptRoot, Locals.newLambdaScope(locals.getProgramScope(), function.name, function.returnType, function.parameters, 0, settings.getMaxLoopCounter())); scriptRoot.getFunctionTable().addFunction(function.name, function.returnType, function.typeParameters, true); + scriptRoot.getClassNode().addFunction(function); if (expected == null) { ref = null; @@ -97,8 +98,6 @@ public final class ENewArrayFunctionRef extends AExpression implements ILambda { // push a null instruction as a placeholder for future lambda instructions methodWriter.push((String)null); } - - globals.addSyntheticMethod(function); } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENewObj.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENewObj.java index c6041efa2b88..73d3c38addb7 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENewObj.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENewObj.java @@ -25,9 +25,9 @@ import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.ScriptRoot; import org.elasticsearch.painless.lookup.PainlessConstructor; import org.elasticsearch.painless.lookup.PainlessLookupUtility; -import org.elasticsearch.painless.ScriptRoot; import org.objectweb.asm.Type; import org.objectweb.asm.commons.Method; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENull.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENull.java index 79ef55d6028d..0cd33ea900b4 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENull.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENull.java @@ -25,8 +25,8 @@ import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; -import 
org.elasticsearch.painless.lookup.PainlessLookupUtility; import org.elasticsearch.painless.ScriptRoot; +import org.elasticsearch.painless.lookup.PainlessLookupUtility; import org.objectweb.asm.Opcodes; import java.util.Set; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ERegex.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ERegex.java index e020d80e745f..0c8b35db3ac9 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ERegex.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ERegex.java @@ -26,8 +26,8 @@ import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; -import org.elasticsearch.painless.WriterConstants; import org.elasticsearch.painless.ScriptRoot; +import org.elasticsearch.painless.WriterConstants; import java.util.Set; import java.util.regex.Pattern; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EUnary.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EUnary.java index 7e9d04f17fbd..6e9698e172e0 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EUnary.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EUnary.java @@ -28,9 +28,9 @@ import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; import org.elasticsearch.painless.Operation; +import org.elasticsearch.painless.ScriptRoot; import org.elasticsearch.painless.lookup.PainlessLookupUtility; import org.elasticsearch.painless.lookup.def; -import org.elasticsearch.painless.ScriptRoot; import org.objectweb.asm.Label; import org.objectweb.asm.Opcodes; import org.objectweb.asm.Type; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PBrace.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PBrace.java index 0526caefe2a7..cc18088f3fe5 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PBrace.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PBrace.java @@ -25,9 +25,9 @@ import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.ScriptRoot; import org.elasticsearch.painless.lookup.PainlessLookupUtility; import org.elasticsearch.painless.lookup.def; -import org.elasticsearch.painless.ScriptRoot; import java.util.List; import java.util.Map; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PCallInvoke.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PCallInvoke.java index b1c1081a9350..92e119d37e5d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PCallInvoke.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PCallInvoke.java @@ -25,9 +25,9 @@ import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.ScriptRoot; import org.elasticsearch.painless.lookup.PainlessMethod; import org.elasticsearch.painless.lookup.def; -import org.elasticsearch.painless.ScriptRoot; import java.util.List; import 
java.util.Objects; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PField.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PField.java index 063291625a90..b38ce3edb553 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PField.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PField.java @@ -25,11 +25,11 @@ import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.ScriptRoot; import org.elasticsearch.painless.lookup.PainlessField; import org.elasticsearch.painless.lookup.PainlessLookupUtility; import org.elasticsearch.painless.lookup.PainlessMethod; import org.elasticsearch.painless.lookup.def; -import org.elasticsearch.painless.ScriptRoot; import java.util.List; import java.util.Map; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubCallInvoke.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubCallInvoke.java index 4c13ebe5cae8..a2afc502a8e0 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubCallInvoke.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubCallInvoke.java @@ -25,8 +25,8 @@ import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; -import org.elasticsearch.painless.lookup.PainlessMethod; import org.elasticsearch.painless.ScriptRoot; +import org.elasticsearch.painless.lookup.PainlessMethod; import java.util.List; import java.util.Objects; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefArray.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefArray.java index 1bbe2b89a6b4..64f713d1e871 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefArray.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefArray.java @@ -26,8 +26,8 @@ import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; -import org.elasticsearch.painless.lookup.def; import org.elasticsearch.painless.ScriptRoot; +import org.elasticsearch.painless.lookup.def; import org.objectweb.asm.Type; import java.time.ZonedDateTime; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefCall.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefCall.java index 64d90602f915..b148f4c70630 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefCall.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefCall.java @@ -26,8 +26,8 @@ import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; -import org.elasticsearch.painless.lookup.def; import org.elasticsearch.painless.ScriptRoot; +import org.elasticsearch.painless.lookup.def; import org.objectweb.asm.Type; import java.time.ZonedDateTime; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefField.java 
b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefField.java index 6bad888cf540..2f8e5035cbde 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefField.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefField.java @@ -26,8 +26,8 @@ import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; -import org.elasticsearch.painless.lookup.def; import org.elasticsearch.painless.ScriptRoot; +import org.elasticsearch.painless.lookup.def; import java.time.ZonedDateTime; import java.util.Objects; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubField.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubField.java index 102a1118331b..371e36beab46 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubField.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubField.java @@ -25,9 +25,9 @@ import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.ScriptRoot; import org.elasticsearch.painless.lookup.PainlessField; import org.elasticsearch.painless.lookup.PainlessLookupUtility; -import org.elasticsearch.painless.ScriptRoot; import org.objectweb.asm.Type; import java.lang.reflect.Modifier; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubListShortcut.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubListShortcut.java index 4226252b16f0..e9bdbc437eb0 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubListShortcut.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubListShortcut.java @@ -25,10 +25,10 @@ import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.ScriptRoot; import org.elasticsearch.painless.WriterConstants; import org.elasticsearch.painless.lookup.PainlessLookupUtility; import org.elasticsearch.painless.lookup.PainlessMethod; -import org.elasticsearch.painless.ScriptRoot; import java.util.Objects; import java.util.Set; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubMapShortcut.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubMapShortcut.java index 99a2ab0fa2b4..fb537572f631 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubMapShortcut.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubMapShortcut.java @@ -25,9 +25,9 @@ import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.ScriptRoot; import org.elasticsearch.painless.lookup.PainlessLookupUtility; import org.elasticsearch.painless.lookup.PainlessMethod; -import org.elasticsearch.painless.ScriptRoot; import java.util.Objects; import java.util.Set; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubShortcut.java 
b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubShortcut.java
index b9266d8590a9..214ee7f42964 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubShortcut.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubShortcut.java
@@ -25,8 +25,8 @@ import org.elasticsearch.painless.Globals;
 import org.elasticsearch.painless.Locals;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
-import org.elasticsearch.painless.lookup.PainlessMethod;
 import org.elasticsearch.painless.ScriptRoot;
+import org.elasticsearch.painless.lookup.PainlessMethod;
 
 import java.util.Set;
 
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SClass.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SClass.java
index af241e2d8474..aee002c8170f 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SClass.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SClass.java
@@ -28,9 +28,9 @@ import org.elasticsearch.painless.Locals.Variable;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
 import org.elasticsearch.painless.ScriptClassInfo;
+import org.elasticsearch.painless.ScriptRoot;
 import org.elasticsearch.painless.WriterConstants;
 import org.elasticsearch.painless.lookup.PainlessLookup;
-import org.elasticsearch.painless.ScriptRoot;
 import org.elasticsearch.painless.symbol.FunctionTable;
 import org.objectweb.asm.ClassVisitor;
 import org.objectweb.asm.Label;
@@ -82,7 +82,7 @@ public final class SClass extends AStatement {
     private final ScriptClassInfo scriptClassInfo;
     private final String name;
     private final Printer debugStream;
-    private final List functions;
+    private final List functions = new ArrayList<>();
     private final Globals globals;
     private final List statements;
 
@@ -100,7 +100,7 @@
         this.scriptClassInfo = Objects.requireNonNull(scriptClassInfo);
         this.name = Objects.requireNonNull(name);
         this.debugStream = debugStream;
-        this.functions = Collections.unmodifiableList(functions);
+        this.functions.addAll(Objects.requireNonNull(functions));
         this.statements = Collections.unmodifiableList(statements);
         this.globals = new Globals(new BitSet(sourceText.length()));
@@ -108,6 +108,10 @@
         this.getMethods = new ArrayList<>();
     }
 
+    void addFunction(SFunction function) {
+        functions.add(function);
+    }
+
     @Override
     public void storeSettings(CompilerSettings settings) {
         for (SFunction function : functions) {
@@ -155,7 +159,11 @@
     @Override
     void analyze(ScriptRoot scriptRoot, Locals program) {
-        for (SFunction function : this.functions) {
+        // copy protection is required because synthetic functions are
+        // added for lambdas/method references and analysis here is
+        // only for user-defined functions
+        List functions = new ArrayList<>(this.functions);
+        for (SFunction function : functions) {
             Locals functionLocals = Locals.newFunctionScope(program, function.returnType, function.parameters, settings.getMaxLoopCounter());
             function.analyze(scriptRoot, functionLocals);
@@ -281,15 +289,6 @@
             function.write(classWriter, globals);
         }
 
-        // Write all synthetic functions. Note that this process may add more :)
-        while (!globals.getSyntheticMethods().isEmpty()) {
-            List current = new ArrayList<>(globals.getSyntheticMethods().values());
-            globals.getSyntheticMethods().clear();
-            for (SFunction function : current) {
-                function.write(classWriter, globals);
-            }
-        }
-
         // Write the constants
         if (false == globals.getConstantInitializers().isEmpty()) {
             Collection inits = globals.getConstantInitializers().values();
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java
index 6d02bc490b16..5fa25e2db08d 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java
@@ -26,9 +26,9 @@ import org.elasticsearch.painless.Locals;
 import org.elasticsearch.painless.Locals.Variable;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
+import org.elasticsearch.painless.ScriptRoot;
 import org.elasticsearch.painless.lookup.PainlessLookupUtility;
 import org.elasticsearch.painless.lookup.def;
-import org.elasticsearch.painless.ScriptRoot;
 
 import java.util.Objects;
 import java.util.Set;
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java
index 6b65f49be8f4..4bf6fbcd4342 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java
@@ -27,9 +27,9 @@ import org.elasticsearch.painless.Locals.Parameter;
 import org.elasticsearch.painless.Locals.Variable;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
+import org.elasticsearch.painless.ScriptRoot;
 import org.elasticsearch.painless.lookup.PainlessLookup;
 import org.elasticsearch.painless.lookup.PainlessLookupUtility;
-import org.elasticsearch.painless.ScriptRoot;
 import org.objectweb.asm.Opcodes;
 
 import java.lang.invoke.MethodType;
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SReturn.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SReturn.java
index 3d264777e17b..88bb4ff5ea84 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SReturn.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SReturn.java
@@ -25,8 +25,8 @@ import org.elasticsearch.painless.Globals;
 import org.elasticsearch.painless.Locals;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
-import org.elasticsearch.painless.lookup.PainlessLookupUtility;
 import org.elasticsearch.painless.ScriptRoot;
+import org.elasticsearch.painless.lookup.PainlessLookupUtility;
 
 import java.util.Set;
 
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachArray.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachArray.java
index 2da43bd25fd2..9fe6c06dada1 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachArray.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachArray.java
@@ -27,9 +27,9 @@ import org.elasticsearch.painless.Locals;
 import org.elasticsearch.painless.Locals.Variable;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
+import org.elasticsearch.painless.ScriptRoot;
 import org.elasticsearch.painless.lookup.PainlessCast;
 import org.elasticsearch.painless.lookup.PainlessLookupUtility;
-import org.elasticsearch.painless.ScriptRoot;
 import org.objectweb.asm.Label;
 import org.objectweb.asm.Opcodes;
 
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java
index 3b11f8821ef5..db02a7e4c751 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java
+++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java
@@ -28,11 +28,11 @@ import org.elasticsearch.painless.Locals;
 import org.elasticsearch.painless.Locals.Variable;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
+import org.elasticsearch.painless.ScriptRoot;
 import org.elasticsearch.painless.lookup.PainlessCast;
 import org.elasticsearch.painless.lookup.PainlessLookupUtility;
 import org.elasticsearch.painless.lookup.PainlessMethod;
 import org.elasticsearch.painless.lookup.def;
-import org.elasticsearch.painless.ScriptRoot;
 import org.objectweb.asm.Label;
 import org.objectweb.asm.Opcodes;

From 890b3db1938f3f0bd4e86f79ee5c7e7965277357 Mon Sep 17 00:00:00 2001
From: Benjamin Trent
Date: Mon, 7 Oct 2019 11:16:51 -0400
Subject: [PATCH 31/55] [ML][Inference] adjusting definition object schema and
 validation (#47447)

* [ML][Inference] adjusting definition object schema and validation

* finalizing schema and fixing inference NPE

* addressing PR comments
---
 .../ml/inference/TrainedModelDefinition.java  |  79 +++++-
 .../preprocessing/TargetMeanEncoding.java     |   6 +-
 .../TrainedModelDefinitionTests.java          |   5 +-
 .../ml/inference/TrainedModelDefinition.java  |  94 +++++++-
 .../preprocessing/TargetMeanEncoding.java     |   8 +-
 .../trainedmodel/ensemble/Ensemble.java       |  27 +--
 .../ml/inference/trainedmodel/tree/Tree.java  |  10 +-
 .../TrainedModelDefinitionTests.java          | 228 ++++++++++++++++++
 .../trainedmodel/ensemble/EnsembleTests.java  |  43 ++--
 .../trainedmodel/tree/TreeTests.java          |  22 +-
 10 files changed, 459 insertions(+), 63 deletions(-)

diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/TrainedModelDefinition.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/TrainedModelDefinition.java
index 7b564a9e684f..dec834fa328f 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/TrainedModelDefinition.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/TrainedModelDefinition.java
@@ -22,6 +22,7 @@ import org.elasticsearch.client.ml.inference.preprocessing.PreProcessor;
 import org.elasticsearch.client.ml.inference.trainedmodel.TrainedModel;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
 import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -38,6 +39,7 @@ public class TrainedModelDefinition implements ToXContentObject {
 
     public static final ParseField TRAINED_MODEL = new ParseField("trained_model");
     public static final ParseField PREPROCESSORS = new ParseField("preprocessors");
+    public static final ParseField INPUT = new ParseField("input");
public static final ObjectParser PARSER = new ObjectParser<>(NAME, true, @@ -51,6 +53,7 @@ public class TrainedModelDefinition implements ToXContentObject { (p, c, n) -> p.namedObject(PreProcessor.class, n, null), (trainedModelDefBuilder) -> {/* Does not matter client side*/ }, PREPROCESSORS); + PARSER.declareObject(TrainedModelDefinition.Builder::setInput, (p, c) -> Input.fromXContent(p), INPUT); } public static TrainedModelDefinition.Builder fromXContent(XContentParser parser) throws IOException { @@ -59,10 +62,12 @@ public class TrainedModelDefinition implements ToXContentObject { private final TrainedModel trainedModel; private final List preProcessors; + private final Input input; - TrainedModelDefinition(TrainedModel trainedModel, List preProcessors) { + TrainedModelDefinition(TrainedModel trainedModel, List preProcessors, Input input) { this.trainedModel = trainedModel; this.preProcessors = preProcessors == null ? Collections.emptyList() : Collections.unmodifiableList(preProcessors); + this.input = input; } @Override @@ -78,6 +83,9 @@ public class TrainedModelDefinition implements ToXContentObject { true, PREPROCESSORS.getPreferredName(), preProcessors); + if (input != null) { + builder.field(INPUT.getPreferredName(), input); + } builder.endObject(); return builder; } @@ -90,6 +98,10 @@ public class TrainedModelDefinition implements ToXContentObject { return preProcessors; } + public Input getInput() { + return input; + } + @Override public String toString() { return Strings.toString(this); @@ -101,18 +113,20 @@ public class TrainedModelDefinition implements ToXContentObject { if (o == null || getClass() != o.getClass()) return false; TrainedModelDefinition that = (TrainedModelDefinition) o; return Objects.equals(trainedModel, that.trainedModel) && - Objects.equals(preProcessors, that.preProcessors) ; + Objects.equals(preProcessors, that.preProcessors) && + Objects.equals(input, that.input); } @Override public int hashCode() { - return Objects.hash(trainedModel, preProcessors); + return Objects.hash(trainedModel, preProcessors, input); } public static class Builder { private List preProcessors; private TrainedModel trainedModel; + private Input input; public Builder setPreProcessors(List preProcessors) { this.preProcessors = preProcessors; @@ -124,14 +138,71 @@ public class TrainedModelDefinition implements ToXContentObject { return this; } + public Builder setInput(Input input) { + this.input = input; + return this; + } + private Builder setTrainedModel(List trainedModel) { assert trainedModel.size() == 1; return setTrainedModel(trainedModel.get(0)); } public TrainedModelDefinition build() { - return new TrainedModelDefinition(this.trainedModel, this.preProcessors); + return new TrainedModelDefinition(this.trainedModel, this.preProcessors, this.input); } } + public static class Input implements ToXContentObject { + + public static final String NAME = "trained_mode_definition_input"; + public static final ParseField FIELD_NAMES = new ParseField("field_names"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, + true, + a -> new Input((List)a[0])); + static { + PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), FIELD_NAMES); + } + + public static Input fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + private final List fieldNames; + + public Input(List fieldNames) { + this.fieldNames = fieldNames; + } + + public List getFieldNames() { + 
return fieldNames; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (fieldNames != null) { + builder.field(FIELD_NAMES.getPreferredName(), fieldNames); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + TrainedModelDefinition.Input that = (TrainedModelDefinition.Input) o; + return Objects.equals(fieldNames, that.fieldNames); + } + + @Override + public int hashCode() { + return Objects.hash(fieldNames); + } + + } + } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/preprocessing/TargetMeanEncoding.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/preprocessing/TargetMeanEncoding.java index bb29924b98e1..18203f330189 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/preprocessing/TargetMeanEncoding.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/inference/preprocessing/TargetMeanEncoding.java @@ -39,7 +39,7 @@ public class TargetMeanEncoding implements PreProcessor { public static final String NAME = "target_mean_encoding"; public static final ParseField FIELD = new ParseField("field"); public static final ParseField FEATURE_NAME = new ParseField("feature_name"); - public static final ParseField TARGET_MEANS = new ParseField("target_means"); + public static final ParseField TARGET_MAP = new ParseField("target_map"); public static final ParseField DEFAULT_VALUE = new ParseField("default_value"); @SuppressWarnings("unchecked") @@ -52,7 +52,7 @@ public class TargetMeanEncoding implements PreProcessor { PARSER.declareString(ConstructingObjectParser.constructorArg(), FEATURE_NAME); PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> p.map(HashMap::new, XContentParser::doubleValue), - TARGET_MEANS); + TARGET_MAP); PARSER.declareDouble(ConstructingObjectParser.constructorArg(), DEFAULT_VALUE); } @@ -110,7 +110,7 @@ public class TargetMeanEncoding implements PreProcessor { builder.startObject(); builder.field(FIELD.getPreferredName(), field); builder.field(FEATURE_NAME.getPreferredName(), featureName); - builder.field(TARGET_MEANS.getPreferredName(), meanMap); + builder.field(TARGET_MAP.getPreferredName(), meanMap); builder.field(DEFAULT_VALUE.getPreferredName(), defaultValue); builder.endObject(); return builder; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/TrainedModelDefinitionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/TrainedModelDefinitionTests.java index 8eeec2ce2fcb..ff53c0d8fc08 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/TrainedModelDefinitionTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/inference/TrainedModelDefinitionTests.java @@ -64,7 +64,10 @@ public class TrainedModelDefinitionTests extends AbstractXContentTestCase randomAlphaOfLength(10)) + .limit(randomLongBetween(1, 10)) + .collect(Collectors.toList()))); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelDefinition.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelDefinition.java index 6daa530e0277..f85c184646e1 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelDefinition.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelDefinition.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -30,10 +31,11 @@ import java.util.Objects; public class TrainedModelDefinition implements ToXContentObject, Writeable { - public static final String NAME = "trained_model_doc"; + public static final String NAME = "trained_mode_definition"; public static final ParseField TRAINED_MODEL = new ParseField("trained_model"); public static final ParseField PREPROCESSORS = new ParseField("preprocessors"); + public static final ParseField INPUT = new ParseField("input"); // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly public static final ObjectParser LENIENT_PARSER = createParser(true); @@ -55,6 +57,7 @@ public class TrainedModelDefinition implements ToXContentObject, Writeable { p.namedObject(StrictlyParsedPreProcessor.class, n, null), (trainedModelDefBuilder) -> trainedModelDefBuilder.setProcessorsInOrder(true), PREPROCESSORS); + parser.declareObject(TrainedModelDefinition.Builder::setInput, (p, c) -> Input.fromXContent(p, ignoreUnknownFields), INPUT); return parser; } @@ -64,21 +67,25 @@ public class TrainedModelDefinition implements ToXContentObject, Writeable { private final TrainedModel trainedModel; private final List preProcessors; + private final Input input; - TrainedModelDefinition(TrainedModel trainedModel, List preProcessors) { - this.trainedModel = trainedModel; + TrainedModelDefinition(TrainedModel trainedModel, List preProcessors, Input input) { + this.trainedModel = ExceptionsHelper.requireNonNull(trainedModel, TRAINED_MODEL); this.preProcessors = preProcessors == null ? 
Collections.emptyList() : Collections.unmodifiableList(preProcessors); + this.input = ExceptionsHelper.requireNonNull(input, INPUT); } public TrainedModelDefinition(StreamInput in) throws IOException { this.trainedModel = in.readNamedWriteable(TrainedModel.class); this.preProcessors = in.readNamedWriteableList(PreProcessor.class); + this.input = new Input(in); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeNamedWriteable(trainedModel); out.writeNamedWriteableList(preProcessors); + input.writeTo(out); } @Override @@ -94,6 +101,7 @@ public class TrainedModelDefinition implements ToXContentObject, Writeable { true, PREPROCESSORS.getPreferredName(), preProcessors); + builder.field(INPUT.getPreferredName(), input); builder.endObject(); return builder; } @@ -106,6 +114,10 @@ public class TrainedModelDefinition implements ToXContentObject, Writeable { return preProcessors; } + public Input getInput() { + return input; + } + @Override public String toString() { return Strings.toString(this); @@ -117,12 +129,13 @@ public class TrainedModelDefinition implements ToXContentObject, Writeable { if (o == null || getClass() != o.getClass()) return false; TrainedModelDefinition that = (TrainedModelDefinition) o; return Objects.equals(trainedModel, that.trainedModel) && - Objects.equals(preProcessors, that.preProcessors) ; + Objects.equals(input, that.input) && + Objects.equals(preProcessors, that.preProcessors); } @Override public int hashCode() { - return Objects.hash(trainedModel, preProcessors); + return Objects.hash(trainedModel, input, preProcessors); } public static class Builder { @@ -130,6 +143,7 @@ public class TrainedModelDefinition implements ToXContentObject, Writeable { private List preProcessors; private TrainedModel trainedModel; private boolean processorsInOrder; + private Input input; private static Builder builderForParser() { return new Builder(false); @@ -153,6 +167,11 @@ public class TrainedModelDefinition implements ToXContentObject, Writeable { return this; } + public Builder setInput(Input input) { + this.input = input; + return this; + } + private Builder setTrainedModel(List trainedModel) { if (trainedModel.size() != 1) { throw ExceptionsHelper.badRequestException("[{}] must have exactly one trained model defined.", @@ -169,8 +188,71 @@ public class TrainedModelDefinition implements ToXContentObject, Writeable { if (preProcessors != null && preProcessors.size() > 1 && processorsInOrder == false) { throw new IllegalArgumentException("preprocessors must be an array of preprocessor objects"); } - return new TrainedModelDefinition(this.trainedModel, this.preProcessors); + return new TrainedModelDefinition(this.trainedModel, this.preProcessors, this.input); } } + public static class Input implements ToXContentObject, Writeable { + + public static final String NAME = "trained_mode_definition_input"; + public static final ParseField FIELD_NAMES = new ParseField("field_names"); + + public static final ConstructingObjectParser LENIENT_PARSER = createParser(true); + public static final ConstructingObjectParser STRICT_PARSER = createParser(false); + + @SuppressWarnings("unchecked") + private static ConstructingObjectParser createParser(boolean ignoreUnknownFields) { + ConstructingObjectParser parser = new ConstructingObjectParser<>(NAME, + ignoreUnknownFields, + a -> new Input((List)a[0])); + parser.declareStringArray(ConstructingObjectParser.constructorArg(), FIELD_NAMES); + return parser; + } + + public static Input fromXContent(XContentParser parser, 
boolean lenient) throws IOException { + return lenient ? LENIENT_PARSER.parse(parser, null) : STRICT_PARSER.parse(parser, null); + } + + private final List fieldNames; + + public Input(List fieldNames) { + this.fieldNames = Collections.unmodifiableList(ExceptionsHelper.requireNonNull(fieldNames, FIELD_NAMES)); + } + + public Input(StreamInput in) throws IOException { + this.fieldNames = Collections.unmodifiableList(in.readStringList()); + } + + public List getFieldNames() { + return fieldNames; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeStringCollection(fieldNames); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(FIELD_NAMES.getPreferredName(), fieldNames); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + TrainedModelDefinition.Input that = (TrainedModelDefinition.Input) o; + return Objects.equals(fieldNames, that.fieldNames); + } + + @Override + public int hashCode() { + return Objects.hash(fieldNames); + } + + } + } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/preprocessing/TargetMeanEncoding.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/preprocessing/TargetMeanEncoding.java index ebce49db957e..d8f413b3b175 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/preprocessing/TargetMeanEncoding.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/preprocessing/TargetMeanEncoding.java @@ -28,7 +28,7 @@ public class TargetMeanEncoding implements LenientlyParsedPreProcessor, Strictly public static final ParseField NAME = new ParseField("target_mean_encoding"); public static final ParseField FIELD = new ParseField("field"); public static final ParseField FEATURE_NAME = new ParseField("feature_name"); - public static final ParseField TARGET_MEANS = new ParseField("target_means"); + public static final ParseField TARGET_MAP = new ParseField("target_map"); public static final ParseField DEFAULT_VALUE = new ParseField("default_value"); public static final ConstructingObjectParser STRICT_PARSER = createParser(false); @@ -44,7 +44,7 @@ public class TargetMeanEncoding implements LenientlyParsedPreProcessor, Strictly parser.declareString(ConstructingObjectParser.constructorArg(), FEATURE_NAME); parser.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> p.map(HashMap::new, XContentParser::doubleValue), - TARGET_MEANS); + TARGET_MAP); parser.declareDouble(ConstructingObjectParser.constructorArg(), DEFAULT_VALUE); return parser; } @@ -65,7 +65,7 @@ public class TargetMeanEncoding implements LenientlyParsedPreProcessor, Strictly public TargetMeanEncoding(String field, String featureName, Map meanMap, Double defaultValue) { this.field = ExceptionsHelper.requireNonNull(field, FIELD); this.featureName = ExceptionsHelper.requireNonNull(featureName, FEATURE_NAME); - this.meanMap = Collections.unmodifiableMap(ExceptionsHelper.requireNonNull(meanMap, TARGET_MEANS)); + this.meanMap = Collections.unmodifiableMap(ExceptionsHelper.requireNonNull(meanMap, TARGET_MAP)); this.defaultValue = ExceptionsHelper.requireNonNull(defaultValue, DEFAULT_VALUE); } @@ -136,7 +136,7 @@ public class TargetMeanEncoding implements LenientlyParsedPreProcessor, Strictly 
builder.startObject(); builder.field(FIELD.getPreferredName(), field); builder.field(FEATURE_NAME.getPreferredName(), featureName); - builder.field(TARGET_MEANS.getPreferredName(), meanMap); + builder.field(TARGET_MAP.getPreferredName(), meanMap); builder.field(DEFAULT_VALUE.getPreferredName(), defaultValue); builder.endObject(); return builder; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ensemble/Ensemble.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ensemble/Ensemble.java index 7f2a7cc9a02c..5e5199c24053 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ensemble/Ensemble.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ensemble/Ensemble.java @@ -107,14 +107,13 @@ public class Ensemble implements LenientlyParsedTrainedModel, StrictlyParsedTrai @Override public double infer(Map fields) { - List features = featureNames.stream().map(f -> (Double) fields.get(f)).collect(Collectors.toList()); - return infer(features); + List processedInferences = inferAndProcess(fields); + return outputAggregator.aggregate(processedInferences); } @Override public double infer(List fields) { - List processedInferences = inferAndProcess(fields); - return outputAggregator.aggregate(processedInferences); + throw new UnsupportedOperationException("Ensemble requires map containing field names and values"); } @Override @@ -128,17 +127,12 @@ public class Ensemble implements LenientlyParsedTrainedModel, StrictlyParsedTrai throw new UnsupportedOperationException( "Cannot determine classification probability with target_type [" + targetType.toString() + "]"); } - List features = featureNames.stream().map(f -> (Double) fields.get(f)).collect(Collectors.toList()); - return classificationProbability(features); + return inferAndProcess(fields); } @Override public List classificationProbability(List fields) { - if ((targetType == TargetType.CLASSIFICATION) == false) { - throw new UnsupportedOperationException( - "Cannot determine classification probability with target_type [" + targetType.toString() + "]"); - } - return inferAndProcess(fields); + throw new UnsupportedOperationException("Ensemble requires map containing field names and values"); } @Override @@ -146,7 +140,7 @@ public class Ensemble implements LenientlyParsedTrainedModel, StrictlyParsedTrai return classificationLabels; } - private List inferAndProcess(List fields) { + private List inferAndProcess(Map fields) { List modelInferences = models.stream().map(m -> m.infer(fields)).collect(Collectors.toList()); return outputAggregator.processValues(modelInferences); } @@ -210,15 +204,6 @@ public class Ensemble implements LenientlyParsedTrainedModel, StrictlyParsedTrai @Override public void validate() { - if (this.featureNames != null) { - if (this.models.stream() - .anyMatch(trainedModel -> trainedModel.getFeatureNames().equals(this.featureNames) == false)) { - throw ExceptionsHelper.badRequestException( - "[{}] must be the same and in the same order for each of the {}", - FEATURE_NAMES.getPreferredName(), - TRAINED_MODELS.getPreferredName()); - } - } if (outputAggregator.expectedValueSize() != null && outputAggregator.expectedValueSize() != models.size()) { throw ExceptionsHelper.badRequestException( diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/Tree.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/Tree.java index 5dca29d58437..3a91ec0cd86c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/Tree.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/Tree.java @@ -106,7 +106,9 @@ public class Tree implements LenientlyParsedTrainedModel, StrictlyParsedTrainedM @Override public double infer(Map fields) { - List features = featureNames.stream().map(f -> (Double) fields.get(f)).collect(Collectors.toList()); + List features = featureNames.stream().map(f -> + fields.get(f) instanceof Number ? ((Number)fields.get(f)).doubleValue() : null + ).collect(Collectors.toList()); return infer(features); } @@ -146,7 +148,11 @@ public class Tree implements LenientlyParsedTrainedModel, StrictlyParsedTrainedM throw new UnsupportedOperationException( "Cannot determine classification probability with target_type [" + targetType.toString() + "]"); } - return classificationProbability(featureNames.stream().map(f -> (Double) fields.get(f)).collect(Collectors.toList())); + List features = featureNames.stream().map(f -> + fields.get(f) instanceof Number ? ((Number)fields.get(f)).doubleValue() : null) + .collect(Collectors.toList()); + + return classificationProbability(features); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelDefinitionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelDefinitionTests.java index 0ecb7c1e6c2e..5339d93bf910 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelDefinitionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelDefinitionTests.java @@ -8,13 +8,18 @@ package org.elasticsearch.xpack.core.ml.inference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.ml.inference.preprocessing.FrequencyEncodingTests; import org.elasticsearch.xpack.core.ml.inference.preprocessing.OneHotEncodingTests; import org.elasticsearch.xpack.core.ml.inference.preprocessing.TargetMeanEncodingTests; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ensemble.Ensemble; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.tree.Tree; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.tree.TreeTests; import org.junit.Before; @@ -26,6 +31,8 @@ import java.util.function.Predicate; import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.hamcrest.Matchers.equalTo; + public class TrainedModelDefinitionTests extends AbstractSerializingTestCase { @@ -61,8 +68,229 @@ public class TrainedModelDefinitionTests extends AbstractSerializingTestCase randomAlphaOfLength(10)) + .limit(randomLongBetween(1, 10)) + .collect(Collectors.toList()))) .setTrainedModel(randomFrom(TreeTests.createRandom())); } + + 
private static final String ENSEMBLE_MODEL = "" + + "{\n" + + " \"input\": {\n" + + " \"field_names\": [\n" + + " \"col1\",\n" + + " \"col2\",\n" + + " \"col3\",\n" + + " \"col4\"\n" + + " ]\n" + + " },\n" + + " \"preprocessors\": [\n" + + " {\n" + + " \"one_hot_encoding\": {\n" + + " \"field\": \"col1\",\n" + + " \"hot_map\": {\n" + + " \"male\": \"col1_male\",\n" + + " \"female\": \"col1_female\"\n" + + " }\n" + + " }\n" + + " },\n" + + " {\n" + + " \"target_mean_encoding\": {\n" + + " \"field\": \"col2\",\n" + + " \"feature_name\": \"col2_encoded\",\n" + + " \"target_map\": {\n" + + " \"S\": 5.0,\n" + + " \"M\": 10.0,\n" + + " \"L\": 20\n" + + " },\n" + + " \"default_value\": 5.0\n" + + " }\n" + + " },\n" + + " {\n" + + " \"frequency_encoding\": {\n" + + " \"field\": \"col3\",\n" + + " \"feature_name\": \"col3_encoded\",\n" + + " \"frequency_map\": {\n" + + " \"none\": 0.75,\n" + + " \"true\": 0.10,\n" + + " \"false\": 0.15\n" + + " }\n" + + " }\n" + + " }\n" + + " ],\n" + + " \"trained_model\": {\n" + + " \"ensemble\": {\n" + + " \"feature_names\": [\n" + + " \"col1_male\",\n" + + " \"col1_female\",\n" + + " \"col2_encoded\",\n" + + " \"col3_encoded\",\n" + + " \"col4\"\n" + + " ],\n" + + " \"aggregate_output\": {\n" + + " \"weighted_sum\": {\n" + + " \"weights\": [\n" + + " 0.5,\n" + + " 0.5\n" + + " ]\n" + + " }\n" + + " },\n" + + " \"target_type\": \"regression\",\n" + + " \"trained_models\": [\n" + + " {\n" + + " \"tree\": {\n" + + " \"feature_names\": [\n" + + " \"col1_male\",\n" + + " \"col1_female\",\n" + + " \"col4\"\n" + + " ],\n" + + " \"tree_structure\": [\n" + + " {\n" + + " \"node_index\": 0,\n" + + " \"split_feature\": 0,\n" + + " \"split_gain\": 12.0,\n" + + " \"threshold\": 10.0,\n" + + " \"decision_type\": \"lte\",\n" + + " \"default_left\": true,\n" + + " \"left_child\": 1,\n" + + " \"right_child\": 2\n" + + " },\n" + + " {\n" + + " \"node_index\": 1,\n" + + " \"leaf_value\": 1\n" + + " },\n" + + " {\n" + + " \"node_index\": 2,\n" + + " \"leaf_value\": 2\n" + + " }\n" + + " ],\n" + + " \"target_type\": \"regression\"\n" + + " }\n" + + " },\n" + + " {\n" + + " \"tree\": {\n" + + " \"feature_names\": [\n" + + " \"col2_encoded\",\n" + + " \"col3_encoded\",\n" + + " \"col4\"\n" + + " ],\n" + + " \"tree_structure\": [\n" + + " {\n" + + " \"node_index\": 0,\n" + + " \"split_feature\": 0,\n" + + " \"split_gain\": 12.0,\n" + + " \"threshold\": 10.0,\n" + + " \"decision_type\": \"lte\",\n" + + " \"default_left\": true,\n" + + " \"left_child\": 1,\n" + + " \"right_child\": 2\n" + + " },\n" + + " {\n" + + " \"node_index\": 1,\n" + + " \"leaf_value\": 1\n" + + " },\n" + + " {\n" + + " \"node_index\": 2,\n" + + " \"leaf_value\": 2\n" + + " }\n" + + " ],\n" + + " \"target_type\": \"regression\"\n" + + " }\n" + + " }\n" + + " ]\n" + + " }\n" + + " }\n" + + "}"; + private static final String TREE_MODEL = "" + + "{\n" + + " \"input\": {\n" + + " \"field_names\": [\n" + + " \"col1\",\n" + + " \"col2\",\n" + + " \"col3\",\n" + + " \"col4\"\n" + + " ]\n" + + " },\n" + + " \"preprocessors\": [\n" + + " {\n" + + " \"one_hot_encoding\": {\n" + + " \"field\": \"col1\",\n" + + " \"hot_map\": {\n" + + " \"male\": \"col1_male\",\n" + + " \"female\": \"col1_female\"\n" + + " }\n" + + " }\n" + + " },\n" + + " {\n" + + " \"target_mean_encoding\": {\n" + + " \"field\": \"col2\",\n" + + " \"feature_name\": \"col2_encoded\",\n" + + " \"target_map\": {\n" + + " \"S\": 5.0,\n" + + " \"M\": 10.0,\n" + + " \"L\": 20\n" + + " },\n" + + " \"default_value\": 5.0\n" + + " }\n" + + " },\n" + + " {\n" + + " 
\"frequency_encoding\": {\n" + + " \"field\": \"col3\",\n" + + " \"feature_name\": \"col3_encoded\",\n" + + " \"frequency_map\": {\n" + + " \"none\": 0.75,\n" + + " \"true\": 0.10,\n" + + " \"false\": 0.15\n" + + " }\n" + + " }\n" + + " }\n" + + " ],\n" + + " \"trained_model\": {\n" + + " \"tree\": {\n" + + " \"feature_names\": [\n" + + " \"col1_male\",\n" + + " \"col1_female\",\n" + + " \"col4\"\n" + + " ],\n" + + " \"tree_structure\": [\n" + + " {\n" + + " \"node_index\": 0,\n" + + " \"split_feature\": 0,\n" + + " \"split_gain\": 12.0,\n" + + " \"threshold\": 10.0,\n" + + " \"decision_type\": \"lte\",\n" + + " \"default_left\": true,\n" + + " \"left_child\": 1,\n" + + " \"right_child\": 2\n" + + " },\n" + + " {\n" + + " \"node_index\": 1,\n" + + " \"leaf_value\": 1\n" + + " },\n" + + " {\n" + + " \"node_index\": 2,\n" + + " \"leaf_value\": 2\n" + + " }\n" + + " ],\n" + + " \"target_type\": \"regression\"\n" + + " }\n" + + " }\n" + + "}"; + + public void testEnsembleSchemaDeserialization() throws IOException { + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, ENSEMBLE_MODEL); + TrainedModelDefinition definition = TrainedModelDefinition.fromXContent(parser, false).build(); + assertThat(definition.getTrainedModel().getClass(), equalTo(Ensemble.class)); + } + + public void testTreeSchemaDeserialization() throws IOException { + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, TREE_MODEL); + TrainedModelDefinition definition = TrainedModelDefinition.fromXContent(parser, false).build(); + assertThat(definition.getTrainedModel().getClass(), equalTo(Tree.class)); + } + @Override protected TrainedModelDefinition createTestInstance() { return createRandomBuilder().build(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ensemble/EnsembleTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ensemble/EnsembleTests.java index 1e1b1f8f7286..eb537e247e99 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ensemble/EnsembleTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ensemble/EnsembleTests.java @@ -26,6 +26,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.function.Predicate; @@ -108,25 +109,6 @@ public class EnsembleTests extends AbstractSerializingTestCase { return new NamedWriteableRegistry(entries); } - public void testEnsembleWithModelsThatHaveDifferentFeatureNames() { - List featureNames = Arrays.asList("foo", "bar", "baz", "farequote"); - ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> { - Ensemble.builder().setFeatureNames(featureNames) - .setTrainedModels(Arrays.asList(TreeTests.buildRandomTree(Arrays.asList("bar", "foo", "baz", "farequote"), 6))) - .build() - .validate(); - }); - assertThat(ex.getMessage(), equalTo("[feature_names] must be the same and in the same order for each of the trained_models")); - - ex = expectThrows(ElasticsearchException.class, () -> { - Ensemble.builder().setFeatureNames(featureNames) - 
.setTrainedModels(Arrays.asList(TreeTests.buildRandomTree(Arrays.asList("completely_different"), 6))) - .build() - .validate(); - }); - assertThat(ex.getMessage(), equalTo("[feature_names] must be the same and in the same order for each of the trained_models")); - } - public void testEnsembleWithAggregatedOutputDifferingFromTrainedModels() { List featureNames = Arrays.asList("foo", "bar"); int numberOfModels = 5; @@ -279,6 +261,17 @@ public class EnsembleTests extends AbstractSerializingTestCase { for(int i = 0; i < expected.size(); i++) { assertThat(probabilities.get(i), closeTo(expected.get(i), eps)); } + + // This should handle missing values and take the default_left path + featureMap = new HashMap<>(2) {{ + put("foo", 0.3); + put("bar", null); + }}; + expected = Arrays.asList(0.6899744811, 0.3100255188); + probabilities = ensemble.classificationProbability(featureMap); + for(int i = 0; i < expected.size(); i++) { + assertThat(probabilities.get(i), closeTo(expected.get(i), eps)); + } } public void testClassificationInference() { @@ -336,6 +329,12 @@ public class EnsembleTests extends AbstractSerializingTestCase { featureVector = Arrays.asList(0.0, 1.0); featureMap = zipObjMap(featureNames, featureVector); assertEquals(1.0, ensemble.infer(featureMap), 0.00001); + + featureMap = new HashMap<>(2) {{ + put("foo", 0.3); + put("bar", null); + }}; + assertEquals(0.0, ensemble.infer(featureMap), 0.00001); } public void testRegressionInference() { @@ -394,6 +393,12 @@ public class EnsembleTests extends AbstractSerializingTestCase { featureVector = Arrays.asList(2.0, 0.7); featureMap = zipObjMap(featureNames, featureVector); assertEquals(1.0, ensemble.infer(featureMap), 0.00001); + + featureMap = new HashMap<>(2) {{ + put("foo", 0.3); + put("bar", null); + }}; + assertEquals(1.8, ensemble.infer(featureMap), 0.00001); } private static Map zipObjMap(List keys, List values) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/TreeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/TreeTests.java index ce27120d671b..81030585f188 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/TreeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/TreeTests.java @@ -17,12 +17,14 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.function.Predicate; import java.util.stream.Collectors; import java.util.stream.IntStream; +import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; @@ -118,19 +120,26 @@ public class TreeTests extends AbstractSerializingTestCase { // This feature vector should hit the right child of the root node List featureVector = Arrays.asList(0.6, 0.0); Map featureMap = zipObjMap(featureNames, featureVector); - assertEquals(0.3, tree.infer(featureMap), 0.00001); + assertThat(0.3, closeTo(tree.infer(featureMap), 0.00001)); // This should hit the left child of the left child of the root node // i.e. 
it takes the path left, left featureVector = Arrays.asList(0.3, 0.7); featureMap = zipObjMap(featureNames, featureVector); - assertEquals(0.1, tree.infer(featureMap), 0.00001); + assertThat(0.1, closeTo(tree.infer(featureMap), 0.00001)); // This should hit the right child of the left child of the root node // i.e. it takes the path left, right featureVector = Arrays.asList(0.3, 0.9); featureMap = zipObjMap(featureNames, featureVector); - assertEquals(0.2, tree.infer(featureMap), 0.00001); + assertThat(0.2, closeTo(tree.infer(featureMap), 0.00001)); + + // This should handle missing values and take the default_left path + featureMap = new HashMap<>(2) {{ + put("foo", 0.3); + put("bar", null); + }}; + assertThat(0.1, closeTo(tree.infer(featureMap), 0.00001)); } public void testTreeClassificationProbability() { @@ -162,6 +171,13 @@ public class TreeTests extends AbstractSerializingTestCase { featureVector = Arrays.asList(0.3, 0.9); featureMap = zipObjMap(featureNames, featureVector); assertEquals(Arrays.asList(1.0, 0.0), tree.classificationProbability(featureMap)); + + // This should handle missing values and take the default_left path + featureMap = new HashMap<>(2) {{ + put("foo", 0.3); + put("bar", null); + }}; + assertEquals(1.0, tree.infer(featureMap), 0.00001); } public void testTreeWithNullRoot() { From f52a98449687c9e53d74437140d3fe14c8e2e5c3 Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Mon, 7 Oct 2019 14:26:06 -0500 Subject: [PATCH 32/55] Watcher - catch uncaught exception. (#47680) If a thread pool rejection exception happens, an alternative code path is chosen to write history and delete the trigger. If an exception happens while deleting the trigger, it may be thrown and never caught. This commit catches the exception and provides a meaningful error message.
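As a rough sketch of the guarded-cleanup pattern this commit applies (only deleteTrigger and the triggeredWatch.id() accessor below mirror the real code; the log message text is illustrative):

    try {
        deleteTrigger(triggeredWatch.id());
    } catch (Exception exc) {
        // log and swallow: the rejection has already been recorded in watch history,
        // so a failure while cleaning up must not escape the rejection handler
        logger.error("failed to delete triggered watch [" + triggeredWatch.id() + "]", exc);
    }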
fixes #47008 --- .../xpack/watcher/execution/ExecutionService.java | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java index ff1e1f8ec98b..2033ea49eb5b 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java @@ -433,7 +433,14 @@ public class ExecutionService { "Error storing watch history record for watch [{}] after thread pool rejection", triggeredWatch.id()), exc); } - deleteTrigger(triggeredWatch.id()); + try { + deleteTrigger(triggeredWatch.id()); + } catch (Exception exc) { + logger.error((Supplier) () -> + new ParameterizedMessage( + "Error deleting entry from .triggered_watches for watch [{}] after thread pool rejection", + triggeredWatch.id()), exc); + } })); } } From 7db3dc3b7fa3a63ca7a263db37ae4a1d33b9eb0e Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Mon, 7 Oct 2019 14:56:53 -0500 Subject: [PATCH 33/55] Re-enable Watcher rest test (#47687) This test is believed to be fixed by #43939 Closes #45585 --- .../test/resources/rest-api-spec/test/painless/10_basic.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml index bd59530c593e..3f47401068fe 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml @@ -1,8 +1,5 @@ --- "Test execute watch api": - - skip: - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/45585" - do: cluster.health: From 07ed3d5081b3789cf5b04118f6227f0a4ec95915 Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Mon, 7 Oct 2019 15:12:37 -0500 Subject: [PATCH 34/55] Re-enable Watcher rest tests (#47690) These tests are believed to be fixed by #43939 closes #45582 and #43975 --- .../rest-api-spec/test/painless/50_update_scripts.yml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/50_update_scripts.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/50_update_scripts.yml index 37b53821ba1d..e764505f9c05 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/50_update_scripts.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/50_update_scripts.yml @@ -3,8 +3,7 @@ --- "Test transform scripts are updated on execution": - skip: - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/43975" + features: warnings - do: cluster.health: @@ -80,8 +79,6 @@ "Test condition scripts are updated on execution": - skip: features: warnings - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/45582" - do: cluster.health: From 8b38e9f39b710de8d767774b0e130f26a325589b Mon Sep 17 00:00:00 2001 From: Gordon Brown Date: Mon, 7 Oct 2019 14:20:36 -0600 Subject: [PATCH 35/55] Ensure index is green after closing in test (#47541) This test was less stable following a backport as the shards in these indices did 
not always show up as allocated immediately after closing them. This ensures those shards have stabilized before trying to roll over. --- .../action/admin/indices/rollover/RolloverIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index 5846ebabc0e9..1cabb0020fe7 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -423,6 +423,7 @@ public class RolloverIT extends ESIntegTestCase { assertAcked(client().admin().indices().prepareClose(closedIndex).get()); assertAcked(client().admin().indices().prepareClose(writeIndexPrefix + "000001").get()); + ensureGreen(aliasName); RolloverResponse rolloverResponse = client().admin().indices().prepareRolloverIndex(aliasName) .addMaxIndexDocsCondition(1) From 04a1b1d96403012f75e5f6b89f1b8fabe665b0cc Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Mon, 7 Oct 2019 15:46:31 -0500 Subject: [PATCH 36/55] Re-enable Watcher rest tests (#47692) This test is believed to be fixed by #43939 closes #43889 --- .../test/resources/rest-api-spec/test/painless/10_basic.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml index 3f47401068fe..f9a89e2ef169 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml @@ -124,9 +124,6 @@ --- "Test execute watch api with rest_total_hits_as_int": - - skip: - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/43889" - do: cluster.health: From 9358b2fb93dd5944b49f46e24b2e51b8fcfaf3f0 Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Mon, 7 Oct 2019 16:01:49 -0500 Subject: [PATCH 37/55] Re-enable Watcher rest test (#47699) This test is believed to be fixed by #43939 closes #43988 --- .../resources/rest-api-spec/test/painless/20_minimal_body.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/20_minimal_body.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/20_minimal_body.yml index 4ba1fc6a0cc1..56b5ebd20e93 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/20_minimal_body.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/20_minimal_body.yml @@ -1,8 +1,6 @@ --- "Test execute watch api with minimal body": - - skip: - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/43988" + - do: cluster.health: wait_for_status: green From fbda3a7e19a41d718260bdf6909b271e530cc5cb Mon Sep 17 00:00:00 2001 From: Przemko Robakowski Date: Mon, 7 Oct 2019 23:06:49 +0200 Subject: [PATCH 38/55] Explicit name for doc snippets (#46951) * Explicit name for doc snippets This change adds an option to specify an explicit name for a doc snippet. This name will be used instead of the line number when creating the yml file in the buildRestTests task. Stable names should improve tracking of changes through history and allow Gradle to skip tests on non-code docs changes.
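For example, a docs snippet can now carry an explicit id (the name "my-snippet-name" here is hypothetical; the attribute syntax mirrors the docs changes below):

    [source,console,id=my-snippet-name]
    --------------------------------------------------
    GET /_cat/health?v
    --------------------------------------------------

The generated yml test is then named "my-snippet-name" rather than "line_NN".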
* Avoid duplication in names * Changes id declaration, more examples * Fix names in examples * Unit test added * Throw exception on duplicate names * Moved UT to Java --- .../doc/RestTestsFromSnippetsTask.groovy | 12 +++++- .../gradle/doc/SnippetsTask.groovy | 26 +++++++++-- .../gradle/doc/SnippetsTaskTests.java | 43 +++++++++++++++++++ docs/build.gradle | 2 + docs/reference/cat/alias.asciidoc | 2 +- docs/reference/cat/allocation.asciidoc | 2 +- docs/reference/cat/count.asciidoc | 4 +- docs/reference/cat/fielddata.asciidoc | 2 +- docs/reference/cat/health.asciidoc | 4 +- 9 files changed, 85 insertions(+), 12 deletions(-) create mode 100644 buildSrc/src/test/java/org/elasticsearch/gradle/doc/SnippetsTaskTests.java diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy index 6a522af12dd0..ea2301cb203b 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy @@ -58,6 +58,8 @@ class RestTestsFromSnippetsTask extends SnippetsTask { @OutputDirectory File testRoot = project.file('build/rest') + Set names = new HashSet<>() + RestTestsFromSnippetsTask() { project.afterEvaluate { // Wait to set this so testRoot can be customized @@ -238,7 +240,14 @@ class RestTestsFromSnippetsTask extends SnippetsTask { } } else { current.println('---') - current.println("\"line_$test.start\":") + if (test.name != null && test.name.isBlank() == false) { + if(names.add(test.name) == false) { + throw new InvalidUserDataException("Duplicated snippet name '$test.name': $test") + } + current.println("\"$test.name\":") + } else { + current.println("\"line_$test.start\":") + } /* The Elasticsearch test runner doesn't support quite a few * constructs unless we output this skip. 
We don't know if * we're going to use these constructs, but we might so we @@ -406,6 +415,7 @@ class RestTestsFromSnippetsTask extends SnippetsTask { if (lastDocsPath == test.path) { return } + names.clear() finishLastTest() lastDocsPath = test.path diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy index 9ef0aa6197cb..0b59c361104a 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy @@ -82,6 +82,7 @@ class SnippetsTask extends DefaultTask { */ for (File file: docs) { String lastLanguage + String name int lastLanguageLine Snippet snippet = null StringBuilder contents = null @@ -155,19 +156,21 @@ class SnippetsTask extends DefaultTask { if (line ==~ /-{4,}\s*/) { // Four dashes looks like a snippet if (snippet == null) { Path path = docs.dir.toPath().relativize(file.toPath()) - snippet = new Snippet(path: path, start: lineNumber, testEnv: testEnv) + snippet = new Snippet(path: path, start: lineNumber, testEnv: testEnv, name: name) if (lastLanguageLine == lineNumber - 1) { snippet.language = lastLanguage } + name = null } else { snippet.end = lineNumber } return } - matcher = line =~ /\["?source"?,\s*"?([-\w]+)"?(,.*)?].*/ - if (matcher.matches()) { - lastLanguage = matcher.group(1) + def source = matchSource(line) + if (source.matches) { + lastLanguage = source.language lastLanguageLine = lineNumber + name = source.name return } if (line ==~ /\/\/\s*AUTOSENSE\s*/) { @@ -310,6 +313,20 @@ class SnippetsTask extends DefaultTask { } } + static Source matchSource(String line) { + def matcher = line =~ /\["?source"?,\s*"?([-\w]+)"?(,((?!id=).)*(id="?([-\w]+)"?)?(.*))?].*/ + if(matcher.matches()){ + return new Source(matches: true, language: matcher.group(1), name: matcher.group(5)) + } + return new Source(matches: false) + } + + static class Source { + boolean matches + String language + String name + } + static class Snippet { static final int NOT_FINISHED = -1 @@ -336,6 +353,7 @@ class SnippetsTask extends DefaultTask { boolean curl List warnings = new ArrayList() boolean skipShardsFailures = false + String name @Override public String toString() { diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/doc/SnippetsTaskTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/doc/SnippetsTaskTests.java new file mode 100644 index 000000000000..8bd480e6f0dc --- /dev/null +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/doc/SnippetsTaskTests.java @@ -0,0 +1,43 @@ +package org.elasticsearch.gradle.doc; + +import org.elasticsearch.gradle.test.GradleUnitTestCase; + +public class SnippetsTaskTests extends GradleUnitTestCase { + + public void testMatchSource() { + SnippetsTask.Source source = SnippetsTask.matchSource("[source,console]"); + assertTrue(source.getMatches()); + assertEquals("console", source.getLanguage()); + assertNull(source.getName()); + + source = SnippetsTask.matchSource("[source,console,id=snippet-name-1]"); + assertTrue(source.getMatches()); + assertEquals("console", source.getLanguage()); + assertEquals("snippet-name-1", source.getName()); + + source = SnippetsTask.matchSource("[source, console, id=snippet-name-1]"); + assertTrue(source.getMatches()); + assertEquals("console", source.getLanguage()); + assertEquals("snippet-name-1", source.getName()); + + source = SnippetsTask.matchSource("[source,console,attr=5,id=snippet-name-1,attr2=6]"); + 
assertTrue(source.getMatches()); + assertEquals("console", source.getLanguage()); + assertEquals("snippet-name-1", source.getName()); + + source = SnippetsTask.matchSource("[source,console, attr=5, id=snippet-name-1, attr2=6]"); + assertTrue(source.getMatches()); + assertEquals("console", source.getLanguage()); + assertEquals("snippet-name-1", source.getName()); + + source = SnippetsTask.matchSource("[\"source\",\"console\",id=\"snippet-name-1\"]"); + assertTrue(source.getMatches()); + assertEquals("console", source.getLanguage()); + assertEquals("snippet-name-1", source.getName()); + + source = SnippetsTask.matchSource("[source,console,id=\"snippet-name-1\"]"); + assertTrue(source.getMatches()); + assertEquals("console", source.getLanguage()); + assertEquals("snippet-name-1", source.getName()); + } +} diff --git a/docs/build.gradle b/docs/build.gradle index 0fc725bf589a..0c3df06b2caf 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -93,6 +93,8 @@ buildRestTests.docs = fileTree(projectDir) { listSnippets.docs = buildRestTests.docs +listConsoleCandidates.docs = buildRestTests.docs + Closure setupTwitter = { String name, int count -> buildRestTests.setups[name] = ''' - do: diff --git a/docs/reference/cat/alias.asciidoc b/docs/reference/cat/alias.asciidoc index 60f2d8ed9cb1..ec15adce0d32 100644 --- a/docs/reference/cat/alias.asciidoc +++ b/docs/reference/cat/alias.asciidoc @@ -42,7 +42,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] //// Hidden setup for example: -[source,console] +[source,console,id=cat-aliases-example] -------------------------------------------------- PUT test1 { diff --git a/docs/reference/cat/allocation.asciidoc b/docs/reference/cat/allocation.asciidoc index c089d3a855f7..821cfb7c0df3 100644 --- a/docs/reference/cat/allocation.asciidoc +++ b/docs/reference/cat/allocation.asciidoc @@ -41,7 +41,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] [[cat-allocation-api-example]] ==== {api-examples-title} -[source,console] +[source,console,id=cat-allocation-example] -------------------------------------------------- GET /_cat/allocation?v -------------------------------------------------- diff --git a/docs/reference/cat/count.asciidoc b/docs/reference/cat/count.asciidoc index e38151ce4b55..b1605e3014f9 100644 --- a/docs/reference/cat/count.asciidoc +++ b/docs/reference/cat/count.asciidoc @@ -50,7 +50,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] The following `count` API request retrieves the document count of a single index, `twitter`. -[source,console] +[source,console,id=cat-count-individual-example] -------------------------------------------------- GET /_cat/count/twitter?v -------------------------------------------------- @@ -72,7 +72,7 @@ epoch timestamp count The following `count` API request retrieves the document count of all indices in the cluster. 
-[source,console] +[source,console,id=cat-count-all-example] -------------------------------------------------- GET /_cat/count?v -------------------------------------------------- diff --git a/docs/reference/cat/fielddata.asciidoc b/docs/reference/cat/fielddata.asciidoc index f63462f8e049..878d06ea1f0f 100644 --- a/docs/reference/cat/fielddata.asciidoc +++ b/docs/reference/cat/fielddata.asciidoc @@ -48,7 +48,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] //// Hidden setup snippet to build an index with fielddata so our results are real: -[source,console] +[source,console,id=cat-fielddata-example] -------------------------------------------------- PUT test { diff --git a/docs/reference/cat/health.asciidoc b/docs/reference/cat/health.asciidoc index 803f564d2c6e..ecf48fd77484 100644 --- a/docs/reference/cat/health.asciidoc +++ b/docs/reference/cat/health.asciidoc @@ -67,7 +67,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] By default, the cat health API returns `HH:MM:SS` and https://en.wikipedia.org/wiki/Unix_time[Unix `epoch`] timestamps. For example: -[source,console] +[source,console,id=cat-health-example] -------------------------------------------------- GET /_cat/health?v -------------------------------------------------- @@ -87,7 +87,7 @@ epoch timestamp cluster status node.total node.data shards pri relo i ===== Example without a timestamp You can use the `ts` (timestamps) parameter to disable timestamps. For example: -[source,console] +[source,console,id=cat-health-no-timestamp-example] -------------------------------------------------- GET /_cat/health?v&ts=false -------------------------------------------------- From 4de0d777773be240f33af95122c5630aff8644e8 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 7 Oct 2019 17:08:37 -0700 Subject: [PATCH 39/55] Switch stored script example to script_score query (#47691) The example use of a scoring script was incorrectly using a filter script query, which has no scoring, and thus no _score variable available. This commit converts the example doc to use the newer script_score query. --- docs/reference/scripting/using.asciidoc | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/reference/scripting/using.asciidoc b/docs/reference/scripting/using.asciidoc index 31b8612ce13b..02a3cc6042c0 100644 --- a/docs/reference/scripting/using.asciidoc +++ b/docs/reference/scripting/using.asciidoc @@ -153,6 +153,7 @@ POST _scripts/calculate-score } } ----------------------------------- +// TEST[setup:twitter] This same script can be retrieved with: @@ -166,10 +167,15 @@ Stored scripts can be used by specifying the `id` parameters as follows: [source,console] -------------------------------------------------- -GET _search +GET twitter/_search { "query": { - "script": { + "script_score": { + "query": { + "match": { + "message": "some message" + } + }, "script": { "id": "calculate-score", "params": { From e0c2ac102f54cd52c21f1bded6e2b3ad38befddc Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Mon, 7 Oct 2019 19:31:17 -0600 Subject: [PATCH 40/55] Add a test for SLM retention with security enabled (#47608) This enhances the existing SLM test using users/roles/etc to also test that SLM retention works when security is enabled.
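For reference, the managing role exercised by the test now needs repository and snapshot admin privileges in addition to manage_slm; expressed as a security API call, the role set up in the test changes below is roughly:

    PUT /_security/role/slm-manage
    {
      "cluster": [ "manage_slm", "cluster:admin/repository/*", "cluster:admin/snapshot/*" ],
      "indices": [ { "names": [ ".slm-history*" ], "privileges": [ "all" ] } ]
    }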
Relates to #43663 --- .../xpack/security/PermissionsIT.java | 63 ++++++++++++++----- 1 file changed, 46 insertions(+), 17 deletions(-) diff --git a/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java b/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java index 2dbc33266660..0c738038e02e 100644 --- a/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java +++ b/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java @@ -8,9 +8,11 @@ package org.elasticsearch.xpack.security; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; -import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.elasticsearch.client.Node; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -19,9 +21,11 @@ import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientBuilder; import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.core.AcknowledgedResponse; import org.elasticsearch.client.slm.DeleteSnapshotLifecyclePolicyRequest; import org.elasticsearch.client.slm.ExecuteSnapshotLifecyclePolicyRequest; import org.elasticsearch.client.slm.ExecuteSnapshotLifecyclePolicyResponse; +import org.elasticsearch.client.slm.ExecuteSnapshotLifecycleRetentionRequest; import org.elasticsearch.client.slm.GetSnapshotLifecyclePolicyRequest; import org.elasticsearch.client.slm.PutSnapshotLifecyclePolicyRequest; import org.elasticsearch.client.slm.SnapshotLifecyclePolicy; @@ -38,6 +42,7 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.junit.annotations.TestIssueLogging; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.core.ilm.DeleteAction; @@ -57,8 +62,8 @@ import java.util.Map; import static java.util.Collections.singletonMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; public class PermissionsIT extends ESRestTestCase { @@ -145,6 +150,7 @@ public class PermissionsIT extends ESRestTestCase { } public void testSLMWithPermissions() throws Exception { + String repo = "my_repository"; createIndexAsAdmin("index", Settings.builder().put("index.number_of_replicas", 0).build(), ""); // Set up two roles and users, one for reading SLM, another for managing SLM @@ -152,7 +158,7 @@ public class PermissionsIT extends ESRestTestCase { roleRequest.setJsonEntity("{ \"cluster\": [\"read_slm\"] }"); 
assertOK(adminClient().performRequest(roleRequest)); roleRequest = new Request("PUT", "/_security/role/slm-manage"); - roleRequest.setJsonEntity("{ \"cluster\": [\"manage_slm\", \"create_snapshot\"]," + + roleRequest.setJsonEntity("{ \"cluster\": [\"manage_slm\", \"cluster:admin/repository/*\", \"cluster:admin/snapshot/*\"]," + "\"indices\": [{ \"names\": [\".slm-history*\"],\"privileges\": [\"all\"] }] }"); assertOK(adminClient().performRequest(roleRequest)); @@ -182,7 +188,7 @@ public class PermissionsIT extends ESRestTestCase { Settings.Builder settingsBuilder = Settings.builder().put("location", "."); repoRequest.settings(settingsBuilder); - repoRequest.name("my_repository"); + repoRequest.name(repo); repoRequest.type(FsRepository.TYPE); org.elasticsearch.action.support.master.AcknowledgedResponse response = hlAdminClient.snapshot().createRepository(repoRequest, RequestOptions.DEFAULT); @@ -191,7 +197,8 @@ public class PermissionsIT extends ESRestTestCase { Map config = new HashMap<>(); config.put("indices", Collections.singletonList("index")); SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( - "policy_id", "name", "1 2 3 * * ?", "my_repository", config, SnapshotRetentionConfiguration.EMPTY); + "policy_id", "name", "1 2 3 * * ?", repo, config, + new SnapshotRetentionConfiguration(TimeValue.ZERO, null, null)); PutSnapshotLifecyclePolicyRequest request = new PutSnapshotLifecyclePolicyRequest(policy); expectThrows(ElasticsearchStatusException.class, @@ -209,6 +216,40 @@ public class PermissionsIT extends ESRestTestCase { ExecuteSnapshotLifecyclePolicyResponse executeResp = adminHLRC.indexLifecycle().executeSnapshotLifecyclePolicy(executeRequest, RequestOptions.DEFAULT); + final String snapName = executeResp.getSnapshotName(); + + assertBusy(() -> { + try { + logger.info("--> checking for snapshot to be created"); + GetSnapshotsRequest getSnaps = new GetSnapshotsRequest(repo); + getSnaps.snapshots(new String[]{snapName}); + GetSnapshotsResponse getResp = adminHLRC.snapshot().get(getSnaps, RequestOptions.DEFAULT); + assertThat(getResp.getSnapshots(repo).get(0).state(), equalTo(SnapshotState.SUCCESS)); + } catch (ElasticsearchException e) { + fail("expected snapshot to exist but it does not: " + e.getDetailedMessage()); + } + }); + + ExecuteSnapshotLifecycleRetentionRequest executeRetention = new ExecuteSnapshotLifecycleRetentionRequest(); + expectThrows(ElasticsearchStatusException.class, () -> + readHlrc.indexLifecycle().executeSnapshotLifecycleRetention(executeRetention, RequestOptions.DEFAULT)); + + AcknowledgedResponse retentionResp = + adminHLRC.indexLifecycle().executeSnapshotLifecycleRetention(executeRetention, RequestOptions.DEFAULT); + assertTrue(retentionResp.isAcknowledged()); + + assertBusy(() -> { + try { + logger.info("--> checking for snapshot to be deleted"); + GetSnapshotsRequest getSnaps = new GetSnapshotsRequest(repo); + getSnaps.snapshots(new String[]{snapName}); + GetSnapshotsResponse getResp = adminHLRC.snapshot().get(getSnaps, RequestOptions.DEFAULT); + assertThat(getResp.getSnapshots(repo).size(), equalTo(0)); + } catch (ElasticsearchException e) { + // great, we want it to not exist + assertThat(e.getDetailedMessage(), containsString("snapshot_missing_exception")); + } + }); DeleteSnapshotLifecyclePolicyRequest deleteRequest = new DeleteSnapshotLifecyclePolicyRequest("policy_id"); expectThrows(ElasticsearchStatusException.class, () -> @@ -216,18 +257,6 @@ public class PermissionsIT extends ESRestTestCase { 
adminHLRC.indexLifecycle().deleteSnapshotLifecyclePolicy(deleteRequest, RequestOptions.DEFAULT); - // Delete snapshot to clean up and make sure it's not on-going. - // This is inside an assertBusy because the snapshot may not - // yet exist (in which case it throws an error) - assertBusy(() -> { - try { - DeleteSnapshotRequest delReq = new DeleteSnapshotRequest("my_repository", executeResp.getSnapshotName()); - hlAdminClient.snapshot().delete(delReq, RequestOptions.DEFAULT); - } catch (ElasticsearchStatusException e) { - fail("got exception: " + e); - } - }); - hlAdminClient.close(); readHlrc.close(); adminHLRC.close(); From ead99f1ae4b5393dcf074ef531064e122bd01773 Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Mon, 7 Oct 2019 19:53:46 -0600 Subject: [PATCH 41/55] Throw error retrieving non-existent SLM policy (#47679) Previously when retrieving an SLM policy it would always return a 200 with `{}` in the body, even if the policy did not exist. This changes that behavior to throw an error (similar to our other APIs) if a policy doesn't exist. This also adds a basic CRUD yml test for the behavior. Resolves #47664 --- .../rest-api-spec/test/ilm/11_basic_slm.yml | 87 +++++++++++++++++++ .../TransportGetSnapshotLifecycleAction.java | 20 ++++- 2 files changed, 105 insertions(+), 2 deletions(-) create mode 100644 x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/11_basic_slm.yml diff --git a/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/11_basic_slm.yml b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/11_basic_slm.yml new file mode 100644 index 000000000000..7a40feed17de --- /dev/null +++ b/x-pack/plugin/ilm/qa/rest/src/test/resources/rest-api-spec/test/ilm/11_basic_slm.yml @@ -0,0 +1,87 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- +"Test Basic Policy CRUD": + - do: + catch: missing + slm.get_lifecycle: + policy_id: "daily-snapshots" + + - do: + catch: missing + slm.delete_lifecycle: + policy_id: "daily-snapshots" + + - do: + snapshot.create_repository: + repository: repo + body: + type: fs + settings: + location: "my-snaps" + + - do: + slm.put_lifecycle: + policy_id: "daily-snapshots" + body: | + { + "schedule": "0 1 2 3 4 ?", + "name": "", + "repository": "repo", + "config": { + "indices": ["foo-*", "important"], + "ignore_unavailable": false, + "include_global_state": false + }, + "retention": { + "expire_after": "30d", + "min_count": 1, + "max_count": 50 + } + } + + - do: + slm.get_lifecycle: + policy_id: "daily-snapshots" + - match: { daily-snapshots.version: 1 } + - match: { daily-snapshots.policy.name: "" } + - is_true: daily-snapshots.next_execution_millis + - is_true: daily-snapshots.stats + - match: { daily-snapshots.policy.schedule: "0 1 2 3 4 ?" } + + - do: + slm.put_lifecycle: + policy_id: "daily-snapshots" + body: | + { + "schedule": "1 1 1 1 1 ?", + "name": "", + "repository": "repo", + "config": { + "indices": ["foo-*", "important"], + "ignore_unavailable": false, + "include_global_state": false + }, + "retention": { + "expire_after": "30d", + "min_count": 1, + "max_count": 50 + } + } + + - do: + catch: missing + slm.get_lifecycle: + policy_id: "doesnt-exist" + + - do: + slm.get_lifecycle: + policy_id: "daily-snapshots" + - match: { daily-snapshots.version: 2 } + - match: { daily-snapshots.policy.schedule: "1 1 1 1 1 ?" 
} + - is_true: daily-snapshots.next_execution_millis + - is_true: daily-snapshots.stats diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSnapshotLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSnapshotLifecycleAction.java index 90d302eb403d..f4af0d5c5a7b 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSnapshotLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSnapshotLifecycleAction.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.slm.action; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -65,7 +66,13 @@ public class TransportGetSnapshotLifecycleAction extends final ActionListener listener) { SnapshotLifecycleMetadata snapMeta = state.metaData().custom(SnapshotLifecycleMetadata.TYPE); if (snapMeta == null) { - listener.onResponse(new GetSnapshotLifecycleAction.Response(Collections.emptyList())); + if (request.getLifecycleIds().length == 0) { + listener.onResponse(new GetSnapshotLifecycleAction.Response(Collections.emptyList())); + } else { + listener.onFailure(new ResourceNotFoundException( + "snapshot lifecycle policy or policies {} not found, no policies are configured", + Arrays.toString(request.getLifecycleIds()))); + } } else { final Map inProgress; SnapshotsInProgress sip = state.custom(SnapshotsInProgress.TYPE); @@ -100,7 +107,16 @@ public class TransportGetSnapshotLifecycleAction extends new SnapshotLifecyclePolicyItem(policyMeta, inProgress.get(policyMeta.getPolicy().getId()), slmStats.getMetrics().get(policyMeta.getPolicy().getId()))) .collect(Collectors.toList()); - listener.onResponse(new GetSnapshotLifecycleAction.Response(lifecycles)); + if (lifecycles.size() == 0) { + if (request.getLifecycleIds().length == 0) { + listener.onResponse(new GetSnapshotLifecycleAction.Response(Collections.emptyList())); + } else { + listener.onFailure(new ResourceNotFoundException("snapshot lifecycle policy or policies {} not found", + Arrays.toString(request.getLifecycleIds()))); + } + } else { + listener.onResponse(new GetSnapshotLifecycleAction.Response(lifecycles)); + } } } From 9d67a02a5664a9f04db1af1b1f7034fc030a235f Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 8 Oct 2019 09:15:13 +0200 Subject: [PATCH 42/55] Deprecate include_relocations setting (#47443) Setting `cluster.routing.allocation.disk.include_relocations` to `false` is a bad idea since it will lead to the kinds of overshoot that were otherwise fixed in #46079. This commit deprecates this setting so it can be removed in the next major release. 
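As an illustration (a sketch; the exact warning text is asserted in the
updated DiskThresholdSettingsTests below), because the setting is dynamic,
touching it in a cluster settings update now emits a deprecation warning:

    PUT /_cluster/settings
    {
      "transient": {
        "cluster.routing.allocation.disk.include_relocations": false
      }
    }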
--- docs/reference/modules/cluster/disk_allocator.asciidoc | 1 + .../cluster/routing/allocation/DiskThresholdSettings.java | 2 +- .../cluster/routing/allocation/DiskThresholdSettingsTests.java | 3 +++ .../routing/allocation/decider/DiskThresholdDeciderTests.java | 3 --- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/docs/reference/modules/cluster/disk_allocator.asciidoc b/docs/reference/modules/cluster/disk_allocator.asciidoc index aa5ac15c455f..765f249f20e5 100644 --- a/docs/reference/modules/cluster/disk_allocator.asciidoc +++ b/docs/reference/modules/cluster/disk_allocator.asciidoc @@ -69,6 +69,7 @@ PUT /twitter/_settings `cluster.routing.allocation.disk.include_relocations`:: + deprecated[7.5, Future versions will always account for relocations.] Defaults to +true+, which means that Elasticsearch will take into account shards that are currently being relocated to the target node when computing a node's disk usage. Taking relocating shards' sizes into account may, diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java index 334ad41109a2..fa63ec3a9a70 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java @@ -57,7 +57,7 @@ public class DiskThresholdSettings { Setting.Property.Dynamic, Setting.Property.NodeScope); public static final Setting CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.include_relocations", true, - Setting.Property.Dynamic, Setting.Property.NodeScope); + Setting.Property.Dynamic, Setting.Property.NodeScope, Setting.Property.Deprecated); public static final Setting CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60), Setting.Property.Dynamic, Setting.Property.NodeScope); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettingsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettingsTests.java index fd364290e279..adc824ec72e1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettingsTests.java @@ -72,6 +72,9 @@ public class DiskThresholdSettingsTests extends ESTestCase { assertEquals(30L, diskThresholdSettings.getRerouteInterval().seconds()); assertFalse(diskThresholdSettings.isEnabled()); assertFalse(diskThresholdSettings.includeRelocations()); + + assertWarnings("[cluster.routing.allocation.disk.include_relocations] setting was deprecated in Elasticsearch and " + + "will be removed in a future release! 
See the breaking changes documentation for the next major version.");
     }
 
     public void testInvalidConstruction() {
diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java
index f1236cfd252a..db0aa271bc7b 100644
--- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java
@@ -635,7 +635,6 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
     public void testShardRelocationsTakenIntoAccount() {
         Settings diskSettings = Settings.builder()
             .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true)
-            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.getKey(), true)
             .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.7)
             .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.8).build();
 
@@ -732,7 +731,6 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
     public void testCanRemainWithShardRelocatingAway() {
         Settings diskSettings = Settings.builder()
             .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true)
-            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.getKey(), true)
             .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "60%")
             .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%").build();
 
@@ -862,7 +860,6 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
     public void testForSingleDataNode() {
         Settings diskSettings = Settings.builder()
             .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true)
-            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.getKey(), true)
             .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "60%")
             .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%").build();

From 791116c76ad5f5e12313addad84e0a139b3cbbc6 Mon Sep 17 00:00:00 2001
From: Henning Andersen <33268011+henningandersen@users.noreply.github.com>
Date: Tue, 8 Oct 2019 12:08:50 +0200
Subject: [PATCH 43/55] Dangling indices strip aliases (#47581)

Importing dangling indices with aliases risks breaking functionality
that uses those aliases. For instance, writing to an alias may break if
there is no is_write_index indication on the existing alias and the
dangling index import adds a second index to the alias. An application
that assumes the alias only ever points to one index could likewise
break if it suddenly sees the alias also linked to an old index. With
this change we strip aliases from the index metadata of a dangling
index before importing it.
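To sketch the failure mode this avoids (index and alias names here are
hypothetical), importing the dangling index as-is would recreate a state
equivalent to:

    POST /_aliases
    {
      "actions": [
        { "add": { "index": "logs-current", "alias": "logs" } },
        { "add": { "index": "logs-dangling", "alias": "logs" } }
      ]
    }

With two indices behind `logs` and no `is_write_index` marked on either,
indexing through the alias is rejected because no write index can be
resolved.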
---
 .../gateway/DanglingIndicesState.java              | 16 ++++++++++++-
 .../gateway/DanglingIndicesStateTests.java         | 23 +++++++++++++++++++
 2 files changed, 38 insertions(+), 1 deletion(-)

diff --git a/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java b/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java
index fefd807d8d8a..d649c02af4e7 100644
--- a/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java
+++ b/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java
@@ -143,7 +143,7 @@ public class DanglingIndicesState implements ClusterStateListener {
             } else {
                 logger.info("[{}] dangling index exists on local file system, but not in cluster metadata, " +
                             "auto import to cluster state", indexMetaData.getIndex());
-                newIndices.put(indexMetaData.getIndex(), indexMetaData);
+                newIndices.put(indexMetaData.getIndex(), stripAliases(indexMetaData));
             }
         }
         return newIndices;
@@ -153,6 +153,20 @@
         }
     }
 
+    /**
+     * Importing dangling indices with aliases is dangerous: it could, for instance, result in an inability to write to an existing
+     * alias if it previously had only one index with any is_write_index indication.
+     */
+    private IndexMetaData stripAliases(IndexMetaData indexMetaData) {
+        if (indexMetaData.getAliases().isEmpty()) {
+            return indexMetaData;
+        } else {
+            logger.info("[{}] stripping aliases: {} from index before importing",
+                indexMetaData.getIndex(), indexMetaData.getAliases().keys());
+            return IndexMetaData.builder(indexMetaData).removeAllAliases().build();
+        }
+    }
+
     /**
      * Allocates the provided list of the dangled indices by sending them to the master node
     * for allocation.
diff --git a/server/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java b/server/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java
index 9593b58eae97..e7dfbadeeda7 100644
--- a/server/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java
+++ b/server/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java
@@ -19,6 +19,7 @@
 package org.elasticsearch.gateway;
 
 import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
 import org.elasticsearch.cluster.metadata.IndexGraveyard;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
@@ -158,6 +159,28 @@ public class DanglingIndicesStateTests extends ESTestCase {
         }
     }
 
+    public void testDanglingIndicesStripAliases() throws Exception {
+        try (NodeEnvironment env = newNodeEnvironment()) {
+            MetaStateService metaStateService = new MetaStateService(env, xContentRegistry());
+            DanglingIndicesState danglingState = createDanglingIndicesState(env, metaStateService);
+
+            final Settings.Builder settings = Settings.builder().put(indexSettings).put(IndexMetaData.SETTING_INDEX_UUID, "test1UUID");
+            IndexMetaData dangledIndex = IndexMetaData.builder("test1")
+                .settings(settings)
+                .putAlias(AliasMetaData.newAliasMetaDataBuilder("test_aliasd").build())
+                .build();
+            metaStateService.writeIndex("test_write", dangledIndex);
+            assertThat(dangledIndex.getAliases().size(), equalTo(1));
+
+            final MetaData metaData = MetaData.builder().build();
+            Map<Index, IndexMetaData> newDanglingIndices = danglingState.findNewDanglingIndices(metaData);
+            assertThat(newDanglingIndices.size(), equalTo(1));
+            Map.Entry<Index, IndexMetaData> entry = newDanglingIndices.entrySet().iterator().next();
+            assertThat(entry.getKey().getName(), equalTo("test1"));
assertThat(entry.getValue().getAliases().size(), equalTo(0)); + } + } + private DanglingIndicesState createDanglingIndicesState(NodeEnvironment env, MetaStateService metaStateService) { return new DanglingIndicesState(env, metaStateService, null, mock(ClusterService.class)); } From 7b652adfbfc042f02e32f046803f6ce1d31a4c8f Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 8 Oct 2019 13:33:49 +0200 Subject: [PATCH 44/55] Remove include_relocations setting (#47717) Setting `cluster.routing.allocation.disk.include_relocations` to `false` is a bad idea since it will lead to the kinds of overshoot that were otherwise fixed in #46079. This setting was deprecated in #47443. This commit removes it. --- .../migration/migrate_8_0/allocation.asciidoc | 12 +++++++++ .../modules/cluster/disk_allocator.asciidoc | 25 ++++++++++--------- .../allocation/DiskThresholdSettings.java | 14 ----------- .../decider/DiskThresholdDecider.java | 23 ++++++----------- .../common/settings/ClusterSettings.java | 1 - .../DiskThresholdSettingsTests.java | 6 ----- 6 files changed, 33 insertions(+), 48 deletions(-) diff --git a/docs/reference/migration/migrate_8_0/allocation.asciidoc b/docs/reference/migration/migrate_8_0/allocation.asciidoc index 3d39f67b4473..ed2c550c6022 100644 --- a/docs/reference/migration/migrate_8_0/allocation.asciidoc +++ b/docs/reference/migration/migrate_8_0/allocation.asciidoc @@ -20,3 +20,15 @@ block is automatically removed when a node drops below the high watermark again, but this behaviour could be disabled by setting the system property `es.disk.auto_release_flood_stage_block` to `false`. This behaviour is no longer optional, and this system property must now not be set. + +[float] +[[breaking_80_allocation_change_include_relocations_removed]] +==== Accounting for disk usage of relocating shards no longer optional + +By default {es} will account for the sizes of relocating shards when making +allocation decisions based on the disk usage of the nodes in the cluster. In +earlier versions the `cluster.routing.allocation.disk.include_relocations` +setting allowed this accounting to be disabled, which would result in poor +allocation decisions that might overshoot watermarks and require significant +extra work to correct. This behaviour is no longer optional, and this setting +has been removed. diff --git a/docs/reference/modules/cluster/disk_allocator.asciidoc b/docs/reference/modules/cluster/disk_allocator.asciidoc index 765f249f20e5..1fb03183d243 100644 --- a/docs/reference/modules/cluster/disk_allocator.asciidoc +++ b/docs/reference/modules/cluster/disk_allocator.asciidoc @@ -67,18 +67,6 @@ PUT /twitter/_settings How often Elasticsearch should check on disk usage for each node in the cluster. Defaults to `30s`. -`cluster.routing.allocation.disk.include_relocations`:: - - deprecated[7.5, Future versions will always account for relocations.] - Defaults to +true+, which means that Elasticsearch will take into account - shards that are currently being relocated to the target node when computing - a node's disk usage. Taking relocating shards' sizes into account may, - however, mean that the disk usage for a node is incorrectly estimated on - the high side, since the relocation could be 90% complete and a recently - retrieved disk usage would include the total size of the relocating shard - as well as the space already used by the running relocation. - - NOTE: Percentage values refer to used disk space, while byte values refer to free disk space. 
This can be confusing, since it flips the meaning of high and low. For example, it makes sense to set the low watermark to 10gb and the high @@ -100,3 +88,16 @@ PUT _cluster/settings } } -------------------------------------------------- + +{es} accounts for the future disk usage of ongoing shard relocations and +recoveries to help prevent these shard movements from breaching a watermark. +This mechanism may double-count some data that has already been relocated onto +a node. For instance, if a relocation of a 100GB shard is 90% complete then +{es} has copied 90GB of data onto the target node. This 90GB consumes disk +space and will be reflected in the node's disk usage statistics. However {es} +also treats the relocation as if it will consume another full 100GB in the +future, even though the shard may really only consume a further 10GB of space. +If the node's disks are close to a watermark then this may temporarily prevent +other shards from moving onto the same node. Eventually the relocation will +complete and then {es} will use the node's true disk usage statistics again. + diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java index fa63ec3a9a70..72e13b28a9b4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java @@ -55,9 +55,6 @@ public class DiskThresholdSettings { (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.flood_stage"), new FloodStageValidator(), Setting.Property.Dynamic, Setting.Property.NodeScope); - public static final Setting CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING = - Setting.boolSetting("cluster.routing.allocation.disk.include_relocations", true, - Setting.Property.Dynamic, Setting.Property.NodeScope, Setting.Property.Deprecated); public static final Setting CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60), Setting.Property.Dynamic, Setting.Property.NodeScope); @@ -68,7 +65,6 @@ public class DiskThresholdSettings { private volatile Double freeDiskThresholdHigh; private volatile ByteSizeValue freeBytesThresholdLow; private volatile ByteSizeValue freeBytesThresholdHigh; - private volatile boolean includeRelocations; private volatile boolean enabled; private volatile TimeValue rerouteInterval; private volatile Double freeDiskThresholdFloodStage; @@ -90,13 +86,11 @@ public class DiskThresholdSettings { setHighWatermark(highWatermark); setLowWatermark(lowWatermark); setFloodStage(floodStage); - this.includeRelocations = CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.get(settings); this.rerouteInterval = CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.get(settings); this.enabled = CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING, this::setLowWatermark); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING, this::setHighWatermark); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING, this::setFloodStage); - clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING, 
this::setIncludeRelocations); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING, this::setRerouteInterval); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, this::setEnabled); } @@ -227,10 +221,6 @@ public class DiskThresholdSettings { } } - private void setIncludeRelocations(boolean includeRelocations) { - this.includeRelocations = includeRelocations; - } - private void setRerouteInterval(TimeValue rerouteInterval) { this.rerouteInterval = rerouteInterval; } @@ -300,10 +290,6 @@ public class DiskThresholdSettings { return freeBytesThresholdFloodStage; } - public boolean includeRelocations() { - return includeRelocations; - } - public boolean isEnabled() { return enabled; } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 6c99cfa8ee05..0bb8fdb186d0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -335,23 +335,16 @@ public class DiskThresholdDecider extends AllocationDecider { // If there is no usage, and we have other nodes in the cluster, // use the average usage for all nodes as the usage for this node usage = averageUsage(node, usages); - if (logger.isDebugEnabled()) { - logger.debug("unable to determine disk usage for {}, defaulting to average across nodes [{} total] [{} free] [{}% free]", - node.nodeId(), usage.getTotalBytes(), usage.getFreeBytes(), usage.getFreeDiskAsPercentage()); - } + logger.debug("unable to determine disk usage for {}, defaulting to average across nodes [{} total] [{} free] [{}% free]", + node.nodeId(), usage.getTotalBytes(), usage.getFreeBytes(), usage.getFreeDiskAsPercentage()); } - if (diskThresholdSettings.includeRelocations()) { - long relocatingShardsSize = sizeOfRelocatingShards(node, allocation, subtractLeavingShards, usage.getPath()); - DiskUsage usageIncludingRelocations = new DiskUsage(node.nodeId(), node.node().getName(), usage.getPath(), - usage.getTotalBytes(), usage.getFreeBytes() - relocatingShardsSize); - if (logger.isTraceEnabled()) { - logger.trace("usage without relocations: {}", usage); - logger.trace("usage with relocations: [{} bytes] {}", relocatingShardsSize, usageIncludingRelocations); - } - usage = usageIncludingRelocations; - } - return usage; + final long relocatingShardsSize = sizeOfRelocatingShards(node, allocation, subtractLeavingShards, usage.getPath()); + final DiskUsage usageIncludingRelocations = new DiskUsage(node.nodeId(), node.node().getName(), usage.getPath(), + usage.getTotalBytes(), usage.getFreeBytes() - relocatingShardsSize); + logger.trace("getDiskUsage: usage [{}] with [{}] bytes relocating yields [{}]", + usage, relocatingShardsSize, usageIncludingRelocations); + return usageIncludingRelocations; } /** diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index a6f76a486a7f..81429e011f49 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -213,7 +213,6 @@ public final class ClusterSettings extends AbstractScopedSettings { 
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING,
             DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING,
             DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING,
-            DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING,
             DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING,
             SameShardAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING,
             InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING,
diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettingsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettingsTests.java
index adc824ec72e1..6272ffc751af 100644
--- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettingsTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettingsTests.java
@@ -44,7 +44,6 @@ public class DiskThresholdSettingsTests extends ESTestCase {
         assertEquals(15.0D, diskThresholdSettings.getFreeDiskThresholdLow(), 0.0D);
         assertEquals(60L, diskThresholdSettings.getRerouteInterval().seconds());
         assertTrue(diskThresholdSettings.isEnabled());
-        assertTrue(diskThresholdSettings.includeRelocations());
         assertEquals(zeroBytes, diskThresholdSettings.getFreeBytesThresholdFloodStage());
         assertEquals(5.0D, diskThresholdSettings.getFreeDiskThresholdFloodStage(), 0.0D);
     }
@@ -55,7 +54,6 @@
         Settings newSettings = Settings.builder()
             .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), false)
-            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.getKey(), false)
             .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "500mb")
             .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1000mb")
             .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), "250mb")
@@ -71,10 +69,6 @@
         assertEquals(0.0D, diskThresholdSettings.getFreeDiskThresholdFloodStage(), 0.0D);
         assertEquals(30L, diskThresholdSettings.getRerouteInterval().seconds());
         assertFalse(diskThresholdSettings.isEnabled());
-        assertFalse(diskThresholdSettings.includeRelocations());
-
-        assertWarnings("[cluster.routing.allocation.disk.include_relocations] setting was deprecated in Elasticsearch and " +
-            "will be removed in a future release! See the breaking changes documentation for the next major version.");
     }
 
     public void testInvalidConstruction() {

From 3bf141d22d862900cbe4dc7bc2237259db40de97 Mon Sep 17 00:00:00 2001
From: Dimitris Athanasiou
Date: Tue, 8 Oct 2019 14:38:36 +0300
Subject: [PATCH 45/55] [ML] Unwrap exception causes before calling instanceof (#47676)

When an exception is returned from another node, it may be wrapped in a
`RemoteTransportException`. In places where we handled specific
exceptions using `instanceof` we ought to unwrap the cause first. This
commit fixes the occurrences found by searching the code of the ML
plugin.
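The change applied throughout is the pattern below (a sketch taken from
the diff; `ExceptionsHelper` is the ML
`org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper`, which now
delegates to the server-side unwrapping):

    // Before: never matches when the failure travelled from another node,
    // because `e` is then a RemoteTransportException wrapping the real cause.
    if (e instanceof ResourceNotFoundException) {
        // the task has disappeared so must have stopped
    }

    // After: unwrap the transport-layer wrapper before the type check.
    if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) {
        // the task has disappeared so must have stopped
    }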
--- .../xpack/core/ml/annotations/AnnotationIndex.java | 3 ++- .../core/ml/job/persistence/AnomalyDetectorsIndex.java | 3 ++- .../xpack/core/ml/utils/ExceptionsHelper.java | 4 ++++ .../org/elasticsearch/xpack/ml/MlConfigMigrator.java | 3 ++- .../xpack/ml/action/TransportCloseJobAction.java | 2 +- .../xpack/ml/action/TransportDeleteDatafeedAction.java | 2 +- .../xpack/ml/action/TransportDeleteJobAction.java | 8 ++++---- .../xpack/ml/action/TransportOpenJobAction.java | 2 +- .../xpack/ml/action/TransportPutCalendarAction.java | 2 +- .../xpack/ml/action/TransportPutDatafeedAction.java | 2 +- .../xpack/ml/action/TransportPutFilterAction.java | 2 +- .../xpack/ml/action/TransportSetUpgradeModeAction.java | 3 ++- .../action/TransportStartDataFrameAnalyticsAction.java | 4 ++-- .../xpack/ml/action/TransportStartDatafeedAction.java | 2 +- .../ml/action/TransportStopDataFrameAnalyticsAction.java | 2 +- .../xpack/ml/action/TransportStopDatafeedAction.java | 5 +++-- .../xpack/ml/action/TransportUpdateFilterAction.java | 2 +- .../xpack/ml/datafeed/DatafeedJobBuilder.java | 2 +- .../elasticsearch/xpack/ml/datafeed/DatafeedManager.java | 3 ++- .../ml/datafeed/extractor/DataExtractorFactory.java | 6 ++++-- .../extractor/scroll/ScrollDataExtractorFactory.java | 5 +++-- .../ml/datafeed/persistence/DatafeedConfigProvider.java | 2 +- .../xpack/ml/dataframe/DataFrameAnalyticsManager.java | 7 ++++--- .../xpack/ml/dataframe/DataFrameAnalyticsTask.java | 4 ++-- .../extractor/DataFrameDataExtractorFactory.java | 3 ++- .../persistence/DataFrameAnalyticsConfigProvider.java | 2 +- .../ml/inference/persistence/TrainedModelProvider.java | 3 ++- .../java/org/elasticsearch/xpack/ml/job/JobManager.java | 4 ++-- .../xpack/ml/job/UpdateJobProcessNotifier.java | 9 +++++---- .../xpack/ml/job/persistence/JobConfigProvider.java | 4 ++-- .../xpack/ml/job/persistence/JobResultsProvider.java | 6 +++--- 31 files changed, 64 insertions(+), 47 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/AnnotationIndex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/AnnotationIndex.java index e9da7238fad2..ad72c82c8c3e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/AnnotationIndex.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/AnnotationIndex.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import java.io.IOException; import java.util.SortedMap; @@ -84,7 +85,7 @@ public class AnnotationIndex { e -> { // Possible that the index was created while the request was executing, // so we need to handle that possibility - if (e instanceof ResourceAlreadyExistsException) { + if (ExceptionsHelper.unwrapCause(e) instanceof ResourceAlreadyExistsException) { // Create the alias createAliasListener.onResponse(true); } else { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java index 7e61d42705a9..7d4e2367ccef 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import java.util.Arrays; import java.util.Collections; @@ -133,7 +134,7 @@ public final class AnomalyDetectorsIndex { // If it was created between our last check, and this request being handled, we should add the alias // Adding an alias that already exists is idempotent. So, no need to double check if the alias exists // as well. - if (createIndexFailure instanceof ResourceAlreadyExistsException) { + if (ExceptionsHelper.unwrapCause(createIndexFailure) instanceof ResourceAlreadyExistsException) { createAliasListener.onResponse(AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX); } else { finalListener.onFailure(createIndexFailure); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExceptionsHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExceptionsHelper.java index 320eace98359..517e600ab445 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExceptionsHelper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExceptionsHelper.java @@ -99,4 +99,8 @@ public class ExceptionsHelper { public static T requireNonNull(T obj, ParseField paramName) { return requireNonNull(obj, paramName.getPreferredName()); } + + public static Throwable unwrapCause(Throwable t) { + return org.elasticsearch.ExceptionsHelper.unwrapCause(t); + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java index 79ff292fb823..219619f72ce4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlConfigMigrator.java @@ -43,6 +43,7 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; import org.elasticsearch.xpack.ml.job.persistence.JobConfigProvider; @@ -467,7 +468,7 @@ public class MlConfigMigrator { listener.onResponse(indexResponse.getResult() == DocWriteResponse.Result.CREATED); }, e -> { - if (e instanceof VersionConflictEngineException) { + if (ExceptionsHelper.unwrapCause(e) instanceof VersionConflictEngineException) { // the snapshot already exists listener.onResponse(Boolean.TRUE); } else { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java index 8816807948ab..9ed0210aa9a9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java @@ -341,7 
+341,7 @@ public class TransportCloseJobAction extends TransportTasksAction finishedHandler.onResponse(true), e -> { // It's not a problem for us if the index wasn't found - it's equivalent to document not found - if (e instanceof IndexNotFoundException) { + if (ExceptionsHelper.unwrapCause(e) instanceof IndexNotFoundException) { finishedHandler.onResponse(true); } else { finishedHandler.onFailure(e); @@ -466,7 +466,7 @@ public class TransportDeleteJobAction extends TransportMasterNodeAction { // It's not a problem for us if the index wasn't found - it's equivalent to document not found - if (e instanceof IndexNotFoundException) { + if (ExceptionsHelper.unwrapCause(e) instanceof IndexNotFoundException) { finishedHandler.onResponse(true); } else { finishedHandler.onFailure(e); @@ -536,7 +536,7 @@ public class TransportDeleteJobAction extends TransportMasterNodeAction killJobListener = ActionListener.wrap( response -> removePersistentTask(request.getJobId(), state, removeTaskListener), e -> { - if (e instanceof ElasticsearchStatusException) { + if (ExceptionsHelper.unwrapCause(e) instanceof ElasticsearchStatusException) { // Killing the process marks the task as completed so it // may have disappeared when we get here removePersistentTask(request.getJobId(), state, removeTaskListener); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index 6cf6b255678f..332bcb79bebd 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -236,7 +236,7 @@ public class TransportOpenJobAction extends TransportMasterNodeAction { - if (e instanceof IndexNotFoundException) { + if (ExceptionsHelper.unwrapCause(e) instanceof IndexNotFoundException) { indicesPrivilegesBuilder.privileges(SearchAction.NAME); privRequest.indexPrivileges(indicesPrivilegesBuilder.build()); client.execute(HasPrivilegesAction.INSTANCE, privRequest, privResponseListener); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java index b9c8afa93400..8565a86e4c66 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutFilterAction.java @@ -67,7 +67,7 @@ public class TransportPutFilterAction extends HandledTransportAction ex instanceof ResourceNotFoundException == false); + ex -> ExceptionsHelper.unwrapCause(ex) instanceof ResourceNotFoundException == false); for (PersistentTask task : datafeedAndJobTasks) { chainTaskExecutor.add( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java index c2a21662c264..b0e8bdd2810a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java @@ -154,7 +154,7 @@ public class TransportStartDataFrameAnalyticsAction @Override public void onFailure(Exception e) { - if (e instanceof 
ResourceAlreadyExistsException) { + if (ExceptionsHelper.unwrapCause(e) instanceof ResourceAlreadyExistsException) { e = new ElasticsearchStatusException("Cannot start data frame analytics [" + request.getId() + "] because it has already been started", RestStatus.CONFLICT, e); } @@ -342,7 +342,7 @@ public class TransportStartDataFrameAnalyticsAction } }, e -> { - if (e instanceof IndexNotFoundException) { + if (ExceptionsHelper.unwrapCause(e) instanceof IndexNotFoundException) { listener.onResponse(startContext); } else { listener.onFailure(e); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java index 1f44d22c6681..ddbd3d58966f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java @@ -179,7 +179,7 @@ public class TransportStartDatafeedAction extends TransportMasterNodeAction { - if (e instanceof ResourceNotFoundException) { + if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) { // the task has disappeared so must have stopped listener.onResponse(new StopDataFrameAnalyticsAction.Response(true)); } else { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java index cbd55bb60d89..2d8c62223f24 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java @@ -29,6 +29,7 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; @@ -195,7 +196,7 @@ public class TransportStopDatafeedAction extends TransportTasksAction { - if (e instanceof ResourceNotFoundException) { + if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) { // the task has disappeared so must have stopped listener.onResponse(new StopDatafeedAction.Response(true)); } else { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java index 49d838866910..3cb716dc504f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateFilterAction.java @@ -125,7 +125,7 @@ public class TransportUpdateFilterAction extends HandledTransportAction { - if (e instanceof ResourceNotFoundException) { + if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) { QueryPage empty = new QueryPage<>(Collections.emptyList(), 0, Bucket.RESULT_TYPE_FIELD); bucketsHandler.accept(empty); } else { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java index 457701740e0f..30a3948fcc20 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java @@ -30,6 +30,7 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.action.TransportStartDatafeedAction; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; @@ -96,7 +97,7 @@ public class DatafeedManager { @Override public void onFailure(Exception e) { - if (e instanceof ResourceNotFoundException) { + if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) { // The task was stopped in the meantime, no need to do anything logger.info("[{}] Aborting as datafeed has been stopped", datafeedId); } else { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java index 40e819affa08..d43ede48d057 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java @@ -15,6 +15,7 @@ import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.rollup.action.GetRollupIndexCapsAction; import org.elasticsearch.xpack.ml.datafeed.DatafeedTimingStatsReporter; import org.elasticsearch.xpack.ml.datafeed.extractor.aggregation.AggregationDataExtractorFactory; @@ -59,9 +60,10 @@ public interface DataExtractorFactory { } }, e -> { - if (e instanceof IndexNotFoundException) { + Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof IndexNotFoundException) { listener.onFailure(new ResourceNotFoundException("datafeed [" + datafeed.getId() - + "] cannot retrieve data because index " + ((IndexNotFoundException)e).getIndex() + " does not exist")); + + "] cannot retrieve data because index " + ((IndexNotFoundException) cause).getIndex() + " does not exist")); } else { listener.onFailure(e); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java index 20ed7f664f92..e58689736cd6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java @@ -74,9 +74,10 @@ public class ScrollDataExtractorFactory implements DataExtractorFactory { new ScrollDataExtractorFactory(client, datafeed, job, extractedFields, xContentRegistry, 
                timingStatsReporter));
            },
            e -> {
-                if (e instanceof IndexNotFoundException) {
+                Throwable cause = ExceptionsHelper.unwrapCause(e);
+                if (cause instanceof IndexNotFoundException) {
                     listener.onFailure(new ResourceNotFoundException("datafeed [" + datafeed.getId()
-                        + "] cannot retrieve data because index " + ((IndexNotFoundException) e).getIndex() + " does not exist"));
+                        + "] cannot retrieve data because index " + ((IndexNotFoundException) cause).getIndex() + " does not exist"));
                 } else if (e instanceof IllegalArgumentException) {
                     listener.onFailure(ExceptionsHelper.badRequestException("[" + datafeed.getId() + "] " + e.getMessage()));
                 } else {
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java
index d24b91d07cef..3cf636bbb70b 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java
@@ -127,7 +127,7 @@ public class DatafeedConfigProvider {
         executeAsyncWithOrigin(client, ML_ORIGIN, IndexAction.INSTANCE, indexRequest, ActionListener.wrap(
                 listener::onResponse,
                 e -> {
-                    if (e instanceof VersionConflictEngineException) {
+                    if (ExceptionsHelper.unwrapCause(e) instanceof VersionConflictEngineException) {
                         // the datafeed already exists
                         listener.onFailure(ExceptionsHelper.datafeedAlreadyExists(datafeedId));
                     } else {
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java
index a3a0d7cce179..fea9753314d7 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java
@@ -34,6 +34,7 @@ import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState;
 import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsTaskState;
 import org.elasticsearch.xpack.core.ml.job.messages.Messages;
 import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex;
+import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
 import org.elasticsearch.xpack.ml.dataframe.extractor.DataFrameDataExtractorFactory;
 import org.elasticsearch.xpack.ml.dataframe.persistence.DataFrameAnalyticsConfigProvider;
 import org.elasticsearch.xpack.ml.dataframe.process.AnalyticsProcessManager;
@@ -142,7 +143,7 @@ public class DataFrameAnalyticsManager {
             ActionListener.wrap(
                 r-> reindexDataframeAndStartAnalysis(task, config),
                 e -> {
-                    if (e instanceof IndexNotFoundException) {
+                    if (ExceptionsHelper.unwrapCause(e) instanceof IndexNotFoundException) {
                         reindexDataframeAndStartAnalysis(task, config);
                     } else {
                         task.updateState(DataFrameAnalyticsState.FAILED, e.getMessage());
@@ -224,7 +225,7 @@ public class DataFrameAnalyticsManager {
                 ));
             },
             e -> {
-                if (org.elasticsearch.ExceptionsHelper.unwrapCause(e) instanceof IndexNotFoundException) {
+                if (ExceptionsHelper.unwrapCause(e) instanceof IndexNotFoundException) {
                     auditor.info(
                         config.getId(),
                         Messages.getMessage(Messages.DATA_FRAME_ANALYTICS_AUDIT_CREATING_DEST_INDEX, config.getDest().getIndex()));
@@ -260,7 +261,7 @@ public class DataFrameAnalyticsManager {
                 }
             }),
             error -> {
-                if (error instanceof ResourceNotFoundException) {
+                if (ExceptionsHelper.unwrapCause(error) instanceof ResourceNotFoundException) {
                     // Task has stopped
                 } else {
                     task.updateState(DataFrameAnalyticsState.FAILED, error.getMessage());
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTask.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTask.java
index c8fbd3d6da02..d6be817804b3 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTask.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTask.java
@@ -162,7 +162,7 @@ public class DataFrameAnalyticsTask extends AllocatedPersistentTask implements S
             }
             // There is a chance that the task is finished by the time we cancel it in which case we'll get
             // a ResourceNotFoundException which we can ignore.
-            if (firstError != null && firstError instanceof ResourceNotFoundException == false) {
+            if (firstError != null && ExceptionsHelper.unwrapCause(firstError) instanceof ResourceNotFoundException == false) {
                 throw ExceptionsHelper.serverError("[" + taskParams.getId() + "] Error cancelling reindex task", firstError);
             } else {
                 LOGGER.debug("[{}] Reindex task was successfully cancelled", taskParams.getId());
@@ -215,7 +215,7 @@ public class DataFrameAnalyticsTask extends AllocatedPersistentTask implements S
                 listener.onResponse(progress);
             },
             error -> {
-                if (error instanceof ResourceNotFoundException) {
+                if (ExceptionsHelper.unwrapCause(error) instanceof ResourceNotFoundException) {
                     // The task is not present which means either it has not started yet or it finished.
                     // We keep track of whether the task has finished so we can use that to tell whether the progress is 100.
                     listener.onResponse(isReindexingFinished ? 100 : 0);
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorFactory.java
index d24d157d4f5b..a93efe673190 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorFactory.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorFactory.java
@@ -24,6 +24,7 @@ import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.xpack.core.ClientHelper;
 import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig;
+import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
 import org.elasticsearch.xpack.ml.datafeed.extractor.fields.ExtractedField;
 import org.elasticsearch.xpack.ml.datafeed.extractor.fields.ExtractedFields;
@@ -219,7 +220,8 @@ public class DataFrameDataExtractorFactory {
                 docValueFieldsLimitListener.onResponse(minDocValueFieldsLimit);
             },
             e -> {
-                if (e instanceof IndexNotFoundException) {
-                    docValueFieldsLimitListener.onFailure(new ResourceNotFoundException("cannot retrieve data because index "
-                        + ((IndexNotFoundException) e).getIndex() + " does not exist"));
+                Throwable cause = ExceptionsHelper.unwrapCause(e);
+                if (cause instanceof IndexNotFoundException) {
+                    docValueFieldsLimitListener.onFailure(new ResourceNotFoundException("cannot retrieve data because index "
+                        + ((IndexNotFoundException) cause).getIndex() + " does not exist"));
                 } else {
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java
index d8d0cd775dd6..d13ed2c6a4d7 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java
@@ -99,7 +99,7 @@ public class DataFrameAnalyticsConfigProvider {
         executeAsyncWithOrigin(client, ML_ORIGIN, IndexAction.INSTANCE, indexRequest, ActionListener.wrap(
             listener::onResponse,
             e -> {
-                if (e instanceof VersionConflictEngineException) {
+                if (ExceptionsHelper.unwrapCause(e) instanceof VersionConflictEngineException) {
                     listener.onFailure(ExceptionsHelper.dataFrameAnalyticsAlreadyExists(id));
                 } else {
                     listener.onFailure(e);
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java
index e569edc07fd8..2028dfe9edfa 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java
@@ -36,6 +36,7 @@ import org.elasticsearch.search.sort.SortOrder;
 import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig;
 import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants;
 import org.elasticsearch.xpack.core.ml.job.messages.Messages;
+import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
 import java.io.IOException;
 import java.io.InputStream;
@@ -72,7 +73,7 @@ public class TrainedModelProvider {
                 trainedModelConfig.getModelId(),
                 trainedModelConfig.getModelVersion()),
                 e);
-            if (e instanceof VersionConflictEngineException) {
+            if (ExceptionsHelper.unwrapCause(e) instanceof
VersionConflictEngineException) { listener.onFailure(new ResourceAlreadyExistsException( Messages.getMessage(Messages.INFERENCE_TRAINED_MODEL_EXISTS, trainedModelConfig.getModelId(), trainedModelConfig.getModelVersion()))); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java index aa7771ac21f6..b134cf59c193 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java @@ -142,7 +142,7 @@ public class JobManager { jobConfigProvider.getJob(jobId, ActionListener.wrap( r -> jobListener.onResponse(r.build()), // TODO JIndex we shouldn't be building the job here e -> { - if (e instanceof ResourceNotFoundException) { + if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) { // Try to get the job from the cluster state getJobFromClusterState(jobId, jobListener); } else { @@ -272,7 +272,7 @@ public class JobManager { @Override public void onFailure(Exception e) { - if (e instanceof IllegalArgumentException) { + if (ExceptionsHelper.unwrapCause(e) instanceof IllegalArgumentException) { // the underlying error differs depending on which way around the clashing fields are seen Matcher matcher = Pattern.compile("(?:mapper|Can't merge a non object mapping) \\[(.*)\\] (?:of different type, " + "current_type \\[.*\\], merged_type|with an object mapping) \\[.*\\]").matcher(e.getMessage()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/UpdateJobProcessNotifier.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/UpdateJobProcessNotifier.java index 29e98d01ca9a..a1bc5aa357cd 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/UpdateJobProcessNotifier.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/UpdateJobProcessNotifier.java @@ -135,13 +135,14 @@ public class UpdateJobProcessNotifier { @Override public void onFailure(Exception e) { - if (e instanceof ResourceNotFoundException) { + Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof ResourceNotFoundException) { logger.debug("Remote job [{}] not updated as it has been deleted", update.getJobId()); - } else if (e.getMessage().contains("because job [" + update.getJobId() + "] is not open") - && e instanceof ElasticsearchStatusException) { + } else if (cause.getMessage().contains("because job [" + update.getJobId() + "] is not open") + && cause instanceof ElasticsearchStatusException) { logger.debug("Remote job [{}] not updated as it is no longer open", update.getJobId()); } else { - logger.error("Failed to update remote job [" + update.getJobId() + "]", e); + logger.error("Failed to update remote job [" + update.getJobId() + "]", cause); } updateHolder.listener.onFailure(e); executeProcessUpdates(updatesIterator); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java index 9944b8f4fc00..865e714c343d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java @@ -124,7 +124,7 @@ public class JobConfigProvider { executeAsyncWithOrigin(client, ML_ORIGIN, IndexAction.INSTANCE, indexRequest, 
ActionListener.wrap(
                listener::onResponse,
                e -> {
-                    if (e instanceof VersionConflictEngineException) {
+                    if (ExceptionsHelper.unwrapCause(e) instanceof VersionConflictEngineException) {
                         // the job already exists
                         listener.onFailure(ExceptionsHelper.jobAlreadyExists(job.getId()));
                     } else {
@@ -409,7 +409,7 @@ public class JobConfigProvider {
      * For the list of job Ids find all that match existing jobs Ids.
      * The response is all the job Ids in {@code ids} that match an existing
      * job Id.
-     * @param ids  Job Ids to find
+     * @param ids Job Ids to find
      * @param listener The matched Ids listener
      */
     public void jobIdMatches(List<String> ids, ActionListener<List<String>> listener) {
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java
index d9a2bc2e5c30..777467af2d49 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java
@@ -304,7 +304,7 @@ public class JobResultsProvider {
                 e -> {
                     // Possible that the index was created while the request was executing,
                     // so we need to handle that possibility
-                    if (e instanceof ResourceAlreadyExistsException) {
+                    if (ExceptionsHelper.unwrapCause(e) instanceof ResourceAlreadyExistsException) {
                         LOGGER.info("Index already exists");
                         // Add the term field mappings and alias. The complication is that the state at the
                         // beginning of the operation doesn't have any knowledge of the index, as it's only
@@ -1189,7 +1189,7 @@ public class JobResultsProvider {
                 .sortDescending(true).from(BUCKETS_FOR_ESTABLISHED_MEMORY_SIZE - 1).size(1)
                 .includeInterim(false);
         bucketsViaInternalClient(jobId, bucketQuery, bucketHandler, e -> {
-            if (e instanceof ResourceNotFoundException) {
+            if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) {
                 handler.accept(0L);
             } else {
                 errorHandler.accept(e);
@@ -1437,7 +1437,7 @@ public class JobResultsProvider {
                     @Override
                     public void onFailure(Exception e) {
-                        if (e instanceof IndexNotFoundException) {
+                        if (ExceptionsHelper.unwrapCause(e) instanceof IndexNotFoundException) {
                             listener.onFailure(new ResourceNotFoundException("No calendar with id [" + calendarId + "]"));
                         } else {
                             listener.onFailure(e);

From 8f7855464a5c95a738b443f18ae48390fd99128e Mon Sep 17 00:00:00 2001
From: Alpar Torok
Date: Tue, 8 Oct 2019 14:39:58 +0300
Subject: [PATCH 46/55] Convert RunTask to use testclusters, remove
 ClusterFormationTasks (#47572)

* Convert RunTask to use testclusters, remove ClusterFormationTasks

This PR adds a new RunTask, along with a way for it to start a
testclusters cluster out of band and block on it, replacing the old
RunTask that used ClusterFormationTasks. With this in place we can
remove ClusterFormationTasks entirely.
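For build authors following this change, the replacement wiring is small. The
sketch below is a minimal illustration in plain Java against Gradle's public
API, not code from this patch: it assumes the new testclusters RunTask
implements TestClustersAware (so useCluster is available) and that
TestClustersPlugin registers the testClusters container; the plugin class
name is made up.

    import org.elasticsearch.gradle.testclusters.ElasticsearchCluster;
    import org.elasticsearch.gradle.testclusters.RunTask;
    import org.gradle.api.NamedDomainObjectContainer;
    import org.gradle.api.Plugin;
    import org.gradle.api.Project;

    // Illustrative only: register a foreground "run" task backed by the
    // "integTest" testclusters cluster, mirroring what PluginBuildPlugin
    // does below in Groovy.
    public class RunWithTestClustersPlugin implements Plugin<Project> {
        @Override
        public void apply(Project project) {
            @SuppressWarnings("unchecked")
            NamedDomainObjectContainer<ElasticsearchCluster> clusters =
                (NamedDomainObjectContainer<ElasticsearchCluster>) project.getExtensions().getByName("testClusters");
            project.getTasks().register("run", RunTask.class, task -> {
                // the plugin zip has to be built before the cluster can install it
                task.dependsOn(project.getTasks().named("bundlePlugin"));
                // claim the cluster; TestClustersPlugin starts it and keeps it
                // alive while the run task blocks in the foreground
                task.useCluster(clusters.getByName("integTest"));
            });
        }
    }

The design point visible in the diffs that follow is that a task only declares
which cluster it uses; starting, reuse and teardown stay centralized in
TestClustersPlugin instead of being re-derived per task as
ClusterFormationTasks did.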
--- build.gradle | 10 +- .../gradle/plugin/PluginBuildPlugin.groovy | 65 +- .../gradle/test/ClusterConfiguration.groovy | 254 ----- .../gradle/test/ClusterFormationTasks.groovy | 991 ------------------ .../elasticsearch/gradle/test/NodeInfo.groovy | 297 ------ .../elasticsearch/gradle/test/RunTask.groovy | 41 - .../testclusters/ElasticsearchCluster.java | 2 +- .../testclusters/ElasticsearchNode.java | 35 +- .../gradle/testclusters/RunTask.java | 73 ++ .../testclusters/TestClustersAware.java | 6 +- .../testclusters/TestClustersPlugin.java | 4 +- distribution/build.gradle | 44 +- modules/build.gradle | 5 - modules/lang-painless/build.gradle | 26 +- modules/rank-eval/build.gradle | 5 - modules/reindex/build.gradle | 8 - x-pack/docs/build.gradle | 2 - .../sql/qa/security/with-ssl/build.gradle | 1 - x-pack/qa/smoke-test-plugins-ssl/build.gradle | 2 +- 19 files changed, 181 insertions(+), 1690 deletions(-) delete mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy delete mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy delete mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy delete mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java diff --git a/build.gradle b/build.gradle index 82203b9a45b4..6c2897e42688 100644 --- a/build.gradle +++ b/build.gradle @@ -17,17 +17,17 @@ * under the License. */ + import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.BuildPlugin -import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.BwcVersions +import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.plugin.PluginBuildPlugin -import org.elasticsearch.gradle.tool.Boilerplate -import org.gradle.util.GradleVersion -import org.gradle.util.DistributionLocator import org.gradle.plugins.ide.eclipse.model.SourceFolder +import org.gradle.util.DistributionLocator +import org.gradle.util.GradleVersion import static org.elasticsearch.gradle.tool.Boilerplate.maybeConfigure @@ -449,7 +449,7 @@ class Run extends DefaultTask { description = "Enable debugging configuration, to allow attaching a debugger to elasticsearch." 
) public void setDebug(boolean enabled) { - project.project(':distribution').run.clusterConfig.debug = enabled + project.project(':distribution').run.debug = enabled } } task run(type: Run) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index 645190809342..6d624b774226 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -24,7 +24,7 @@ import org.elasticsearch.gradle.NoticeTask import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.test.RestIntegTestTask -import org.elasticsearch.gradle.test.RunTask +import org.elasticsearch.gradle.testclusters.RunTask import org.elasticsearch.gradle.testclusters.TestClustersPlugin import org.elasticsearch.gradle.tool.ClasspathUtils import org.gradle.api.InvalidUserDataException @@ -65,35 +65,28 @@ class PluginBuildPlugin implements Plugin { project.archivesBaseName = name project.description = extension1.description configurePublishing(project, extension1) - if (project.plugins.hasPlugin(TestClustersPlugin.class) == false) { - project.integTestCluster.dependsOn(project.tasks.bundlePlugin) - if (isModule) { - project.integTestCluster.module(project) - } else { - project.integTestCluster.plugin(project.path) - } - } else { - project.tasks.integTest.dependsOn(project.tasks.bundlePlugin) - if (isModule) { - project.testClusters.integTest.module( - project.file(project.tasks.bundlePlugin.archiveFile) - ) - } else { - project.testClusters.integTest.plugin( - project.file(project.tasks.bundlePlugin.archiveFile) - ) - } - project.extensions.getByType(PluginPropertiesExtension).extendedPlugins.each { pluginName -> - // Auto add dependent modules to the test cluster - if (project.findProject(":modules:${pluginName}") != null) { - project.integTest.dependsOn(project.project(":modules:${pluginName}").tasks.bundlePlugin) - project.testClusters.integTest.module( - project.file(project.project(":modules:${pluginName}").tasks.bundlePlugin.archiveFile) - ) - } + project.tasks.integTest.dependsOn(project.tasks.bundlePlugin) + if (isModule) { + project.testClusters.integTest.module( + project.file(project.tasks.bundlePlugin.archiveFile) + ) + } else { + project.testClusters.integTest.plugin( + project.file(project.tasks.bundlePlugin.archiveFile) + ) + } + + project.extensions.getByType(PluginPropertiesExtension).extendedPlugins.each { pluginName -> + // Auto add dependent modules to the test cluster + if (project.findProject(":modules:${pluginName}") != null) { + project.integTest.dependsOn(project.project(":modules:${pluginName}").tasks.bundlePlugin) + project.testClusters.integTest.module( + project.file(project.project(":modules:${pluginName}").tasks.bundlePlugin.archiveFile) + ) } } + if (extension1.name == null) { throw new InvalidUserDataException('name is a required setting for esplugin') } @@ -117,14 +110,6 @@ class PluginBuildPlugin implements Plugin { ] buildProperties.expand(properties) buildProperties.inputs.properties(properties) - project.tasks.run.dependsOn(project.tasks.bundlePlugin) - if (isModule) { - project.tasks.run.clusterConfig.distribution = System.getProperty( - 'run.distribution', isXPackModule ? 
'default' : 'oss' - ) - } else { - project.tasks.run.clusterConfig.plugin(project.path) - } if (isModule == false || isXPackModule) { addNoticeGeneration(project, extension1) } @@ -145,7 +130,11 @@ class PluginBuildPlugin implements Plugin { createIntegTestTask(project) createBundleTasks(project, extension) project.configurations.getByName('default').extendsFrom(project.configurations.getByName('runtime')) - project.tasks.create('run', RunTask) // allow running ES with this plugin in the foreground of a build + // allow running ES with this plugin in the foreground of a build + project.tasks.register('run', RunTask) { + dependsOn(project.tasks.bundlePlugin) + useCluster project.testClusters.integTest + } } @@ -178,10 +167,6 @@ class PluginBuildPlugin implements Plugin { private static void createIntegTestTask(Project project) { RestIntegTestTask integTest = project.tasks.create('integTest', RestIntegTestTask.class) integTest.mustRunAfter('precommit', 'test') - if (project.plugins.hasPlugin(TestClustersPlugin.class) == false) { - // only if not using test clusters - project.integTestCluster.distribution = System.getProperty('tests.distribution', 'integ-test-zip') - } project.check.dependsOn(integTest) } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy deleted file mode 100644 index b9baa1a0146a..000000000000 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ /dev/null @@ -1,254 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.gradle.test - -import org.elasticsearch.gradle.Version -import org.gradle.api.GradleException -import org.gradle.api.Project -import org.gradle.api.tasks.Input - -/** Configuration for an elasticsearch cluster, used for integration tests. */ -class ClusterConfiguration { - - private final Project project - - @Input - String distribution = 'default' - - @Input - int numNodes = 1 - - @Input - int numBwcNodes = 0 - - @Input - Version bwcVersion = null - - @Input - int httpPort = 0 - - @Input - int transportPort = 0 - - /** - * An override of the data directory. Input is the node number and output - * is the override data directory. - */ - @Input - Closure dataDir = null - - /** Optional override of the cluster name. */ - @Input - String clusterName = null - - @Input - boolean daemonize = true - - @Input - boolean debug = false - - /** - * Whether the initial_master_nodes setting should be automatically derived from the nodes - * in the cluster. Only takes effect if all nodes in the cluster understand this setting - * and the discovery type is not explicitly set. 
- */ - @Input - boolean autoSetInitialMasterNodes = true - - /** - * Whether the file-based discovery provider should be automatically setup based on - * the nodes in the cluster. Only takes effect if no other hosts provider is already - * configured. - */ - @Input - boolean autoSetHostsProvider = true - - @Input - String jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') + - " " + "-Xmx" + System.getProperty('tests.heap.size', '512m') + - " " + System.getProperty('tests.jvm.argline', '') - - /** - * Should the shared environment be cleaned on cluster startup? Defaults - * to {@code true} so we run with a clean cluster but some tests wish to - * preserve snapshots between clusters so they set this to true. - */ - @Input - boolean cleanShared = true - - /** - * A closure to call which returns the unicast host to connect to for cluster formation. - * - * This allows multi node clusters, or a new cluster to connect to an existing cluster. - * The closure takes three arguments, the NodeInfo for the first node in the cluster, - * the NodeInfo for the node current being configured, an AntBuilder which may be used - * to wait on conditions before returning. - */ - @Input - Closure unicastTransportUri = { NodeInfo seedNode, NodeInfo node, AntBuilder ant -> - if (seedNode == node) { - return null - } - ant.waitfor(maxwait: '40', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', - timeoutproperty: "failed.${seedNode.transportPortsFile.path}") { - resourceexists { - file(file: seedNode.transportPortsFile.toString()) - } - } - if (ant.properties.containsKey("failed.${seedNode.transportPortsFile.path}".toString())) { - throw new GradleException("Failed to locate seed node transport file [${seedNode.transportPortsFile}]: " + - "timed out waiting for it to be created after 40 seconds") - } - return seedNode.transportUri() - } - - /** - * A closure to call which returns a manually supplied list of unicast seed hosts. - */ - @Input - Closure> otherUnicastHostAddresses = { - Collections.emptyList() - } - - /** - * A closure to call before the cluster is considered ready. The closure is passed the node info, - * as well as a groovy AntBuilder, to enable running ant condition checks. The default wait - * condition is for http on the http port. - */ - @Input - Closure waitCondition = { NodeInfo node, AntBuilder ant -> - File tmpFile = new File(node.cwd, 'wait.success') - String waitUrl = "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow" - ant.echo(message: "==> [${new Date()}] checking health: ${waitUrl}", - level: 'info') - // checking here for wait_for_nodes to be >= the number of nodes because its possible - // this cluster is attempting to connect to nodes created by another task (same cluster name), - // so there will be more nodes in that case in the cluster state - ant.get(src: waitUrl, - dest: tmpFile.toString(), - ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task - retries: 10) - return tmpFile.exists() - } - - /** - * The maximum number of seconds to wait for nodes to complete startup, which includes writing - * the ports files for the transports and the pid file. This wait time occurs before the wait - * condition is executed. 
- */ - @Input - int nodeStartupWaitSeconds = 30 - - public ClusterConfiguration(Project project) { - this.project = project - } - - // **Note** for systemProperties, settings, keystoreFiles etc: - // value could be a GString that is evaluated to just a String - // there are cases when value depends on task that is not executed yet on configuration stage - Map systemProperties = new HashMap<>() - - Map environmentVariables = new HashMap<>() - - Map settings = new HashMap<>() - - Map keystoreSettings = new HashMap<>() - - Map keystoreFiles = new HashMap<>() - - // map from destination path, to source file - Map extraConfigFiles = new HashMap<>() - - LinkedHashMap plugins = new LinkedHashMap<>() - - List modules = new ArrayList<>() - - LinkedHashMap setupCommands = new LinkedHashMap<>() - - List dependencies = new ArrayList<>() - - @Input - void systemProperty(String property, Object value) { - systemProperties.put(property, value) - } - - @Input - void environment(String variable, Object value) { - environmentVariables.put(variable, value) - } - - @Input - void setting(String name, Object value) { - settings.put(name, value) - } - - @Input - void keystoreSetting(String name, String value) { - keystoreSettings.put(name, value) - } - - /** - * Adds a file to the keystore. The name is the secure setting name, and the sourceFile - * is anything accepted by project.file() - */ - @Input - void keystoreFile(String name, Object sourceFile) { - keystoreFiles.put(name, sourceFile) - } - - @Input - void plugin(String path) { - Project pluginProject = project.project(path) - plugins.put(pluginProject.name, pluginProject) - } - - @Input - void mavenPlugin(String name, String mavenCoords) { - plugins.put(name, mavenCoords) - } - - /** Add a module to the cluster. The project must be an esplugin and have a single zip default artifact. */ - @Input - void module(Project moduleProject) { - modules.add(moduleProject) - } - - @Input - void setupCommand(String name, Object... args) { - setupCommands.put(name, args) - } - - /** - * Add an extra configuration file. The path is relative to the config dir, and the sourceFile - * is anything accepted by project.file() - */ - @Input - void extraConfigFile(String path, Object sourceFile) { - if (path == 'elasticsearch.yml') { - throw new GradleException('Overwriting elasticsearch.yml is not allowed, add additional settings using cluster { setting "foo", "bar" }') - } - extraConfigFiles.put(path, sourceFile) - } - - /** Add dependencies that must be run before the first task setting up the cluster. */ - @Input - void dependsOn(Object... deps) { - dependencies.addAll(deps) - } -} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy deleted file mode 100644 index bfba30619b37..000000000000 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ /dev/null @@ -1,991 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.gradle.test - -import org.apache.tools.ant.DefaultLogger -import org.apache.tools.ant.taskdefs.condition.Os -import org.elasticsearch.gradle.BuildPlugin -import org.elasticsearch.gradle.BwcVersions -import org.elasticsearch.gradle.LoggedExec -import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.VersionProperties -import org.elasticsearch.gradle.plugin.PluginBuildPlugin -import org.elasticsearch.gradle.plugin.PluginPropertiesExtension -import org.gradle.api.AntBuilder -import org.gradle.api.DefaultTask -import org.gradle.api.GradleException -import org.gradle.api.Project -import org.gradle.api.Task -import org.gradle.api.artifacts.Configuration -import org.gradle.api.artifacts.Dependency -import org.gradle.api.file.FileCollection -import org.gradle.api.logging.Logger -import org.gradle.api.tasks.Copy -import org.gradle.api.tasks.Delete -import org.gradle.api.tasks.Exec -import org.gradle.internal.jvm.Jvm - -import java.nio.charset.StandardCharsets -import java.nio.file.Paths -import java.util.concurrent.TimeUnit -import java.util.stream.Collectors - -/** - * A helper for creating tasks to build a cluster that is used by a task, and tear down the cluster when the task is finished. - */ -class ClusterFormationTasks { - - /** - * Adds dependent tasks to the given task to start and stop a cluster with the given configuration. - * - * Returns a list of NodeInfo objects for each node in the cluster. - */ - static List setup(Project project, String prefix, Task runner, ClusterConfiguration config) { - File sharedDir = new File(project.buildDir, "cluster/shared") - Object startDependencies = config.dependencies - /* First, if we want a clean environment, we remove everything in the - * shared cluster directory to ensure there are no leftovers in repos - * or anything in theory this should not be necessary but repositories - * are only deleted in the cluster-state and not on-disk such that - * snapshots survive failures / test runs and there is no simple way - * today to fix that. */ - if (config.cleanShared) { - Task cleanup = project.tasks.create( - name: "${prefix}#prepareCluster.cleanShared", - type: Delete, - dependsOn: startDependencies) { - delete sharedDir - doLast { - sharedDir.mkdirs() - } - } - startDependencies = cleanup - } - List startTasks = [] - List nodes = [] - if (config.numNodes < config.numBwcNodes) { - throw new GradleException("numNodes must be >= numBwcNodes [${config.numNodes} < ${config.numBwcNodes}]") - } - if (config.numBwcNodes > 0 && config.bwcVersion == null) { - throw new GradleException("bwcVersion must not be null if numBwcNodes is > 0") - } - // this is our current version distribution configuration we use for all kinds of REST tests etc. 
- Configuration currentDistro = project.configurations.create("${prefix}_elasticsearchDistro") - Configuration bwcDistro = project.configurations.create("${prefix}_elasticsearchBwcDistro") - Configuration bwcPlugins = project.configurations.create("${prefix}_elasticsearchBwcPlugins") - if (System.getProperty('tests.distribution', 'oss') == 'integ-test-zip') { - throw new Exception("tests.distribution=integ-test-zip is not supported") - } - configureDistributionDependency(project, config.distribution, currentDistro, VersionProperties.elasticsearch) - boolean hasBwcNodes = config.numBwcNodes > 0 - if (hasBwcNodes) { - if (config.bwcVersion == null) { - throw new IllegalArgumentException("Must specify bwcVersion when numBwcNodes > 0") - } - // if we have a cluster that has a BWC cluster we also need to configure a dependency on the BWC version - // this version uses the same distribution etc. and only differs in the version we depend on. - // from here on everything else works the same as if it's the current version, we fetch the BWC version - // from mirrors using gradles built-in mechanism etc. - - configureDistributionDependency(project, config.distribution, bwcDistro, config.bwcVersion.toString()) - for (Map.Entry entry : config.plugins.entrySet()) { - configureBwcPluginDependency(project, entry.getValue(), bwcPlugins, config.bwcVersion) - } - bwcDistro.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS) - bwcPlugins.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS) - } - for (int i = 0; i < config.numNodes; i++) { - // we start N nodes and out of these N nodes there might be M bwc nodes. - // for each of those nodes we might have a different configuration - Configuration distro - String elasticsearchVersion - if (i < config.numBwcNodes) { - elasticsearchVersion = config.bwcVersion.toString() - if (project.bwcVersions.unreleased.contains(config.bwcVersion) && - (project.version != elasticsearchVersion)) { - elasticsearchVersion += "-SNAPSHOT" - } - distro = bwcDistro - } else { - elasticsearchVersion = VersionProperties.elasticsearch - distro = currentDistro - } - NodeInfo node = new NodeInfo(config, i, project, prefix, elasticsearchVersion, sharedDir) - nodes.add(node) - Closure writeConfigSetup - Object dependsOn - writeConfigSetup = { Map esConfig -> - if (config.getAutoSetHostsProvider()) { - if (esConfig.containsKey("discovery.seed_providers") == false) { - esConfig["discovery.seed_providers"] = 'file' - } - esConfig["discovery.seed_hosts"] = [] - } - if (esConfig['discovery.type'] == null && config.getAutoSetInitialMasterNodes()) { - esConfig['cluster.initial_master_nodes'] = nodes.stream().map({ n -> - if (n.config.settings['node.name'] == null) { - return "node-" + n.nodeNum - } else { - return n.config.settings['node.name'] - } - }).collect(Collectors.toList()) - } - esConfig - } - dependsOn = startDependencies - startTasks.add(configureNode(project, prefix, runner, dependsOn, node, config, distro, writeConfigSetup)) - } - - Task wait = configureWaitTask("${prefix}#wait", project, nodes, startTasks, config.nodeStartupWaitSeconds) - runner.dependsOn(wait) - - return nodes - } - - /** Adds a dependency on the given distribution */ - static void configureDistributionDependency(Project project, String distro, Configuration configuration, String elasticsearchVersion) { - boolean internalBuild = project.hasProperty('bwcVersions') - if (distro.equals("integ-test-zip")) { - // short circuit integ test so it doesn't complicate the rest of the distribution setup 
below - if (internalBuild) { - project.dependencies.add( - configuration.name, - project.dependencies.project(path: ":distribution", configuration: 'integ-test-zip') - ) - } else { - project.dependencies.add( - configuration.name, - "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${elasticsearchVersion}@zip" - ) - } - return - } - // TEMP HACK - // The oss docs CI build overrides the distro on the command line. This hack handles backcompat until CI is updated. - if (distro.equals('oss-zip')) { - distro = 'oss' - } - if (distro.equals('zip')) { - distro = 'default' - } - // END TEMP HACK - if (['oss', 'default'].contains(distro) == false) { - throw new GradleException("Unknown distribution: ${distro} in project ${project.path}") - } - Version version = Version.fromString(elasticsearchVersion) - String os = getOs() - String classifier = "${os}-x86_64" - String packaging = os.equals('windows') ? 'zip' : 'tar.gz' - String artifactName = 'elasticsearch' - if (distro.equals('oss') && Version.fromString(elasticsearchVersion).onOrAfter('6.3.0')) { - artifactName += '-oss' - } - Object dependency - String snapshotProject = "${os}-${os.equals('windows') ? 'zip' : 'tar'}" - if (version.before("7.0.0")) { - snapshotProject = "zip" - } - if (distro.equals("oss")) { - snapshotProject = "oss-" + snapshotProject - } - - BwcVersions.UnreleasedVersionInfo unreleasedInfo = null - - if (project.hasProperty('bwcVersions')) { - // NOTE: leniency is needed for external plugin authors using build-tools. maybe build the version compat info into build-tools? - unreleasedInfo = project.bwcVersions.unreleasedInfo(version) - } - if (unreleasedInfo != null) { - dependency = project.dependencies.project( - path: unreleasedInfo.gradleProjectPath, configuration: snapshotProject - ) - } else if (internalBuild && elasticsearchVersion.equals(VersionProperties.elasticsearch)) { - dependency = project.dependencies.project(path: ":distribution:archives:${snapshotProject}") - } else { - if (version.before('7.0.0')) { - classifier = "" // for bwc, before we had classifiers - } - // group does not matter as it is not used when we pull from the ivy repo that points to the download service - dependency = "dnm:${artifactName}:${elasticsearchVersion}-${classifier}@${packaging}" - } - project.dependencies.add(configuration.name, dependency) - } - - /** Adds a dependency on a different version of the given plugin, which will be retrieved using gradle's dependency resolution */ - static void configureBwcPluginDependency(Project project, Object plugin, Configuration configuration, Version elasticsearchVersion) { - if (plugin instanceof Project) { - Project pluginProject = (Project)plugin - verifyProjectHasBuildPlugin(configuration.name, elasticsearchVersion, project, pluginProject) - final String pluginName = findPluginName(pluginProject) - project.dependencies.add(configuration.name, "org.elasticsearch.plugin:${pluginName}:${elasticsearchVersion}@zip") - } else { - project.dependencies.add(configuration.name, "${plugin}@zip") - } - } - - /** - * Adds dependent tasks to start an elasticsearch cluster before the given task is executed, - * and stop it after it has finished executing. - * - * The setup of the cluster involves the following: - *
-     * <ol>
-     *   <li>Cleanup the extraction directory</li>
-     *   <li>Extract a fresh copy of elasticsearch</li>
-     *   <li>Write an elasticsearch.yml config file</li>
-     *   <li>Copy plugins that will be installed to a temporary dir (which contains spaces)</li>
-     *   <li>Install plugins</li>
-     *   <li>Run additional setup commands</li>
-     *   <li>Start elasticsearch</li>
-     * </ol>
- * - * @return a task which starts the node. - */ - static Task configureNode(Project project, String prefix, Task runner, Object dependsOn, NodeInfo node, ClusterConfiguration config, - Configuration distribution, Closure writeConfig) { - - // tasks are chained so their execution order is maintained - Task setup = project.tasks.create(name: taskName(prefix, node, 'clean'), type: Delete, dependsOn: dependsOn) { - delete node.homeDir - delete node.cwd - } - setup = project.tasks.create(name: taskName(prefix, node, 'createCwd'), type: DefaultTask, dependsOn: setup) { - doLast { - node.cwd.mkdirs() - } - outputs.dir node.cwd - } - setup = configureCheckPreviousTask(taskName(prefix, node, 'checkPrevious'), project, setup, node) - setup = configureStopTask(taskName(prefix, node, 'stopPrevious'), project, setup, node) - setup = configureExtractTask(taskName(prefix, node, 'extract'), project, setup, node, distribution, config.distribution) - setup = configureWriteConfigTask(taskName(prefix, node, 'configure'), project, setup, node, writeConfig) - setup = configureCreateKeystoreTask(taskName(prefix, node, 'createKeystore'), project, setup, node) - setup = configureAddKeystoreSettingTasks(prefix, project, setup, node) - setup = configureAddKeystoreFileTasks(prefix, project, setup, node) - - if (node.config.plugins.isEmpty() == false) { - if (node.nodeVersion == Version.fromString(VersionProperties.elasticsearch)) { - setup = configureCopyPluginsTask(taskName(prefix, node, 'copyPlugins'), project, setup, node, prefix) - } else { - setup = configureCopyBwcPluginsTask(taskName(prefix, node, 'copyBwcPlugins'), project, setup, node, prefix) - } - } - - // install modules - for (Project module : node.config.modules) { - String actionName = pluginTaskName('install', module.name, 'Module') - setup = configureInstallModuleTask(taskName(prefix, node, actionName), project, setup, node, module) - } - - // install plugins - for (String pluginName : node.config.plugins.keySet()) { - String actionName = pluginTaskName('install', pluginName, 'Plugin') - setup = configureInstallPluginTask(taskName(prefix, node, actionName), project, setup, node, pluginName, prefix) - } - - // sets up any extra config files that need to be copied over to the ES instance; - // its run after plugins have been installed, as the extra config files may belong to plugins - setup = configureExtraConfigFilesTask(taskName(prefix, node, 'extraConfig'), project, setup, node) - - // extra setup commands - for (Map.Entry command : node.config.setupCommands.entrySet()) { - // the first argument is the actual script name, relative to home - Object[] args = command.getValue().clone() - final Object commandPath - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - /* - * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to - * getting the short name requiring the path to already exist. Note that we have to capture the value of arg[0] now - * otherwise we would stack overflow later since arg[0] is replaced below. 
- */ - String argsZero = args[0] - commandPath = "${-> Paths.get(NodeInfo.getShortPathName(node.homeDir.toString())).resolve(argsZero.toString()).toString()}" - } else { - commandPath = node.homeDir.toPath().resolve(args[0].toString()).toString() - } - args[0] = commandPath - setup = configureExecTask(taskName(prefix, node, command.getKey()), project, setup, node, args) - } - - Task start = configureStartTask(taskName(prefix, node, 'start'), project, setup, node) - - if (node.config.daemonize) { - Task stop = configureStopTask(taskName(prefix, node, 'stop'), project, [], node) - // if we are running in the background, make sure to stop the server when the task completes - runner.finalizedBy(stop) - start.finalizedBy(stop) - for (Object dependency : config.dependencies) { - if (dependency instanceof Fixture) { - def depStop = ((Fixture)dependency).stopTask - runner.finalizedBy(depStop) - start.finalizedBy(depStop) - } - } - } - return start - } - - /** Adds a task to extract the elasticsearch distribution */ - static Task configureExtractTask(String name, Project project, Task setup, NodeInfo node, - Configuration configuration, String distribution) { - List extractDependsOn = [configuration, setup] - /* configuration.singleFile will be an external artifact if this is being run by a plugin not living in the - elasticsearch source tree. If this is a plugin built in the elasticsearch source tree or this is a distro in - the elasticsearch source tree then this should be the version of elasticsearch built by the source tree. - If it isn't then Bad Things(TM) will happen. */ - Task extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) { - if (getOs().equals("windows") || distribution.equals("integ-test-zip")) { - from { - project.zipTree(configuration.singleFile) - } - } else { - // macos and linux use tar - from { - project.tarTree(project.resources.gzip(configuration.singleFile)) - } - } - into node.baseDir - } - - return extract - } - - /** Adds a task to write elasticsearch.yml for the given node configuration */ - static Task configureWriteConfigTask(String name, Project project, Task setup, NodeInfo node, Closure configFilter) { - Map esConfig = [ - 'cluster.name' : node.clusterName, - 'node.name' : "node-" + node.nodeNum, - (node.nodeVersion.onOrAfter('7.4.0') ? 'node.pidfile' : 'pidfile') : node.pidFile, - 'path.repo' : "${node.sharedDir}/repo", - 'path.shared_data' : "${node.sharedDir}/", - // Define a node attribute so we can test that it exists - 'node.attr.testattr' : 'test', - // Don't wait for state, just start up quickly. This will also allow new and old nodes in the BWC case to become the master - 'discovery.initial_state_timeout' : '0s' - ] - esConfig['http.port'] = node.config.httpPort - if (node.nodeVersion.onOrAfter('6.7.0')) { - esConfig['transport.port'] = node.config.transportPort - } else { - esConfig['transport.tcp.port'] = node.config.transportPort - } - // Default the watermarks to absurdly low to prevent the tests from failing on nodes without enough disk space - esConfig['cluster.routing.allocation.disk.watermark.low'] = '1b' - esConfig['cluster.routing.allocation.disk.watermark.high'] = '1b' - if (node.nodeVersion.major >= 6) { - esConfig['cluster.routing.allocation.disk.watermark.flood_stage'] = '1b' - } - // increase script compilation limit since tests can rapid-fire script compilations - esConfig['script.max_compilations_rate'] = '2048/1m' - // Temporarily disable the real memory usage circuit breaker. 
It depends on real memory usage which we have no full control - // over and the REST client will not retry on circuit breaking exceptions yet (see #31986 for details). Once the REST client - // can retry on circuit breaking exceptions, we can revert again to the default configuration. - if (node.nodeVersion.major >= 7) { - esConfig['indices.breaker.total.use_real_memory'] = false - } - - Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup) - writeConfig.doFirst { - for (Map.Entry setting : node.config.settings) { - if (setting.value == null) { - esConfig.remove(setting.key) - } else { - esConfig.put(setting.key, setting.value) - } - } - - esConfig = configFilter.call(esConfig) - File configFile = new File(node.pathConf, 'elasticsearch.yml') - logger.info("Configuring ${configFile}") - configFile.setText(esConfig.collect { key, value -> "${key}: ${value}" }.join('\n'), 'UTF-8') - } - } - - /** Adds a task to create keystore */ - static Task configureCreateKeystoreTask(String name, Project project, Task setup, NodeInfo node) { - if (node.config.keystoreSettings.isEmpty() && node.config.keystoreFiles.isEmpty()) { - return setup - } else { - /* - * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to - * getting the short name requiring the path to already exist. - */ - final Object esKeystoreUtil = "${-> node.binPath().resolve('elasticsearch-keystore').toString()}" - return configureExecTask(name, project, setup, node, esKeystoreUtil, 'create') - } - } - - /** Adds tasks to add settings to the keystore */ - static Task configureAddKeystoreSettingTasks(String parent, Project project, Task setup, NodeInfo node) { - Map kvs = node.config.keystoreSettings - Task parentTask = setup - /* - * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to getting - * the short name requiring the path to already exist. - */ - final Object esKeystoreUtil = "${-> node.binPath().resolve('elasticsearch-keystore').toString()}" - for (Map.Entry entry in kvs) { - String key = entry.getKey() - String name = taskName(parent, node, 'addToKeystore#' + key) - Task t = configureExecTask(name, project, parentTask, node, esKeystoreUtil, 'add', key, '-x') - String settingsValue = entry.getValue() // eval this early otherwise it will not use the right value - t.doFirst { - standardInput = new ByteArrayInputStream(settingsValue.getBytes(StandardCharsets.UTF_8)) - } - parentTask = t - } - return parentTask - } - - /** Adds tasks to add files to the keystore */ - static Task configureAddKeystoreFileTasks(String parent, Project project, Task setup, NodeInfo node) { - Map kvs = node.config.keystoreFiles - if (kvs.isEmpty()) { - return setup - } - Task parentTask = setup - /* - * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to getting - * the short name requiring the path to already exist. 
- */ - final Object esKeystoreUtil = "${-> node.binPath().resolve('elasticsearch-keystore').toString()}" - for (Map.Entry entry in kvs) { - String key = entry.getKey() - String name = taskName(parent, node, 'addToKeystore#' + key) - String srcFileName = entry.getValue() - Task t = configureExecTask(name, project, parentTask, node, esKeystoreUtil, 'add-file', key, srcFileName) - t.doFirst { - File srcFile = project.file(srcFileName) - if (srcFile.isDirectory()) { - throw new GradleException("Source for keystoreFile must be a file: ${srcFile}") - } - if (srcFile.exists() == false) { - throw new GradleException("Source file for keystoreFile does not exist: ${srcFile}") - } - } - parentTask = t - } - return parentTask - } - - static Task configureExtraConfigFilesTask(String name, Project project, Task setup, NodeInfo node) { - if (node.config.extraConfigFiles.isEmpty()) { - return setup - } - Copy copyConfig = project.tasks.create(name: name, type: Copy, dependsOn: setup) - File configDir = new File(node.homeDir, 'config') - copyConfig.into(configDir) // copy must always have a general dest dir, even though we don't use it - for (Map.Entry extraConfigFile : node.config.extraConfigFiles.entrySet()) { - Object extraConfigFileValue = extraConfigFile.getValue() - copyConfig.doFirst { - // make sure the copy won't be a no-op or act on a directory - File srcConfigFile = project.file(extraConfigFileValue) - if (srcConfigFile.isDirectory()) { - throw new GradleException("Source for extraConfigFile must be a file: ${srcConfigFile}") - } - if (srcConfigFile.exists() == false) { - throw new GradleException("Source file for extraConfigFile does not exist: ${srcConfigFile}") - } - } - File destConfigFile = new File(node.homeDir, 'config/' + extraConfigFile.getKey()) - // wrap source file in closure to delay resolution to execution time - copyConfig.from({ extraConfigFileValue }) { - // this must be in a closure so it is only applied to the single file specified in from above - into(configDir.toPath().relativize(destConfigFile.canonicalFile.parentFile.toPath()).toFile()) - rename { destConfigFile.name } - } - } - return copyConfig - } - - /** - * Adds a task to copy plugins to a temp dir, which they will later be installed from. - * - * For each plugin, if the plugin has rest spec apis in its tests, those api files are also copied - * to the test resources for this project. 
- */ - static Task configureCopyPluginsTask(String name, Project project, Task setup, NodeInfo node, String prefix) { - Copy copyPlugins = project.tasks.create(name: name, type: Copy, dependsOn: setup) - - List pluginFiles = [] - for (Map.Entry plugin : node.config.plugins.entrySet()) { - - String configurationName = pluginConfigurationName(prefix, plugin.key) - Configuration configuration = project.configurations.findByName(configurationName) - if (configuration == null) { - configuration = project.configurations.create(configurationName) - } - - if (plugin.getValue() instanceof Project) { - Project pluginProject = plugin.getValue() - verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject) - - project.dependencies.add(configurationName, project.dependencies.project(path: pluginProject.path, configuration: 'zip')) - setup.dependsOn(pluginProject.tasks.bundlePlugin) - - // also allow rest tests to use the rest spec from the plugin - String copyRestSpecTaskName = pluginTaskName('copy', plugin.getKey(), 'PluginRestSpec') - Copy copyRestSpec = project.tasks.findByName(copyRestSpecTaskName) - for (File resourceDir : pluginProject.sourceSets.test.resources.srcDirs) { - File restApiDir = new File(resourceDir, 'rest-api-spec/api') - if (restApiDir.exists() == false) continue - if (copyRestSpec == null) { - copyRestSpec = project.tasks.create(name: copyRestSpecTaskName, type: Copy) - copyPlugins.dependsOn(copyRestSpec) - copyRestSpec.into(project.sourceSets.test.output.resourcesDir) - } - copyRestSpec.from(resourceDir).include('rest-api-spec/api/**') - } - } else { - project.dependencies.add(configurationName, "${plugin.getValue()}@zip") - } - - - - pluginFiles.add(configuration) - } - - copyPlugins.into(node.pluginsTmpDir) - copyPlugins.from(pluginFiles) - return copyPlugins - } - - private static String pluginConfigurationName(final String prefix, final String name) { - return "_plugin_${prefix}_${name}".replace(':', '_') - } - - private static String pluginBwcConfigurationName(final String prefix, final String name) { - return "_plugin_bwc_${prefix}_${name}".replace(':', '_') - } - - /** Configures task to copy a plugin based on a zip file resolved using dependencies for an older version */ - static Task configureCopyBwcPluginsTask(String name, Project project, Task setup, NodeInfo node, String prefix) { - Configuration bwcPlugins = project.configurations.getByName("${prefix}_elasticsearchBwcPlugins") - for (Map.Entry plugin : node.config.plugins.entrySet()) { - String configurationName = pluginBwcConfigurationName(prefix, plugin.key) - Configuration configuration = project.configurations.findByName(configurationName) - if (configuration == null) { - configuration = project.configurations.create(configurationName) - } - - if (plugin.getValue() instanceof Project) { - Project pluginProject = plugin.getValue() - verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject) - - final String depName = findPluginName(pluginProject) - - Dependency dep = bwcPlugins.dependencies.find { - it.name == depName - } - configuration.dependencies.add(dep) - } else { - project.dependencies.add(configurationName, "${plugin.getValue()}@zip") - } - } - - Copy copyPlugins = project.tasks.create(name: name, type: Copy, dependsOn: setup) { - from bwcPlugins - into node.pluginsTmpDir - } - return copyPlugins - } - - static Task configureInstallModuleTask(String name, Project project, Task setup, NodeInfo node, Project module) { - if (node.config.distribution != 'integ-test-zip') { - 
project.logger.info("Not installing modules for $name, ${node.config.distribution} already has them") - return setup - } - if (module.plugins.hasPlugin(PluginBuildPlugin) == false) { - throw new GradleException("Task ${name} cannot include module ${module.path} which is not an esplugin") - } - Copy installModule = project.tasks.create(name, Copy.class) - installModule.dependsOn(setup) - installModule.dependsOn(module.tasks.bundlePlugin) - installModule.into(new File(node.homeDir, "modules/${module.name}")) - installModule.from({ project.zipTree(module.tasks.bundlePlugin.outputs.files.singleFile) }) - return installModule - } - - static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, String pluginName, String prefix) { - FileCollection pluginZip; - if (node.nodeVersion != Version.fromString(VersionProperties.elasticsearch)) { - pluginZip = project.configurations.getByName(pluginBwcConfigurationName(prefix, pluginName)) - } else { - pluginZip = project.configurations.getByName(pluginConfigurationName(prefix, pluginName)) - } - // delay reading the file location until execution time by wrapping in a closure within a GString - final Object file = "${-> new File(node.pluginsTmpDir, pluginZip.singleFile.getName()).toURI().toURL().toString()}" - /* - * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to getting - * the short name requiring the path to already exist. - */ - final Object esPluginUtil = "${-> node.binPath().resolve('elasticsearch-plugin').toString()}" - final Object[] args = [esPluginUtil, 'install', '--batch', file] - return configureExecTask(name, project, setup, node, args) - } - - /** Wrapper for command line argument: surrounds comma with double quotes **/ - private static class EscapeCommaWrapper { - - Object arg - - public String toString() { - String s = arg.toString() - - /// Surround strings that contains a comma with double quotes - if (s.indexOf(',') != -1) { - return "\"${s}\"" - } - return s - } - } - - /** Adds a task to execute a command to help setup the cluster */ - static Task configureExecTask(String name, Project project, Task setup, NodeInfo node, Object[] execArgs) { - return project.tasks.create(name: name, type: LoggedExec, dependsOn: setup) { Exec exec -> - exec.workingDir node.cwd - if (useRuntimeJava(project, node)) { - exec.environment.put('JAVA_HOME', project.runtimeJavaHome) - } else { - // force JAVA_HOME to *not* be set - exec.environment.remove('JAVA_HOME') - } - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - exec.executable 'cmd' - exec.args '/C', 'call' - // On Windows the comma character is considered a parameter separator: - // argument are wrapped in an ExecArgWrapper that escapes commas - exec.args execArgs.collect { a -> new EscapeCommaWrapper(arg: a) } - } else { - exec.commandLine execArgs - } - } - } - - public static boolean useRuntimeJava(Project project, NodeInfo node) { - return (project.isRuntimeJavaHomeSet || - (node.isBwcNode == false && node.nodeVersion.before(Version.fromString("7.0.0"))) || - node.config.distribution == 'integ-test-zip') - } - - /** Adds a task to start an elasticsearch node with the given configuration */ - static Task configureStartTask(String name, Project project, Task setup, NodeInfo node) { - // this closure is converted into ant nodes by groovy's AntBuilder - Closure antRunner = { AntBuilder ant -> - ant.exec(executable: node.executable, spawn: node.config.daemonize, newenvironment: true, - dir: node.cwd, 
taskname: 'elasticsearch') { - node.env.each { key, value -> env(key: key, value: value) } - if (useRuntimeJava(project, node)) { - env(key: 'JAVA_HOME', value: project.runtimeJavaHome) - } - node.args.each { arg(value: it) } - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - // Having no TMP on Windows defaults to C:\Windows and permission errors - // Since we configure ant to run with a new environment above, we need to explicitly pass this - String tmp = System.getenv("TMP") - assert tmp != null - env(key: "TMP", value: tmp) - } - } - } - - // this closure is the actual code to run elasticsearch - Closure elasticsearchRunner = { - // Due to how ant exec works with the spawn option, we lose all stdout/stderr from the - // process executed. To work around this, when spawning, we wrap the elasticsearch start - // command inside another shell script, which simply internally redirects the output - // of the real elasticsearch script. This allows ant to keep the streams open with the - // dummy process, but us to have the output available if there is an error in the - // elasticsearch start script - if (node.config.daemonize) { - node.writeWrapperScript() - } - - node.getCommandString().eachLine { line -> logger.info(line) } - - if (logger.isInfoEnabled() || node.config.daemonize == false) { - runAntCommand(project, antRunner, System.out, System.err) - } else { - // buffer the output, we may not need to print it - PrintStream captureStream = new PrintStream(node.buffer, true, "UTF-8") - runAntCommand(project, antRunner, captureStream, captureStream) - } - } - - Task start = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup) - if (node.javaVersion != null) { - BuildPlugin.requireJavaHome(start, node.javaVersion) - } - start.doLast(elasticsearchRunner) - start.doFirst { - // If the node runs in a FIPS 140-2 JVM, the BCFKS default keystore will be password protected - if (project.inFipsJvm){ - node.config.systemProperties.put('javax.net.ssl.trustStorePassword', 'password') - node.config.systemProperties.put('javax.net.ssl.keyStorePassword', 'password') - } - - // Configure ES JAVA OPTS - adds system properties, assertion flags, remote debug etc - List esJavaOpts = [node.env.get('ES_JAVA_OPTS', '')] - String collectedSystemProperties = node.config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" ") - esJavaOpts.add(collectedSystemProperties) - esJavaOpts.add(node.config.jvmArgs) - if (Boolean.parseBoolean(System.getProperty('tests.asserts', 'true'))) { - // put the enable assertions options before other options to allow - // flexibility to disable assertions for specific packages or classes - // in the cluster-specific options - esJavaOpts.add("-ea") - esJavaOpts.add("-esa") - } - // we must add debug options inside the closure so the config is read at execution time, as - // gradle task options are not processed until the end of the configuration phase - if (node.config.debug) { - println 'Running elasticsearch in debug mode, suspending until connected on port 8000' - esJavaOpts.add('-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000') - } - node.env['ES_JAVA_OPTS'] = esJavaOpts.join(" ") - - // - project.logger.info("Starting node in ${node.clusterName} distribution: ${node.config.distribution}") - } - return start - } - - static Task configureWaitTask(String name, Project project, List nodes, List startTasks, int waitSeconds) { - Task wait = project.tasks.create(name: name, dependsOn: startTasks) - wait.doLast { - - Collection unicastHosts = new 
HashSet<>() - nodes.forEach { node -> - unicastHosts.addAll(node.config.otherUnicastHostAddresses.call()) - String unicastHost = node.config.unicastTransportUri(node, null, project.createAntBuilder()) - if (unicastHost != null) { - unicastHosts.add(unicastHost) - } - } - String unicastHostsTxt = String.join("\n", unicastHosts) - nodes.forEach { node -> - node.pathConf.toPath().resolve("unicast_hosts.txt").setText(unicastHostsTxt) - } - - ant.waitfor(maxwait: "${waitSeconds}", maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: "failed${name}") { - or { - for (NodeInfo node : nodes) { - resourceexists { - file(file: node.failedMarker.toString()) - } - } - and { - for (NodeInfo node : nodes) { - resourceexists { - file(file: node.pidFile.toString()) - } - resourceexists { - file(file: node.httpPortsFile.toString()) - } - resourceexists { - file(file: node.transportPortsFile.toString()) - } - } - } - } - } - if (ant.properties.containsKey("failed${name}".toString())) { - waitFailed(project, nodes, logger, "Failed to start elasticsearch: timed out after ${waitSeconds} seconds") - } - - boolean anyNodeFailed = false - for (NodeInfo node : nodes) { - if (node.failedMarker.exists()) { - logger.error("Failed to start elasticsearch: ${node.failedMarker.toString()} exists") - anyNodeFailed = true - } - } - if (anyNodeFailed) { - waitFailed(project, nodes, logger, 'Failed to start elasticsearch') - } - - // make sure all files exist otherwise we haven't fully started up - boolean missingFile = false - for (NodeInfo node : nodes) { - missingFile |= node.pidFile.exists() == false - missingFile |= node.httpPortsFile.exists() == false - missingFile |= node.transportPortsFile.exists() == false - } - if (missingFile) { - waitFailed(project, nodes, logger, 'Elasticsearch did not complete startup in time allotted') - } - - // go through each node checking the wait condition - for (NodeInfo node : nodes) { - // first bind node info to the closure, then pass to the ant runner so we can get good logging - Closure antRunner = node.config.waitCondition.curry(node) - - boolean success - if (logger.isInfoEnabled()) { - success = runAntCommand(project, antRunner, System.out, System.err) - } else { - PrintStream captureStream = new PrintStream(node.buffer, true, "UTF-8") - success = runAntCommand(project, antRunner, captureStream, captureStream) - } - - if (success == false) { - waitFailed(project, nodes, logger, 'Elasticsearch cluster failed to pass wait condition') - } - } - } - return wait - } - - static void waitFailed(Project project, List nodes, Logger logger, String msg) { - for (NodeInfo node : nodes) { - if (logger.isInfoEnabled() == false) { - // We already log the command at info level. No need to do it twice. 
- node.getCommandString().eachLine { line -> logger.error(line) } - } - logger.error("Node ${node.nodeNum} output:") - logger.error("|-----------------------------------------") - logger.error("| failure marker exists: ${node.failedMarker.exists()}") - logger.error("| pid file exists: ${node.pidFile.exists()}") - logger.error("| http ports file exists: ${node.httpPortsFile.exists()}") - logger.error("| transport ports file exists: ${node.transportPortsFile.exists()}") - // the waitfor failed, so dump any output we got (if info logging this goes directly to stdout) - logger.error("|\n| [ant output]") - node.buffer.toString('UTF-8').eachLine { line -> logger.error("| ${line}") } - // also dump the log file for the startup script (which will include ES logging output to stdout) - if (node.startLog.exists()) { - logger.error("|\n| [log]") - node.startLog.eachLine { line -> logger.error("| ${line}") } - } - if (node.pidFile.exists() && node.failedMarker.exists() == false && - (node.httpPortsFile.exists() == false || node.transportPortsFile.exists() == false)) { - logger.error("|\n| [jstack]") - String pid = node.pidFile.getText('UTF-8') - ByteArrayOutputStream output = new ByteArrayOutputStream() - project.exec { - commandLine = ["${project.runtimeJavaHome}/bin/jstack", pid] - standardOutput = output - } - output.toString('UTF-8').eachLine { line -> logger.error("| ${line}") } - } - logger.error("|-----------------------------------------") - } - throw new GradleException(msg) - } - - /** Adds a task to check if the process with the given pidfile is actually elasticsearch */ - static Task configureCheckPreviousTask(String name, Project project, Object depends, NodeInfo node) { - return project.tasks.create(name: name, type: Exec, dependsOn: depends) { - onlyIf { node.pidFile.exists() } - // the pid file won't actually be read until execution time, since the read is wrapped within an inner closure of the GString - ext.pid = "${ -> node.pidFile.getText('UTF-8').trim()}" - final File jps = Jvm.forHome(project.runtimeJavaHome).getExecutable('jps') - commandLine jps, '-l' - standardOutput = new ByteArrayOutputStream() - doLast { - String out = standardOutput.toString() - if (out.contains("${ext.pid} org.elasticsearch.bootstrap.Elasticsearch") == false) { - logger.error('jps -l') - logger.error(out) - logger.error("pid file: ${node.pidFile}") - logger.error("pid: ${ext.pid}") - throw new GradleException("jps -l did not report any process with org.elasticsearch.bootstrap.Elasticsearch\n" + - "Did you run gradle clean? 
Maybe an old pid file is still lying around.") - } else { - logger.info(out) - } - } - } - } - - /** Adds a task to kill an elasticsearch node with the given pidfile */ - static Task configureStopTask(String name, Project project, Object depends, NodeInfo node) { - return project.tasks.create(name: name, type: LoggedExec, dependsOn: depends) { - onlyIf { node.pidFile.exists() } - // the pid file won't actually be read until execution time, since the read is wrapped within an inner closure of the GString - ext.pid = "${ -> node.pidFile.getText('UTF-8').trim()}" - doFirst { - logger.info("Shutting down external node with pid ${pid}") - } - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - executable 'Taskkill' - args '/PID', pid, '/F' - } else { - executable 'kill' - args '-9', pid - } - doLast { - project.delete(node.pidFile) - // Large tests can exhaust disk space, clean up jdk from the distribution to save some space - project.delete(new File(node.homeDir, "jdk")) - } - } - } - - /** Returns a unique task name for this task and node configuration */ - static String taskName(String prefix, NodeInfo node, String action) { - if (node.config.numNodes > 1) { - return "${prefix}#node${node.nodeNum}.${action}" - } else { - return "${prefix}#${action}" - } - } - - public static String pluginTaskName(String action, String name, String suffix) { - // replace every dash followed by a character with just the uppercase character - String camelName = name.replaceAll(/-(\w)/) { _, c -> c.toUpperCase(Locale.ROOT) } - return action + camelName[0].toUpperCase(Locale.ROOT) + camelName.substring(1) + suffix - } - - /** Runs an ant command, sending output to the given out and error streams */ - static Object runAntCommand(Project project, Closure command, PrintStream outputStream, PrintStream errorStream) { - DefaultLogger listener = new DefaultLogger( - errorPrintStream: errorStream, - outputPrintStream: outputStream, - messageOutputLevel: org.apache.tools.ant.Project.MSG_INFO) - - AntBuilder ant = project.createAntBuilder() - ant.project.addBuildListener(listener) - Object retVal = command(ant) - ant.project.removeBuildListener(listener) - return retVal - } - - static void verifyProjectHasBuildPlugin(String name, Version version, Project project, Project pluginProject) { - if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false) { - throw new GradleException("Task [${name}] cannot add plugin [${pluginProject.path}] with version [${version}] to project's " + - "[${project.path}] dependencies: the plugin is not an esplugin") - } - } - - /** Find the plugin name in the given project. */ - static String findPluginName(Project pluginProject) { - PluginPropertiesExtension extension = pluginProject.extensions.findByName('esplugin') - return extension.name - } - - /** Find the current OS */ - static String getOs() { - String os = "linux" - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - os = "windows" - } else if (Os.isFamily(Os.FAMILY_MAC)) { - os = "darwin" - } - return os - } -} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy deleted file mode 100644 index cb7a8397ed01..000000000000 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy +++ /dev/null @@ -1,297 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.gradle.test - -import com.sun.jna.Native -import com.sun.jna.WString -import org.apache.tools.ant.taskdefs.condition.Os -import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.VersionProperties -import org.gradle.api.Project - -import java.nio.file.Files -import java.nio.file.Path -import java.nio.file.Paths -/** - * A container for the files and configuration associated with a single node in a test cluster. - */ -class NodeInfo { - /** Gradle project this node is part of */ - Project project - - /** common configuration for all nodes, including this one */ - ClusterConfiguration config - - /** node number within the cluster, for creating unique names and paths */ - int nodeNum - - /** name of the cluster this node is part of */ - String clusterName - - /** root directory all node files and operations happen under */ - File baseDir - - /** shared data directory all nodes share */ - File sharedDir - - /** the pid file the node will use */ - File pidFile - - /** a file written by elasticsearch containing the ports of each bound address for http */ - File httpPortsFile - - /** a file written by elasticsearch containing the ports of each bound address for transport */ - File transportPortsFile - - /** elasticsearch home dir */ - File homeDir - - /** config directory */ - File pathConf - - /** data directory (as an Object, to allow lazy evaluation) */ - Object dataDir - - /** THE config file */ - File configFile - - /** working directory for the node process */ - File cwd - - /** file that if it exists, indicates the node failed to start */ - File failedMarker - - /** stdout/stderr log of the elasticsearch process for this node */ - File startLog - - /** directory to install plugins from */ - File pluginsTmpDir - - /** Major version of java this node runs with, or {@code null} if using the runtime java version */ - Integer javaVersion - - /** environment variables to start the node with */ - Map env - - /** arguments to start the node with */ - List args - - /** Executable to run the bin/elasticsearch with, either cmd or sh */ - String executable - - /** Path to the elasticsearch start script */ - private Object esScript - - /** script to run when running in the background */ - private File wrapperScript - - /** buffer for ant output when starting this node */ - ByteArrayOutputStream buffer = new ByteArrayOutputStream() - - /** the version of elasticsearch that this node runs */ - Version nodeVersion - - /** true if the node is not the current version */ - boolean isBwcNode - - /** Holds node configuration for part of a test cluster. 
*/ - NodeInfo(ClusterConfiguration config, int nodeNum, Project project, String prefix, String nodeVersion, File sharedDir) { - this.config = config - this.nodeNum = nodeNum - this.project = project - this.sharedDir = sharedDir - if (config.clusterName != null) { - clusterName = config.clusterName - } else { - clusterName = project.path.replace(':', '_').substring(1) + '_' + prefix - } - baseDir = new File(project.buildDir, "cluster/${prefix} node${nodeNum}") - pidFile = new File(baseDir, 'es.pid') - this.nodeVersion = Version.fromString(nodeVersion) - this.isBwcNode = this.nodeVersion.before(VersionProperties.elasticsearch) - homeDir = new File(baseDir, "elasticsearch-${nodeVersion}") - pathConf = new File(homeDir, 'config') - if (config.dataDir != null) { - dataDir = "${config.dataDir(nodeNum)}" - } else { - dataDir = new File(homeDir, "data") - } - configFile = new File(pathConf, 'elasticsearch.yml') - // even for rpm/deb, the logs are under home because we dont start with real services - File logsDir = new File(homeDir, 'logs') - httpPortsFile = new File(logsDir, 'http.ports') - transportPortsFile = new File(logsDir, 'transport.ports') - cwd = new File(baseDir, "cwd") - failedMarker = new File(cwd, 'run.failed') - startLog = new File(cwd, 'run.log') - pluginsTmpDir = new File(baseDir, "plugins tmp") - - args = [] - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - executable = 'cmd' - args.add('/C') - args.add('"') // quote the entire command - wrapperScript = new File(cwd, "run.bat") - /* - * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to - * getting the short name requiring the path to already exist. - */ - esScript = "${-> binPath().resolve('elasticsearch.bat').toString()}" - } else { - executable = 'bash' - wrapperScript = new File(cwd, "run") - esScript = binPath().resolve('elasticsearch') - } - if (config.daemonize) { - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - /* - * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to - * getting the short name requiring the path to already exist. - */ - args.add("${-> getShortPathName(wrapperScript.toString())}") - } else { - args.add("${wrapperScript}") - } - } else { - args.add("${esScript}") - } - - - if (this.nodeVersion.before("6.2.0")) { - javaVersion = 8 - } else if (this.nodeVersion.onOrAfter("6.2.0") && this.nodeVersion.before("6.3.0")) { - javaVersion = 9 - } else if (this.nodeVersion.onOrAfter("6.3.0") && this.nodeVersion.before("6.5.0")) { - javaVersion = 10 - } - - args.addAll("-E", "node.portsfile=true") - env = [:] - env.putAll(config.environmentVariables) - for (Map.Entry property : System.properties.entrySet()) { - if (property.key.startsWith('tests.es.')) { - args.add("-E") - args.add("${property.key.substring('tests.es.'.size())}=${property.value}") - } - } - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - /* - * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to - * getting the short name requiring the path to already exist. - */ - env.put('ES_PATH_CONF', "${-> getShortPathName(pathConf.toString())}") - } - else { - env.put('ES_PATH_CONF', pathConf) - } - if (!System.properties.containsKey("tests.es.path.data")) { - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - /* - * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to - * getting the short name requiring the path to already exist. 
This one is extra tricky because usually we rely on the node - * creating its data directory on startup but we simply can not do that here because getting the short path name requires - * the directory to already exist. Therefore, we create this directory immediately before getting the short name. - */ - args.addAll("-E", "path.data=${-> Files.createDirectories(Paths.get(dataDir.toString())); getShortPathName(dataDir.toString())}") - } else { - args.addAll("-E", "path.data=${-> dataDir.toString()}") - } - } - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - args.add('"') // end the entire command, quoted - } - } - - Path binPath() { - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - return Paths.get(getShortPathName(new File(homeDir, 'bin').toString())) - } else { - return Paths.get(new File(homeDir, 'bin').toURI()) - } - } - - static String getShortPathName(String path) { - assert Os.isFamily(Os.FAMILY_WINDOWS) - final WString longPath = new WString("\\\\?\\" + path) - // first we get the length of the buffer needed - final int length = JNAKernel32Library.getInstance().GetShortPathNameW(longPath, null, 0) - if (length == 0) { - throw new IllegalStateException("path [" + path + "] encountered error [" + Native.getLastError() + "]") - } - final char[] shortPath = new char[length] - // knowing the length of the buffer, now we get the short name - if (JNAKernel32Library.getInstance().GetShortPathNameW(longPath, shortPath, length) == 0) { - throw new IllegalStateException("path [" + path + "] encountered error [" + Native.getLastError() + "]") - } - // we have to strip the \\?\ away from the path for cmd.exe - return Native.toString(shortPath).substring(4) - } - - /** Returns debug string for the command that started this node. */ - String getCommandString() { - String esCommandString = "\nNode ${nodeNum} configuration:\n" - esCommandString += "|-----------------------------------------\n" - esCommandString += "| cwd: ${cwd}\n" - esCommandString += "| command: ${executable} ${args.join(' ')}\n" - esCommandString += '| environment:\n' - env.each { k, v -> esCommandString += "| ${k}: ${v}\n" } - if (config.daemonize) { - esCommandString += "|\n| [${wrapperScript.name}]\n" - wrapperScript.eachLine('UTF-8', { line -> esCommandString += " ${line}\n"}) - } - esCommandString += '|\n| [elasticsearch.yml]\n' - configFile.eachLine('UTF-8', { line -> esCommandString += "| ${line}\n" }) - esCommandString += "|-----------------------------------------" - return esCommandString - } - - void writeWrapperScript() { - String argsPasser = '"$@"' - String exitMarker = "; if [ \$? 
!= 0 ]; then touch run.failed; fi" - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - argsPasser = '%*' - exitMarker = "\r\n if \"%errorlevel%\" neq \"0\" ( type nul >> run.failed )" - } - wrapperScript.setText("\"${esScript}\" ${argsPasser} > run.log 2>&1 ${exitMarker}", 'UTF-8') - } - - /** Returns an address and port suitable for a uri to connect to this node over http */ - String httpUri() { - return httpPortsFile.readLines("UTF-8").get(0) - } - - /** Returns an address and port suitable for a uri to connect to this node over transport protocol */ - String transportUri() { - return transportPortsFile.readLines("UTF-8").get(0) - } - - /** Returns the file which contains the transport protocol ports for this node */ - File getTransportPortsFile() { - return transportPortsFile - } - - /** Returns the data directory for this node */ - File getDataDir() { - if (!(dataDir instanceof File)) { - return new File(dataDir) - } - return dataDir - } -} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy deleted file mode 100644 index f74d2944b886..000000000000 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy +++ /dev/null @@ -1,41 +0,0 @@ -package org.elasticsearch.gradle.test - -import org.gradle.api.DefaultTask -import org.gradle.api.Task -import org.gradle.api.tasks.Internal -import org.gradle.api.tasks.options.Option -import org.gradle.util.ConfigureUtil - -class RunTask extends DefaultTask { - - @Internal - ClusterConfiguration clusterConfig - - RunTask() { - description = "Runs elasticsearch with '${project.path}'" - group = 'Verification' - clusterConfig = new ClusterConfiguration(project) - clusterConfig.httpPort = 9200 - clusterConfig.transportPort = 9300 - clusterConfig.daemonize = false - clusterConfig.distribution = 'default' - project.afterEvaluate { - ClusterFormationTasks.setup(project, name, this, clusterConfig) - } - } - - @Option( - option = "debug-jvm", - description = "Enable debugging configuration, to allow attaching a debugger to elasticsearch." - ) - void setDebug(boolean enabled) { - clusterConfig.debug = enabled; - } - - /** Configure the cluster that will be run. 
*/ - @Override - Task configure(Closure closure) { - ConfigureUtil.configure(closure, clusterConfig) - return this - } -} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java index 5d89bee02ae4..7d0f2330f4fe 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java @@ -104,7 +104,7 @@ public class ElasticsearchCluster implements TestClusterConfiguration, Named { } } - private ElasticsearchNode getFirstNode() { + ElasticsearchNode getFirstNode() { return nodes.getAt(clusterName + "-0"); } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index 279f734d9e82..b27bac839e9a 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -148,6 +148,8 @@ public class ElasticsearchNode implements TestClusterConfiguration { private volatile Process esProcess; private Function nameCustomization = Function.identity(); private boolean isWorkingDirConfigured = false; + private String httpPort = "0"; + private String transportPort = "0"; ElasticsearchNode(String path, String name, Project project, ReaperService reaper, File workingDirBase) { this.path = path; @@ -359,8 +361,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { @Override public void freeze() { - requireNonNull(distributions, "null distribution passed when configuring test cluster `" + this + "`"); - requireNonNull(javaHome, "null javaHome passed when configuring test cluster `" + this + "`"); + requireNonNull(testDistribution, "null testDistribution passed when configuring test cluster `" + this + "`"); LOGGER.info("Locking configuration of `{}`", this); configurationFrozen.set(true); } @@ -637,7 +638,9 @@ public class ElasticsearchNode implements TestClusterConfiguration { private Map getESEnvironment() { Map defaultEnv = new HashMap<>(); - defaultEnv.put("JAVA_HOME", getJavaHome().getAbsolutePath()); + if ( getJavaHome() != null) { + defaultEnv.put("JAVA_HOME", getJavaHome().getAbsolutePath()); + } defaultEnv.put("ES_PATH_CONF", configFile.getParent().toString()); String systemPropertiesString = ""; if (systemProperties.isEmpty() == false) { @@ -696,9 +699,11 @@ public class ElasticsearchNode implements TestClusterConfiguration { // Don't inherit anything from the environment for as that would lack reproducibility environment.clear(); environment.putAll(getESEnvironment()); + // don't buffer all in memory, make sure we don't block on the default pipes processBuilder.redirectError(ProcessBuilder.Redirect.appendTo(esStderrFile.toFile())); processBuilder.redirectOutput(ProcessBuilder.Redirect.appendTo(esStdoutFile.toFile())); + LOGGER.info("Running `{}` in `{}` for {} env: {}", command, workingDir, this, environment); try { esProcess = processBuilder.start(); @@ -988,11 +993,11 @@ public class ElasticsearchNode implements TestClusterConfiguration { defaultConfig.put("path.shared_data", workingDir.resolve("sharedData").toString()); defaultConfig.put("node.attr.testattr", "test"); defaultConfig.put("node.portsfile", "true"); - defaultConfig.put("http.port", "0"); + defaultConfig.put("http.port", 
httpPort); if (getVersion().onOrAfter(Version.fromString("6.7.0"))) { - defaultConfig.put("transport.port", "0"); + defaultConfig.put("transport.port", transportPort); } else { - defaultConfig.put("transport.tcp.port", "0"); + defaultConfig.put("transport.tcp.port", transportPort); } // Default the watermarks to absurdly low to prevent the tests from failing on nodes without enough disk space defaultConfig.put("cluster.routing.allocation.disk.watermark.low", "1b"); @@ -1287,6 +1292,24 @@ public class ElasticsearchNode implements TestClusterConfiguration { } } + void setHttpPort(String httpPort) { + this.httpPort = httpPort; + } + + void setTransportPort(String transportPort) { + this.transportPort = transportPort; + } + + @Internal + Path getEsStdoutFile() { + return esStdoutFile; + } + + @Internal + Path getEsStderrFile() { + return esStderrFile; + } + private static class FileEntry implements Named { private String name; private File file; diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java new file mode 100644 index 000000000000..d7a7bbcc41ba --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/RunTask.java @@ -0,0 +1,73 @@ +package org.elasticsearch.gradle.testclusters; + +import org.gradle.api.logging.Logger; +import org.gradle.api.logging.Logging; +import org.gradle.api.tasks.Input; +import org.gradle.api.tasks.TaskAction; +import org.gradle.api.tasks.options.Option; + +import java.io.BufferedReader; +import java.io.IOException; +import java.nio.file.Files; +import java.util.HashSet; +import java.util.Set; + +public class RunTask extends DefaultTestClustersTask { + + private static final Logger logger = Logging.getLogger(RunTask.class); + + private Boolean debug = false; + + @Option( + option = "debug-jvm", + description = "Enable debugging configuration, to allow attaching a debugger to elasticsearch." 
+    )
+    public void setDebug(boolean enabled) {
+        this.debug = enabled;
+    }
+
+    @Input
+    public Boolean getDebug() {
+        return debug;
+    }
+
+    @Override
+    public void beforeStart() {
+        int debugPort = 8000;
+        int httpPort = 9200;
+        int transportPort = 9300;
+        for (ElasticsearchCluster cluster : getClusters()) {
+            cluster.getFirstNode().setHttpPort(String.valueOf(httpPort));
+            httpPort++;
+            cluster.getFirstNode().setTransportPort(String.valueOf(transportPort));
+            transportPort++;
+            for (ElasticsearchNode node : cluster.getNodes()) {
+                if (debug) {
+                    logger.lifecycle(
+                        "Running elasticsearch in debug mode, {} suspending until connected on debugPort {}",
+                        node, debugPort
+                    );
+                    node.jvmArgs("-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=" + debugPort);
+                    debugPort += 1;
+                }
+            }
+        }
+    }
+
+    @TaskAction
+    public void runAndWait() throws IOException {
+        Set<BufferedReader> toRead = new HashSet<>();
+        for (ElasticsearchCluster cluster : getClusters()) {
+            for (ElasticsearchNode node : cluster.getNodes()) {
+                toRead.add(Files.newBufferedReader(node.getEsStdoutFile()));
+            }
+        }
+        while (Thread.currentThread().isInterrupted() == false) {
+            for (BufferedReader bufferedReader : toRead) {
+                if (bufferedReader.ready()) {
+                    logger.lifecycle(bufferedReader.readLine());
+                }
+            }
+        }
+    }
+}
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java
index 1669a62d57b5..8cbb4ca13321 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java
@@ -18,9 +18,13 @@ interface TestClustersAware extends Task {
             );
         }
 
-        cluster.getNodes().stream().flatMap(node -> node.getDistributions().stream()).forEach( distro ->
+        cluster.getNodes().stream().flatMap(node -> node.getDistributions().stream()).forEach(distro ->
             dependsOn(distro.getExtracted())
         );
         getClusters().add(cluster);
     }
+
+    default void beforeStart() {
+    }
+
 }
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java
index 9e307fca5d3c..da10235a9d9a 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java
@@ -123,7 +123,9 @@ public class TestClustersPlugin implements Plugin<Project> {
             return;
         }
         // we only start the cluster before the actions, so we'll not start it if the task is up-to-date
-        ((TestClustersAware) task).getClusters().forEach(registry::maybeStartCluster);
+        TestClustersAware awareTask = (TestClustersAware) task;
+        awareTask.beforeStart();
+        awareTask.getClusters().forEach(registry::maybeStartCluster);
     }
 
     @Override
     public void afterActions(Task task) {}
diff --git a/distribution/build.gradle b/distribution/build.gradle
index f20e5a654260..384fa8d923fa 100644
--- a/distribution/build.gradle
+++ b/distribution/build.gradle
@@ -23,11 +23,13 @@ import org.elasticsearch.gradle.ConcatFilesTask
 import org.elasticsearch.gradle.MavenFilteringHack
 import org.elasticsearch.gradle.NoticeTask
 import org.elasticsearch.gradle.VersionProperties
-import org.elasticsearch.gradle.test.RunTask
+import org.elasticsearch.gradle.testclusters.RunTask
 
 import java.nio.file.Files
 import java.nio.file.Path
 
+apply plugin: 'elasticsearch.testclusters'
+
/***************************************************************************** * Third party dependencies report * *****************************************************************************/ @@ -414,28 +416,32 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { } -task run(type: RunTask) { - distribution = System.getProperty('run.distribution', 'default') - if (distribution == 'default') { - String licenseType = System.getProperty("run.license_type", "basic") - if (licenseType == 'trial') { - setting 'xpack.ml.enabled', 'true' - setting 'xpack.graph.enabled', 'true' - setting 'xpack.watcher.enabled', 'true' - setting 'xpack.license.self_generated.type', 'trial' - } else if (licenseType != 'basic') { - throw new IllegalArgumentException("Unsupported self-generated license type: [" + licenseType + "[basic] or [trial].") +testClusters { + runTask { + testDistribution = System.getProperty('run.distribution', 'default') + if (System.getProperty('run.distribution', 'default') == 'default') { + String licenseType = System.getProperty("run.license_type", "basic") + if (licenseType == 'trial') { + setting 'xpack.ml.enabled', 'true' + setting 'xpack.graph.enabled', 'true' + setting 'xpack.watcher.enabled', 'true' + setting 'xpack.license.self_generated.type', 'trial' + } else if (licenseType != 'basic') { + throw new IllegalArgumentException("Unsupported self-generated license type: [" + licenseType + "[basic] or [trial].") + } + setting 'xpack.security.enabled', 'true' + setting 'xpack.monitoring.enabled', 'true' + setting 'xpack.sql.enabled', 'true' + setting 'xpack.rollup.enabled', 'true' + keystore 'bootstrap.password', 'password' } - setupCommand 'setupTestAdmin', - 'bin/elasticsearch-users', 'useradd', 'elastic-admin', '-p', 'elastic-password', '-r', 'superuser' - setting 'xpack.security.enabled', 'true' - setting 'xpack.monitoring.enabled', 'true' - setting 'xpack.sql.enabled', 'true' - setting 'xpack.rollup.enabled', 'true' - keystoreSetting 'bootstrap.password', 'password' } } +task run(type: RunTask) { + useCluster testClusters.runTask; +} + /** * Build some variables that are replaced in the packages. This includes both * scripts like bin/elasticsearch and bin/elasticsearch-plugin that a user might run and also diff --git a/modules/build.gradle b/modules/build.gradle index f8c5e7ec142b..e1ecc1f6cc26 100644 --- a/modules/build.gradle +++ b/modules/build.gradle @@ -26,11 +26,6 @@ configure(subprojects.findAll { it.parent.path == project.path }) { // for local ES plugins, the name of the plugin is the same as the directory name project.name } - - run { - // these cannot be run with the normal distribution, since they are included in it! - distribution = 'integ-test-zip' - } if (project.file('src/main/packaging').exists()) { throw new InvalidModelException("Modules cannot contain packaging files") diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index 30e222cec12d..c9eadd7a3f72 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -16,10 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ - -import org.elasticsearch.gradle.test.ClusterConfiguration -import org.elasticsearch.gradle.test.ClusterFormationTasks - +import org.elasticsearch.gradle.testclusters.DefaultTestClustersTask; esplugin { description 'An easy, safe and fast scripting language for Elasticsearch' classname 'org.elasticsearch.painless.PainlessPlugin' @@ -76,16 +73,21 @@ dependencies { docCompile project(':modules:lang-painless') } -ClusterConfiguration clusterConfig = project.extensions.create("generateContextCluster", ClusterConfiguration.class, project) -gradle.projectsEvaluated { - project.ext.generateContextNodes = ClusterFormationTasks.setup(project, "generateContextCluster", generateContextDoc, clusterConfig) +testClusters { + generateContextCluster { + testDistribution = 'DEFAULT' + } } -clusterConfig.distribution = 'default' -task generateContextDoc(type: JavaExec) { - main = 'org.elasticsearch.painless.ContextDocGenerator' - classpath = sourceSets.doc.runtimeClasspath - systemProperty "cluster.uri", "${-> project.ext.generateContextNodes.collect { it.httpUri() }.join(',') }" +task generateContextDoc(type: DefaultTestClustersTask) { + useCluster testClusters.generateContextCluster + doFirst { + project.javaexec { + main = 'org.elasticsearch.painless.ContextDocGenerator' + classpath = sourceSets.doc.runtimeClasspath + systemProperty "cluster.uri", "${-> testClusters.generateContextCluster.singleNode().getAllHttpSocketURI()}" + }.assertNormalExitValue() + } } /********************************************** diff --git a/modules/rank-eval/build.gradle b/modules/rank-eval/build.gradle index 3e8e40ae89c4..35f8fef5176a 100644 --- a/modules/rank-eval/build.gradle +++ b/modules/rank-eval/build.gradle @@ -26,8 +26,3 @@ testClusters.integTest { // Modules who's integration is explicitly tested in integration tests module file(project(':modules:lang-mustache').tasks.bundlePlugin.archiveFile) } - -run { - // Modules who's integration is explicitly tested in integration tests - module project(':modules:lang-mustache') -} diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index 78846e2d81dd..9e645b0c778b 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -36,14 +36,6 @@ testClusters.integTest { setting 'reindex.remote.whitelist', '127.0.0.1:*' } -run { - // Modules who's integration is explicitly tested in integration tests - module project(':modules:parent-join') - module project(':modules:lang-painless') - // Whitelist reindexing from the local node so we can test reindex-from-remote. 
- setting 'reindex.remote.whitelist', '127.0.0.1:*' -} - test { /* * We have to disable setting the number of available processors as tests in the diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 0854c9e4036b..54c3c48fda7d 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.test.NodeInfo - import java.nio.charset.StandardCharsets apply plugin: 'elasticsearch.docs-test' diff --git a/x-pack/plugin/sql/qa/security/with-ssl/build.gradle b/x-pack/plugin/sql/qa/security/with-ssl/build.gradle index d73a181bef10..6a6179ae393f 100644 --- a/x-pack/plugin/sql/qa/security/with-ssl/build.gradle +++ b/x-pack/plugin/sql/qa/security/with-ssl/build.gradle @@ -1,6 +1,5 @@ import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.LoggedExec -import org.elasticsearch.gradle.test.NodeInfo import javax.net.ssl.HttpsURLConnection import javax.net.ssl.KeyManagerFactory diff --git a/x-pack/qa/smoke-test-plugins-ssl/build.gradle b/x-pack/qa/smoke-test-plugins-ssl/build.gradle index 429891e2a973..de1a531bc0cd 100644 --- a/x-pack/qa/smoke-test-plugins-ssl/build.gradle +++ b/x-pack/qa/smoke-test-plugins-ssl/build.gradle @@ -1,5 +1,5 @@ import org.elasticsearch.gradle.MavenFilteringHack -import org.elasticsearch.gradle.test.NodeInfo + import org.elasticsearch.gradle.http.WaitForHttpResource apply plugin: 'elasticsearch.testclusters' From 29c6da75db628c71f2d52fc6eb6f22ede65e6f68 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Tue, 8 Oct 2019 08:34:39 -0400 Subject: [PATCH 47/55] [DOCS] Fix errors in rollover index API docs (#47702) --- .../reference/indices/rollover-index.asciidoc | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/docs/reference/indices/rollover-index.asciidoc b/docs/reference/indices/rollover-index.asciidoc index 294c38790e1b..cbc0dfc081ee 100644 --- a/docs/reference/indices/rollover-index.asciidoc +++ b/docs/reference/indices/rollover-index.asciidoc @@ -42,19 +42,24 @@ NOTE: To roll over an index, a condition must be met *when you call the API*. automatically roll over indices when a condition is met, you can use {es}'s <>. -The API accepts a single alias name and a list of `conditions`. The alias must point to a write index for -a Rollover request to be valid. There are two ways this can be achieved, and depending on the configuration, the -alias metadata will be updated differently. The two scenarios are as follows: +The rollover index API accepts a single alias name +and a list of `conditions`. - - The alias only points to a single index with `is_write_index` not configured (defaults to `null`). +If the specified alias points to a single index, +the rollover request: -In this scenario, the original index will have their rollover alias will be added to the newly created index, and removed -from the original (rolled-over) index. +. Creates a new index +. Adds the alias to the new index +. Removes the alias from the original index - - The alias points to one or more indices with `is_write_index` set to `true` on the index to be rolled over (the write index). +If the specified alias points to multiple indices, +one of these indices must have `is_write_index` set to `true`. +In this case, +the rollover request: -In this scenario, the write index will have its rollover alias' `is_write_index` set to `false`, while the newly created index -will now have the rollover alias pointing to it as the write index with `is_write_index` as `true`. +. Creates a new index +. 
Sets `is_write_index` to `true` for the new index
+. Sets `is_write_index` to `false` for the original index
 
 
 [[rollover-wait-active-shards]]

From 71fab46950e89341df8c9cab493569e8807abcbb Mon Sep 17 00:00:00 2001
From: Lee Hinman
Date: Tue, 8 Oct 2019 08:32:15 -0600
Subject: [PATCH 48/55] Add Snapshot Lifecycle Retention documentation (#47545)

* Add Snapshot Lifecycle Retention documentation

This commit adds API and general purpose documentation for SLM retention.

Relates to #43663

* Fix docs tests

* Update default now that #47604 has been merged

* Update docs/reference/ilm/apis/slm-api.asciidoc

Co-Authored-By: Gordon Brown

* Update docs/reference/ilm/apis/slm-api.asciidoc

Co-Authored-By: Gordon Brown

* Update docs with feedback
---
 docs/reference/ilm/apis/slm-api.asciidoc      | 75 ++++++++---
 .../ilm/getting-started-slm.asciidoc          | 30 ++++-
 docs/reference/ilm/index.asciidoc             |  2 +
 docs/reference/ilm/slm-retention.asciidoc     | 119 ++++++++++++++++++
 4 files changed, 205 insertions(+), 21 deletions(-)
 create mode 100644 docs/reference/ilm/slm-retention.asciidoc

diff --git a/docs/reference/ilm/apis/slm-api.asciidoc b/docs/reference/ilm/apis/slm-api.asciidoc
index 4ac7a0b45331..9522ccb7b76c 100644
--- a/docs/reference/ilm/apis/slm-api.asciidoc
+++ b/docs/reference/ilm/apis/slm-api.asciidoc
@@ -7,7 +7,9 @@ The Snapshot Lifecycle Management APIs are used to manage policies for the time
 and frequency of automatic snapshots. Snapshot Lifecycle Management is related
 to <>, however, instead of managing a lifecycle of actions that are performed
 on a single index, SLM
-allows configuring policies spanning multiple indices.
+allows configuring policies spanning multiple indices. Snapshot Lifecycle
+Management can also perform deletion of older snapshots based on a configurable
+retention policy.
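+
+As a rough, non-normative illustration of the HTTP surface documented below,
+the following Java sketch registers such a policy using the JDK 11
+`java.net.http` client. The endpoint and body fields mirror the examples in
+this patch; the class name, the `localhost:9200` address, and the
+`daily-snapshots` policy id are assumptions made only for the sketch.
+
+[source,java]
+--------------------------------------------------
+import java.net.URI;
+import java.net.http.HttpClient;
+import java.net.http.HttpRequest;
+import java.net.http.HttpResponse;
+
+public class PutSlmPolicySketch {
+    public static void main(String[] args) throws Exception {
+        // Mirrors the documented policy body: a nightly cron schedule, a
+        // target repository, and an optional retention block. The "name"
+        // value is elided here, just as it is in the doc examples.
+        String policy = "{"
+            + "\"schedule\": \"0 30 1 * * ?\","
+            + "\"name\": \"\","
+            + "\"repository\": \"my_repository\","
+            + "\"config\": {\"indices\": [\"data-*\", \"important\"]},"
+            + "\"retention\": {\"expire_after\": \"30d\", \"min_count\": 5, \"max_count\": 50}"
+            + "}";
+        HttpRequest request = HttpRequest.newBuilder()
+            .uri(URI.create("http://localhost:9200/_slm/policy/daily-snapshots")) // assumed local node
+            .header("Content-Type", "application/json")
+            .PUT(HttpRequest.BodyPublishers.ofString(policy))
+            .build();
+        HttpResponse<String> response = HttpClient.newHttpClient()
+            .send(request, HttpResponse.BodyHandlers.ofString());
+        System.out.println(response.statusCode() + " " + response.body());
+    }
+}
+--------------------------------------------------
+
+The console examples that follow show the same requests in their native form.
+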
SLM policy management is split into three different CRUD APIs, a way to put or update policies, a way to retrieve policies, and a way to delete unwanted policies, as @@ -62,7 +64,11 @@ PUT /_slm/policy/daily-snapshots "ignore_unavailable": false, "include_global_state": false }, - "retention": {} + "retention": { <6> + "expire_after": "30d", <7> + "min_count": 5, <8> + "max_count": 50 <9> + } } -------------------------------------------------- // TEST[setup:setup-repository] @@ -72,6 +78,10 @@ PUT /_slm/policy/daily-snapshots <3> Which repository to take the snapshot in <4> Any extra snapshot configuration <5> Which indices the snapshot should contain +<6> Optional retention configuration +<7> Keep snapshots for 30 days +<8> Always keep at least 5 successful snapshots, even if they're more than 30 days old +<9> Keep no more than 50 successful snapshots, even if they're less than 30 days old The top-level keys that the policy supports are described below: @@ -139,7 +149,11 @@ The output looks similar to the following: "ignore_unavailable": false, "include_global_state": false }, - "retention": {} + "retention": { + "expire_after": "30d", + "min_count": 5, + "max_count": 50 + } }, "stats": { "policy": "daily-snapshots", @@ -229,7 +243,11 @@ Which, in this case shows an error because the index did not exist: "ignore_unavailable": false, "include_global_state": false }, - "retention": {} + "retention": { + "expire_after": "30d", + "min_count": 5, + "max_count": 50 + } }, "stats": { "policy": "daily-snapshots", @@ -270,6 +288,11 @@ PUT /_slm/policy/daily-snapshots "indices": ["data-*", "important"], "ignore_unavailable": true, "include_global_state": false + }, + "retention": { + "expire_after": "30d", + "min_count": 5, + "max_count": 50 } } -------------------------------------------------- @@ -318,7 +341,11 @@ Which now includes the successful snapshot information: "ignore_unavailable": true, "include_global_state": false }, - "retention": {} + "retention": { + "expire_after": "30d", + "min_count": 5, + "max_count": 50 + } }, "stats": { "policy": "daily-snapshots", @@ -374,22 +401,14 @@ Which returns a response similar to: "retention_timed_out": 0, "retention_deletion_time": "1.4s", "retention_deletion_time_millis": 1404, - "policy_metrics": [ - { - "policy": "daily-snapshots", - "snapshots_taken": 1, - "snapshots_failed": 1, - "snapshots_deleted": 0, - "snapshot_deletion_failures": 0 - } - ], + "policy_stats": [ ], "total_snapshots_taken": 1, "total_snapshots_failed": 1, "total_snapshots_deleted": 0, "total_snapshot_deletion_failures": 0 } -------------------------------------------------- -// TESTRESPONSE[s/runs": 13/runs": $body.retention_runs/ s/_failed": 0/_failed": $body.retention_failed/ s/_timed_out": 0/_timed_out": $body.retention_timed_out/ s/"1.4s"/$body.retention_deletion_time/ s/1404/$body.retention_deletion_time_millis/] +// TESTRESPONSE[s/runs": 13/runs": $body.retention_runs/ s/_failed": 0/_failed": $body.retention_failed/ s/_timed_out": 0/_timed_out": $body.retention_timed_out/ s/"1.4s"/$body.retention_deletion_time/ s/1404/$body.retention_deletion_time_millis/ s/total_snapshots_taken": 1/total_snapshots_taken": $body.total_snapshots_taken/ s/total_snapshots_failed": 1/total_snapshots_failed": $body.total_snapshots_failed/ s/"policy_stats": [.*]/"policy_stats": $body.policy_stats/] [[slm-api-delete]] === Delete Snapshot Lifecycle Policy API @@ -410,3 +429,29 @@ any currently ongoing snapshots or remove any previously taken snapshots. 
DELETE /_slm/policy/daily-snapshots
--------------------------------------------------
// TEST[continued]
+
+[[slm-api-execute-retention]]
+=== Execute Snapshot Lifecycle Retention API
+
+While Snapshot Lifecycle Management retention is usually invoked through the global cluster settings
+for its schedule, it can sometimes be useful to invoke a retention run to expunge expired snapshots
+immediately. This API allows you to trigger such a one-off retention run.
+
+==== Example
+
+To immediately start snapshot retention, use the following:
+
+[source,console]
+--------------------------------------------------
+POST /_slm/_execute_retention
+--------------------------------------------------
+
+This API will immediately return, as retention will be run asynchronously in the background:
+
+[source,console-result]
+--------------------------------------------------
+{
+    "acknowledged": true
+}
+--------------------------------------------------
+
diff --git a/docs/reference/ilm/getting-started-slm.asciidoc b/docs/reference/ilm/getting-started-slm.asciidoc
index c41d4da4092e..54ebef9a8dd3 100644
--- a/docs/reference/ilm/getting-started-slm.asciidoc
+++ b/docs/reference/ilm/getting-started-slm.asciidoc
@@ -6,7 +6,8 @@
 Let's get started with snapshot lifecycle management (SLM) by working through a
 hands-on scenario. The goal of this example is to automatically back up {es}
 indices using the <> every day at a particular
-time.
+time. Once these snapshots have been created, they are kept for a configured
+amount of time and then deleted per a configured retention policy.
 
 [float]
 [[slm-and-security]]
 Before starting, it's important to understand the privileges that are needed
 when configuring SLM if you are using the security plugin. There are two
 built-in cluster privileges that can be used to assist: `manage_slm` and
-`read_slm`. It's also good to note that the `create_snapshot` permission
-allows taking snapshots even for indices the role may not have access to.
+`read_slm`. It's also good to note that the `cluster:admin/snapshot/*`
+permission allows taking and deleting snapshots even for indices the role may
+not have access to.
 
 An example of configuring an administrator role for SLM follows:
 
 [source,console]
 -----------------------------------
 POST /_security/role/slm-admin
 {
-  "cluster": ["manage_slm", "create_snapshot"],
+  "cluster": ["manage_slm", "cluster:admin/snapshot/*"],
   "indices": [
     {
       "names": [".slm-history-*"],
@@ -82,6 +84,10 @@
 snapshots, what the snapshots should be named, and which indices should be
 included, among other things. We'll use the <> API to create the policy.
+
+When configuring a policy, retention can also optionally be configured. See
+the <> documentation for full details of
+how retention works.
+
 [source,console]
 --------------------------------------------------
 PUT /_slm/policy/nightly-snapshots
 {
   "schedule": "0 30 1 * * ?", <1>
   "name": "", <2>
   "repository": "my_repository", <3>
   "config": { <4>
     "indices": ["*"] <5>
   },
-  "retention": {}
+  "retention": { <6>
+    "expire_after": "30d", <7>
+    "min_count": 5, <8>
+    "max_count": 50 <9>
+  }
 }
 --------------------------------------------------
 // TEST[continued]
@@ -105,6 +115,10 @@ PUT /_slm/policy/nightly-snapshots
 <3> the repository the snapshot should be stored in
 <4> the configuration to be used for the snapshot requests (see below)
 <5> which indices should be included in the snapshot, in this case, every index
+<6> Optional retention configuration
+<7> Keep snapshots for 30 days
+<8> Always keep at least 5 successful snapshots
+<9> Keep no more than 50 successful snapshots, even if they're less than 30 days old
 
 This policy will take a snapshot of every index each day at 1:30AM UTC.
 Snapshots are incremental, allowing frequent snapshots to be stored efficiently,
@@ -166,7 +180,11 @@ next time the policy will be executed.
     "config": {
       "indices": ["*"],
     },
-    "retention": {}
+    "retention": {
+      "expire_after": "30d",
+      "min_count": 5,
+      "max_count": 50
+    }
   },
   "last_success": { <1>
     "snapshot_name": "nightly-snap-2019.04.24-tmtnyjtrsxkhbrrdcgg18a", <2>
diff --git a/docs/reference/ilm/index.asciidoc b/docs/reference/ilm/index.asciidoc
index 3ace2efe95bf..10af04f8a14b 100644
--- a/docs/reference/ilm/index.asciidoc
+++ b/docs/reference/ilm/index.asciidoc
@@ -87,3 +87,5 @@ include::start-stop-ilm.asciidoc[]
 include::ilm-with-existing-indices.asciidoc[]
 
 include::getting-started-slm.asciidoc[]
+
+include::slm-retention.asciidoc[]
diff --git a/docs/reference/ilm/slm-retention.asciidoc b/docs/reference/ilm/slm-retention.asciidoc
new file mode 100644
index 000000000000..6362af3e3d5b
--- /dev/null
+++ b/docs/reference/ilm/slm-retention.asciidoc
@@ -0,0 +1,119 @@
+[role="xpack"]
+[testenv="basic"]
+[[slm-retention]]
+== Snapshot lifecycle management retention
+
+Automatic deletion of older snapshots is an optional feature of snapshot lifecycle management.
+Retention is run as a cluster-level task that is not associated with a particular policy's schedule
+(though the configuration of which snapshots to keep is done on a per-policy basis). Retention
+configuration consists of two parts: the first is a cluster-level configuration for when retention
+is run and for how long; the second is configured on a policy and controls which snapshots should
+be eligible for retention.
+
+The cluster-level settings for retention are shown below, and can be changed dynamically using the
+<> API:
+
+|=====================================
+| Setting | Default value | Description
+
+| `slm.retention_schedule` | `0 30 1 * * ?` | A periodic or absolute time schedule for when
+  retention should be run. Supports all values supported by the cron scheduler: <>. Retention can also be manually run using the
+  <>. Defaults to daily at 1:30am in the master
+  node's timezone.
+
+| `slm.retention_duration` | `"1h"` | A limit of how long SLM should spend deleting old snapshots.
+|=====================================
+
+Policy level configuration for retention is done inside the `retention` object when creating or
+updating a policy. All of the retention configuration options are optional.
+
+[source,console]
+--------------------------------------------------
+PUT /_slm/policy/daily-snapshots
+{
+  "schedule": "0 30 1 * * ?",
+  "name": "",
+  "repository": "my_repository",
+  "retention": { <1>
+    "expire_after": "30d", <2>
+    "min_count": 5, <3>
+    "max_count": 50 <4>
+  }
+}
+--------------------------------------------------
+// TEST[setup:setup-repository]
+<1> Optional retention configuration
+<2> Keep snapshots for 30 days
+<3> Always keep at least 5 successful snapshots
+<4> Keep no more than 50 successful snapshots
+
+The supported retention configuration options within a policy are as follows. The default value for
+each is unset unless specified by the user in the policy configuration.
+
+NOTE: The oldest snapshots are always deleted first; in the case of a `max_count` of 5 for a policy
+with 6 snapshots, the oldest snapshot will be deleted.
+
+|=====================================
+| Setting | Description
+| `expire_after` | A time value for how old a snapshot must be in order to be eligible for deletion.
+| `min_count` | A minimum number of snapshots to keep, regardless of age.
+| `max_count` | The maximum number of snapshots to keep, regardless of age.
+|=====================================
+
+As an example, the retention setting in the policy configured above would read in English as:
+
+____
+Remove snapshots older than thirty days, but always keep the latest five snapshots. If there are
+more than fifty snapshots, remove the oldest surplus snapshots until there are no more than fifty
+successful snapshots.
+____
+
+If multiple policies are configured to snapshot to the same repository, or manual snapshots have
+been taken without using the <>, they are treated as not
+eligible for retention, and do not count towards any limits. This allows multiple policies to have
+differing retention configuration while using the same snapshot repository.
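+
+To make the interaction of the three retention options concrete, the following Java sketch selects
+which snapshots would be eligible for deletion under the rules quoted above. This is illustrative
+pseudologic only, not the actual SLM implementation; the class and method names are invented for
+the sketch.
+
+[source,java]
+--------------------------------------------------
+import java.time.Duration;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.List;
+
+class RetentionSketch {
+    /**
+     * Returns the snapshot start times eligible for deletion, given start
+     * times sorted oldest-first. A snapshot is deleted when it is older than
+     * expireAfter, or when it is surplus over maxCount, but the minCount
+     * most recent snapshots are always kept regardless of age.
+     */
+    static List<Instant> eligibleForDeletion(List<Instant> oldestFirst,
+                                             Duration expireAfter, int minCount, int maxCount) {
+        Instant cutoff = Instant.now().minus(expireAfter);
+        List<Instant> toDelete = new ArrayList<>();
+        int remaining = oldestFirst.size();
+        for (Instant snapshot : oldestFirst) {
+            boolean tooOld = snapshot.isBefore(cutoff);
+            boolean overMax = remaining > maxCount;
+            // min_count wins: never delete below the configured minimum
+            if ((tooOld || overMax) && remaining > minCount) {
+                toDelete.add(snapshot);
+                remaining--;
+            }
+        }
+        return toDelete;
+    }
+}
+--------------------------------------------------
+
+Run against six snapshots with a `max_count` of 5, the sketch deletes exactly the oldest one,
+matching the NOTE above; with a `min_count` of 5 it never deletes past the five most recent
+snapshots, however old they are.
+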
+ +Statistics for snapshot retention can be retrieved using the <>: + +[source,console] +-------------------------------------------------- +GET /_slm/stats +-------------------------------------------------- +// TEST[continued] + +Which returns a response + +[source,js] +-------------------------------------------------- +{ + "retention_runs": 13, <1> + "retention_failed": 0, <2> + "retention_timed_out": 0, <3> + "retention_deletion_time": "1.4s", <4> + "retention_deletion_time_millis": 1404, + "policy_stats": [ + { + "policy": "daily-snapshots", + "snapshots_taken": 1, + "snapshots_failed": 1, + "snapshots_deleted": 0, <5> + "snapshot_deletion_failures": 0 <6> + } + ], + "total_snapshots_taken": 1, + "total_snapshots_failed": 1, + "total_snapshots_deleted": 0, <7> + "total_snapshot_deletion_failures": 0 <8> +} +-------------------------------------------------- +// TESTRESPONSE[skip:this is not actually running retention] +<1> Number of times retention has been run +<2> Number of times retention failed while running +<3> Number of times retention hit the `slm.retention_duration` time limit and had to stop before deleting all eligible snapshots +<4> Total time spent deleting snapshots by the retention process +<5> Number of snapshots created by the "daily-snapshots" policy that have been deleted +<6> Number of snapshots that failed to be deleted +<7> Total number of snapshots deleted across all policies +<8> Total number of snapshot deletion failures across all policies From 224676713c6c8249677889257f792108c5f9c863 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Tue, 8 Oct 2019 19:32:58 +0300 Subject: [PATCH 49/55] [ML] Add basic BWC tests for data frame analytics (#47212) --- x-pack/qa/rolling-upgrade/build.gradle | 5 +- .../90_ml_data_frame_analytics_crud.yml | 143 ++++++++++++++++++ .../90_ml_data_frame_analytics_crud.yml | 64 ++++++++ .../90_ml_data_frame_analytics_crud.yml | 80 ++++++++++ 4 files changed, 290 insertions(+), 2 deletions(-) create mode 100644 x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/90_ml_data_frame_analytics_crud.yml create mode 100644 x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/90_ml_data_frame_analytics_crud.yml create mode 100644 x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/90_ml_data_frame_analytics_crud.yml diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index 3408beb3c348..7abad034dd1f 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -132,7 +132,8 @@ for (Version bwcVersion : bwcVersions.wireCompatible) { 'mixed_cluster/40_ml_datafeed_crud/Put job and datafeed without aggs in mixed cluster', 'mixed_cluster/40_ml_datafeed_crud/Put job and datafeed with aggs in mixed cluster', 'mixed_cluster/80_data_frame_jobs_crud/Test put batch data frame transforms on mixed cluster', - 'mixed_cluster/80_data_frame_jobs_crud/Test put continuous data frame transform on mixed cluster' + 'mixed_cluster/80_data_frame_jobs_crud/Test put continuous data frame transform on mixed cluster', + 'mixed_cluster/90_ml_data_frame_analytics_crud/Put an outlier_detection job on the mixed cluster' ].join(',') } @@ -179,4 +180,4 @@ task bwcTestSnapshots { } check.dependsOn(bwcTestSnapshots) -test.enabled = false \ No newline at end of file +test.enabled = false diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/90_ml_data_frame_analytics_crud.yml 
b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/90_ml_data_frame_analytics_crud.yml new file mode 100644 index 000000000000..ae6364054365 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/90_ml_data_frame_analytics_crud.yml @@ -0,0 +1,143 @@ +--- +"Get old outlier_detection job": + + - do: + ml.get_data_frame_analytics: + id: "old_cluster_outlier_detection_job" + - match: { count: 1 } + - match: { data_frame_analytics.0.id: "old_cluster_outlier_detection_job" } + - match: { data_frame_analytics.0.source.index: ["bwc_ml_outlier_detection_job_source"] } + - match: { data_frame_analytics.0.source.query: {"term" : { "user" : "Kimchy" }} } + - match: { data_frame_analytics.0.dest.index: "old_cluster_outlier_detection_job_results" } + - match: { data_frame_analytics.0.analysis: { + "outlier_detection":{ + "compute_feature_influence": true, + "outlier_fraction": 0.05, + "standardization_enabled": true + } + }} + +--- +"Get old outlier_detection job stats": + + - do: + ml.get_data_frame_analytics_stats: + id: "old_cluster_outlier_detection_job" + - match: { count: 1 } + - match: { data_frame_analytics.0.id: "old_cluster_outlier_detection_job" } + - match: { data_frame_analytics.0.state: "stopped" } + +--- +"Start and stop old outlier_detection job": + + - do: + ml.start_data_frame_analytics: + id: "old_cluster_outlier_detection_job" + - match: { acknowledged: true } + + - do: + ml.stop_data_frame_analytics: + id: "old_cluster_outlier_detection_job" + - match: { stopped: true } + + - do: + ml.get_data_frame_analytics_stats: + id: "old_cluster_outlier_detection_job" + - match: { count: 1 } + - match: { data_frame_analytics.0.id: "old_cluster_outlier_detection_job" } + - match: { data_frame_analytics.0.state: "stopped" } + +--- +"Get old regression job": + + - do: + ml.get_data_frame_analytics: + id: "old_cluster_regression_job" + - match: { count: 1 } + - match: { data_frame_analytics.0.id: "old_cluster_regression_job" } + - match: { data_frame_analytics.0.source.index: ["bwc_ml_regression_job_source"] } + - match: { data_frame_analytics.0.source.query: {"term": { "user": "Kimchy" }} } + - match: { data_frame_analytics.0.dest.index: "old_cluster_regression_job_results" } + - match: { data_frame_analytics.0.analysis: {"regression":{ "dependent_variable": "foo", "training_percent": 100.0 }} } + +--- +"Get old regression job stats": + + - do: + ml.get_data_frame_analytics_stats: + id: "old_cluster_regression_job" + - match: { count: 1 } + - match: { data_frame_analytics.0.id: "old_cluster_regression_job" } + - match: { data_frame_analytics.0.state: "stopped" } + +--- +"Start and stop old regression job": + + - do: + ml.start_data_frame_analytics: + id: "old_cluster_regression_job" + - match: { acknowledged: true } + + - do: + ml.stop_data_frame_analytics: + id: "old_cluster_regression_job" + - match: { stopped: true } + + - do: + ml.get_data_frame_analytics_stats: + id: "old_cluster_regression_job" + - match: { count: 1 } + - match: { data_frame_analytics.0.id: "old_cluster_regression_job" } + - match: { data_frame_analytics.0.state: "stopped" } + +--- +"Put an outlier_detection job on the mixed cluster": + + - do: + ml.put_data_frame_analytics: + id: "mixed_cluster_outlier_detection_job" + body: > + { + "source": { + "index": "bwc_ml_outlier_detection_job_source", + "query": {"term" : { "user" : "Kimchy" }} + }, + "dest": { + "index": "mixed_cluster_outlier_detection_job_results" + }, + "analysis": {"outlier_detection":{}} 
+ } + - match: { id: "mixed_cluster_outlier_detection_job" } + + - do: + ml.get_data_frame_analytics: + id: "mixed_cluster_outlier_detection_job" + - match: { count: 1 } + - match: { data_frame_analytics.0.id: "mixed_cluster_outlier_detection_job" } + - match: { data_frame_analytics.0.source.index: ["bwc_ml_outlier_detection_job_source"] } + - match: { data_frame_analytics.0.source.query: {"term": { "user": "Kimchy" }} } + - match: { data_frame_analytics.0.dest.index: "mixed_cluster_outlier_detection_job_results" } + - match: { data_frame_analytics.0.analysis: { + "outlier_detection":{ + "compute_feature_influence": true, + "outlier_fraction": 0.05, + "standardization_enabled": true + } + }} + + - do: + ml.start_data_frame_analytics: + id: "mixed_cluster_outlier_detection_job" + - match: { acknowledged: true } + + - do: + ml.stop_data_frame_analytics: + id: "mixed_cluster_outlier_detection_job" + - match: { stopped: true } + + - do: + ml.get_data_frame_analytics_stats: + id: "mixed_cluster_outlier_detection_job" + - match: { count: 1 } + - match: { data_frame_analytics.0.id: "mixed_cluster_outlier_detection_job" } + - match: { data_frame_analytics.0.state: "stopped" } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/90_ml_data_frame_analytics_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/90_ml_data_frame_analytics_crud.yml new file mode 100644 index 000000000000..7cfd2fe1fd15 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/90_ml_data_frame_analytics_crud.yml @@ -0,0 +1,64 @@ +setup: + + - do: + index: + index: bwc_ml_outlier_detection_job_source + body: > + { + "numeric_field_1": 42.0 + } + + - do: + index: + index: bwc_ml_regression_job_source + body: > + { + "numeric_field_1": 1.0, + "foo": 10.0 + } + + - do: + indices.refresh: + index: bwc_ml_* + +--- +"Put outlier_detection job on the old cluster": + + - do: + ml.put_data_frame_analytics: + id: "old_cluster_outlier_detection_job" + body: > + { + "source": { + "index": "bwc_ml_outlier_detection_job_source", + "query": {"term" : { "user" : "Kimchy" }} + }, + "dest": { + "index": "old_cluster_outlier_detection_job_results" + }, + "analysis": {"outlier_detection":{}} + } + - match: { id: "old_cluster_outlier_detection_job" } + +--- +"Put regression job on the old cluster": + + - do: + ml.put_data_frame_analytics: + id: "old_cluster_regression_job" + body: > + { + "source": { + "index": "bwc_ml_regression_job_source", + "query": {"term" : { "user" : "Kimchy" }} + }, + "dest": { + "index": "old_cluster_regression_job_results" + }, + "analysis": { + "regression":{ + "dependent_variable": "foo" + } + } + } + - match: { id: "old_cluster_regression_job" } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/90_ml_data_frame_analytics_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/90_ml_data_frame_analytics_crud.yml new file mode 100644 index 000000000000..d4c2d63ec500 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/90_ml_data_frame_analytics_crud.yml @@ -0,0 +1,80 @@ +--- +"Get old cluster outlier_detection job": + + - do: + ml.get_data_frame_analytics: + id: "old_cluster_outlier_detection_job" + - match: { count: 1 } + - match: { data_frame_analytics.0.id: "old_cluster_outlier_detection_job" } + - match: { data_frame_analytics.0.source.index: ["bwc_ml_outlier_detection_job_source"] 
}
+  - match: { data_frame_analytics.0.source.query: {"term": { "user": "Kimchy" }} }
+  - match: { data_frame_analytics.0.dest.index: "old_cluster_outlier_detection_job_results" }
+  - match: { data_frame_analytics.0.analysis: {
+    "outlier_detection":{
+      "compute_feature_influence": true,
+      "outlier_fraction": 0.05,
+      "standardization_enabled": true
+    }
+  }}
+
+---
+"Get old cluster outlier_detection job stats":
+
+  - do:
+      ml.get_data_frame_analytics_stats:
+        id: "old_cluster_outlier_detection_job"
+  - match: { count: 1 }
+  - match: { data_frame_analytics.0.id: "old_cluster_outlier_detection_job" }
+  - match: { data_frame_analytics.0.state: "stopped" }
+
+---
+"Get old cluster regression job":
+
+  - do:
+      ml.get_data_frame_analytics:
+        id: "old_cluster_regression_job"
+  - match: { count: 1 }
+  - match: { data_frame_analytics.0.id: "old_cluster_regression_job" }
+  - match: { data_frame_analytics.0.source.index: ["bwc_ml_regression_job_source"] }
+  - match: { data_frame_analytics.0.source.query: {"term": { "user": "Kimchy" }} }
+  - match: { data_frame_analytics.0.dest.index: "old_cluster_regression_job_results" }
+  - match: { data_frame_analytics.0.analysis: {"regression":{ "dependent_variable": "foo", "training_percent": 100.0 }} }
+
+---
+"Get old cluster regression job stats":
+
+  - do:
+      ml.get_data_frame_analytics_stats:
+        id: "old_cluster_regression_job"
+  - match: { count: 1 }
+  - match: { data_frame_analytics.0.id: "old_cluster_regression_job" }
+  - match: { data_frame_analytics.0.state: "stopped" }
+
+---
+"Get mixed cluster outlier_detection job":
+
+  - do:
+      ml.get_data_frame_analytics:
+        id: "mixed_cluster_outlier_detection_job"
+  - match: { count: 1 }
+  - match: { data_frame_analytics.0.id: "mixed_cluster_outlier_detection_job" }
+  - match: { data_frame_analytics.0.source.index: ["bwc_ml_outlier_detection_job_source"] }
+  - match: { data_frame_analytics.0.source.query: {"term": { "user": "Kimchy" }} }
+  - match: { data_frame_analytics.0.dest.index: "mixed_cluster_outlier_detection_job_results" }
+  - match: { data_frame_analytics.0.analysis: {
+    "outlier_detection":{
+      "compute_feature_influence": true,
+      "outlier_fraction": 0.05,
+      "standardization_enabled": true
+    }
+  }}
+
+---
+"Get mixed cluster outlier_detection job stats":
+
+  - do:
+      ml.get_data_frame_analytics_stats:
+        id: "mixed_cluster_outlier_detection_job"
+  - match: { count: 1 }
+  - match: { data_frame_analytics.0.id: "mixed_cluster_outlier_detection_job" }
+  - match: { data_frame_analytics.0.state: "stopped" }

From e2dc56040adffd2c31b3eac5a50f2603b800fe51 Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Tue, 8 Oct 2019 18:58:22 +0200
Subject: [PATCH 50/55] Make loadShardSnapshot Exceptions Consistent (#47728)

Similar to #47507. With a very low probability, concurrent snapshot
status and snapshot delete operations would throw a `SnapshotException`
where you (and the SLM tests) would expect a `SnapshotMissingException`.
Fixed the exception type and added a test for this scenario.
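
As a generic, hypothetical sketch of the shape of the fix (this is not
the actual repository code, and the stand-in exception types below are
invented): `NoSuchFileException` is a subclass of `IOException`, so the
more specific catch clause must come first in order to surface a missing
blob as its own, well-typed failure instead of a generic one.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.NoSuchFileException;
    import java.nio.file.Path;

    final class BlobReadSketch {
        // IllegalStateException and RuntimeException stand in for
        // SnapshotMissingException and SnapshotException respectively.
        static byte[] readBlobOrThrow(Path blob) {
            try {
                return Files.readAllBytes(blob);
            } catch (NoSuchFileException e) {
                // The blob is simply gone, e.g. after a concurrent delete.
                throw new IllegalStateException("snapshot missing: " + blob, e);
            } catch (IOException e) {
                // Any other I/O problem remains a generic failure.
                throw new RuntimeException("failed to read shard snapshot file: " + blob, e);
            }
        }
    }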
--- .../blobstore/BlobStoreRepository.java | 2 ++ .../AbstractSnapshotIntegTestCase.java | 18 ++++++++++ .../SharedClusterSnapshotRestoreIT.java | 15 -------- .../snapshots/SnapshotStatusApisIT.java | 34 +++++++++++++++++++ 4 files changed, 54 insertions(+), 15 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index c4acfd44106d..a1f90199e223 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -1293,6 +1293,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private BlobStoreIndexShardSnapshot loadShardSnapshot(BlobContainer shardContainer, SnapshotId snapshotId) { try { return indexShardSnapshotFormat.read(shardContainer, snapshotId.getUUID()); + } catch (NoSuchFileException ex) { + throw new SnapshotMissingException(metadata.name(), snapshotId, ex); } catch (IOException ex) { throw new SnapshotException(metadata.name(), snapshotId, "failed to read shard snapshot file for [" + shardContainer.path() + ']', ex); diff --git a/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 3576beaac54e..624c20a18a67 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.snapshots; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; @@ -25,9 +26,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.Repository; +import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; import org.elasticsearch.snapshots.mockstore.MockRepository; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; import java.io.IOException; @@ -40,6 +44,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.List; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -88,6 +93,19 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase { skipRepoConsistencyCheckReason = reason; } + protected RepositoryData getRepositoryData(Repository repository) throws InterruptedException { + ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, internalCluster().getMasterName()); + final SetOnce repositoryData = new SetOnce<>(); + final CountDownLatch latch = new CountDownLatch(1); + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { + repositoryData.set(repository.getRepositoryData()); + latch.countDown(); + }); + + latch.await(); + return repositoryData.get(); + } + public static long getFailureCount(String repository) { long failureCount = 
0; for (RepositoriesService repositoriesService : diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 118bba411d2e..8d676f7fa0a8 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.snapshots; -import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; @@ -105,7 +104,6 @@ import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; @@ -3763,19 +3761,6 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas } } - private RepositoryData getRepositoryData(Repository repository) throws InterruptedException { - ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, internalCluster().getMasterName()); - final SetOnce repositoryData = new SetOnce<>(); - final CountDownLatch latch = new CountDownLatch(1); - threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { - repositoryData.set(repository.getRepositoryData()); - latch.countDown(); - }); - - latch.await(); - return repositoryData.get(); - } - private void verifySnapshotInfo(final String repo, final GetSnapshotsResponse response, final Map> indicesPerSnapshot) { for (SnapshotInfo snapshotInfo : response.getSnapshots("test-repo")) { diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java index 67a8ad3909a1..5060866fa6f0 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java @@ -30,6 +30,8 @@ import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import java.io.IOException; @@ -137,4 +139,36 @@ public class SnapshotStatusApisIT extends AbstractSnapshotIntegTestCase { .getSnapshots(new GetSnapshotsRequest(new String[] {"test-repo"}, new String[] {"test-snap"})).actionGet(); assertThat(snapshotsResponse.getFailedResponses().get("test-repo"), instanceOf(SnapshotMissingException.class)); } + + public void testExceptionOnMissingShardLevelSnapBlob() throws IOException, InterruptedException { + disableRepoConsistencyCheck("This test intentionally corrupts the repository"); + + logger.info("--> creating repository"); + final Path repoPath = randomRepoPath(); + assertAcked(client().admin().cluster().preparePutRepository("test-repo").setType("fs").setSettings( + Settings.builder().put("location", repoPath).build())); + + createIndex("test-idx-1"); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 100; i++) { + index("test-idx-1", "_doc", Integer.toString(i), "foo", "bar" + i); + } + refresh(); + + logger.info("--> 
snapshot"); + final CreateSnapshotResponse response = + client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).get(); + + logger.info("--> delete shard-level snap-${uuid}.dat file for one shard in this snapshot to simulate concurrent delete"); + final RepositoriesService service = internalCluster().getMasterNodeInstance(RepositoriesService.class); + final Repository repository = service.repository("test-repo"); + final String indexRepoId = getRepositoryData(repository).resolveIndexId(response.getSnapshotInfo().indices().get(0)).getId(); + IOUtils.rm(repoPath.resolve("indices").resolve(indexRepoId).resolve("0").resolve( + BlobStoreRepository.SNAPSHOT_PREFIX + response.getSnapshotInfo().snapshotId().getUUID() + ".dat")); + + expectThrows(SnapshotMissingException.class, () -> client().admin().cluster() + .prepareSnapshotStatus("test-repo").setSnapshots("test-snap").execute().actionGet()); + } } From 98611e77cc9931236c7b8f06e4dccd404346fa09 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Tue, 8 Oct 2019 12:28:14 -0600 Subject: [PATCH 51/55] Introduce simple remote connection strategy (#47480) This commit introduces a simple remote connection strategy which will open remote connections to a configurable list of user supplied addresses. These addresses can be remote Elasticsearch nodes or intermediate proxies. We will perform normal clustername and version validation, but otherwise rely on the remote cluster to route requests to the appropriate remote node. --- .../transport/ConnectionManager.java | 6 + .../transport/RemoteConnectionStrategy.java | 17 +- .../transport/SimpleConnectionStrategy.java | 161 +++++++++++ .../transport/SniffConnectionStrategy.java | 18 +- .../SimpleConnectionStrategyTests.java | 268 ++++++++++++++++++ 5 files changed, 454 insertions(+), 16 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/transport/SimpleConnectionStrategy.java create mode 100644 server/src/test/java/org/elasticsearch/transport/SimpleConnectionStrategyTests.java diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java index c11afa088aa5..110053bcee77 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectionManager.java @@ -31,8 +31,10 @@ import org.elasticsearch.common.util.concurrent.RunOnce; import org.elasticsearch.core.internal.io.IOUtils; import java.io.Closeable; +import java.util.Collections; import java.util.Iterator; import java.util.Map; +import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; @@ -216,6 +218,10 @@ public class ConnectionManager implements Closeable { return connectedNodes.size(); } + public Set getAllConnectedNodes() { + return Collections.unmodifiableSet(connectedNodes.keySet()); + } + @Override public void close() { internalClose(true); diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java index 191484deff2f..d327a171920e 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java @@ -39,17 +39,20 @@ import java.util.concurrent.atomic.AtomicBoolean; public abstract class 
RemoteConnectionStrategy implements TransportConnectionListener, Closeable { - protected static final Logger logger = LogManager.getLogger(RemoteConnectionStrategy.class); + private static final Logger logger = LogManager.getLogger(RemoteConnectionStrategy.class); private static final int MAX_LISTENERS = 100; private final AtomicBoolean closed = new AtomicBoolean(false); private final Object mutex = new Object(); - private final ThreadPool threadPool; - protected final RemoteConnectionManager connectionManager; private List> listeners = new ArrayList<>(); - RemoteConnectionStrategy(ThreadPool threadPool, RemoteConnectionManager connectionManager) { - this.threadPool = threadPool; + protected final TransportService transportService; + protected final RemoteConnectionManager connectionManager; + protected final String clusterAlias; + + RemoteConnectionStrategy(String clusterAlias, TransportService transportService, RemoteConnectionManager connectionManager) { + this.clusterAlias = clusterAlias; + this.transportService = transportService; this.connectionManager = connectionManager; connectionManager.getConnectionManager().addListener(this); } @@ -61,7 +64,7 @@ public abstract class RemoteConnectionStrategy implements TransportConnectionLis void connect(ActionListener connectListener) { boolean runConnect = false; final ActionListener listener = - ContextPreservingActionListener.wrapPreservingContext(connectListener, threadPool.getThreadContext()); + ContextPreservingActionListener.wrapPreservingContext(connectListener, transportService.getThreadPool().getThreadContext()); boolean closed; synchronized (mutex) { closed = this.closed.get(); @@ -83,7 +86,7 @@ public abstract class RemoteConnectionStrategy implements TransportConnectionLis return; } if (runConnect) { - ExecutorService executor = threadPool.executor(ThreadPool.Names.MANAGEMENT); + ExecutorService executor = transportService.getThreadPool().executor(ThreadPool.Names.MANAGEMENT); executor.submit(new AbstractRunnable() { @Override public void onFailure(Exception e) { diff --git a/server/src/main/java/org/elasticsearch/transport/SimpleConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/SimpleConnectionStrategy.java new file mode 100644 index 000000000000..24e9e18c8dc1 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/SimpleConnectionStrategy.java @@ -0,0 +1,161 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.concurrent.CountDown; + +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +public class SimpleConnectionStrategy extends RemoteConnectionStrategy { + + private static final int MAX_CONNECT_ATTEMPTS_PER_RUN = 3; + private static final Logger logger = LogManager.getLogger(SimpleConnectionStrategy.class); + + private final int maxNumRemoteConnections; + private final AtomicLong counter = new AtomicLong(0); + private final List> addresses; + private final AtomicReference remoteClusterName = new AtomicReference<>(); + private final ConnectionProfile profile; + private final ConnectionManager.ConnectionValidator clusterNameValidator; + + SimpleConnectionStrategy(String clusterAlias, TransportService transportService, RemoteConnectionManager connectionManager, + int maxNumRemoteConnections, List> addresses) { + super(clusterAlias, transportService, connectionManager); + this.maxNumRemoteConnections = maxNumRemoteConnections; + assert addresses.isEmpty() == false : "Cannot use simple connection strategy with no configured addresses"; + this.addresses = addresses; + // TODO: Move into the ConnectionManager + this.profile = new ConnectionProfile.Builder() + .addConnections(1, TransportRequestOptions.Type.REG, TransportRequestOptions.Type.PING) + .addConnections(0, TransportRequestOptions.Type.BULK, TransportRequestOptions.Type.STATE, TransportRequestOptions.Type.RECOVERY) + .build(); + this.clusterNameValidator = (newConnection, actualProfile, listener) -> + transportService.handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true, + ActionListener.map(listener, resp -> { + ClusterName remote = resp.getClusterName(); + if (remoteClusterName.compareAndSet(null, remote)) { + return null; + } else { + if (remoteClusterName.get().equals(remote) == false) { + DiscoveryNode node = newConnection.getNode(); + throw new ConnectTransportException(node, "handshake failed. 
unexpected remote cluster name " + remote); + } + return null; + } + })); + } + + @Override + protected boolean shouldOpenMoreConnections() { + return connectionManager.size() < maxNumRemoteConnections; + } + + @Override + protected void connectImpl(ActionListener listener) { + performSimpleConnectionProcess(addresses.iterator(), listener); + } + + private void performSimpleConnectionProcess(Iterator> addressIter, ActionListener listener) { + openConnections(listener, 1); + } + + private void openConnections(ActionListener finished, int attemptNumber) { + if (attemptNumber <= MAX_CONNECT_ATTEMPTS_PER_RUN) { + List resolved = addresses.stream().map(Supplier::get).collect(Collectors.toList()); + + int remaining = maxNumRemoteConnections - connectionManager.size(); + ActionListener compositeListener = new ActionListener<>() { + + private final AtomicInteger successfulConnections = new AtomicInteger(0); + private final CountDown countDown = new CountDown(remaining); + + @Override + public void onResponse(Void v) { + successfulConnections.incrementAndGet(); + if (countDown.countDown()) { + if (shouldOpenMoreConnections()) { + openConnections(finished, attemptNumber + 1); + } else { + finished.onResponse(v); + } + } + } + + @Override + public void onFailure(Exception e) { + if (countDown.countDown()) { + openConnections(finished, attemptNumber + 1); + } + } + }; + + + for (int i = 0; i < remaining; ++i) { + TransportAddress address = nextAddress(resolved); + String id = clusterAlias + "#" + address; + DiscoveryNode node = new DiscoveryNode(id, address, Version.CURRENT.minimumCompatibilityVersion()); + + connectionManager.connectToNode(node, profile, clusterNameValidator, new ActionListener<>() { + @Override + public void onResponse(Void v) { + compositeListener.onResponse(v); + } + + @Override + public void onFailure(Exception e) { + logger.debug(new ParameterizedMessage("failed to open remote connection [remote cluster: {}, address: {}]", + clusterAlias, address), e); + compositeListener.onFailure(e); + } + }); + } + } else { + int openConnections = connectionManager.size(); + if (openConnections == 0) { + finished.onFailure(new IllegalStateException("Unable to open any simple connections to remote cluster [" + clusterAlias + + "]")); + } else { + logger.debug("unable to open maximum number of connections [remote cluster: {}, opened: {}, maximum: {}]", clusterAlias, + openConnections, maxNumRemoteConnections); + finished.onResponse(null); + } + } + } + + private TransportAddress nextAddress(List resolvedAddresses) { + long curr; + while ((curr = counter.getAndIncrement()) == Long.MIN_VALUE) ; + return resolvedAddresses.get(Math.floorMod(curr, resolvedAddresses.size())); + } +} diff --git a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java index ce820b744bda..f71ce576a3c2 100644 --- a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java @@ -19,6 +19,8 @@ package org.elasticsearch.transport; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; @@ -45,9 +47,9 @@ import java.util.function.Supplier; public class SniffConnectionStrategy extends RemoteConnectionStrategy { - private final String clusterAlias; + 
private static final Logger logger = LogManager.getLogger(SniffConnectionStrategy.class); + private final List>> seedNodes; - private final TransportService transportService; private final int maxNumRemoteConnections; private final Predicate nodePredicate; private final SetOnce remoteClusterName = new SetOnce<>(); @@ -56,9 +58,7 @@ public class SniffConnectionStrategy extends RemoteConnectionStrategy { SniffConnectionStrategy(String clusterAlias, TransportService transportService, RemoteConnectionManager connectionManager, String proxyAddress, int maxNumRemoteConnections, Predicate nodePredicate, List>> seedNodes) { - super(transportService.getThreadPool(), connectionManager); - this.clusterAlias = clusterAlias; - this.transportService = transportService; + super(clusterAlias, transportService, connectionManager); this.proxyAddress = proxyAddress; this.maxNumRemoteConnections = maxNumRemoteConnections; this.nodePredicate = nodePredicate; @@ -109,15 +109,15 @@ public class SniffConnectionStrategy extends RemoteConnectionStrategy { onFailure.accept(e); } - final StepListener handShakeStep = new StepListener<>(); + final StepListener handshakeStep = new StepListener<>(); openConnectionStep.whenComplete(connection -> { ConnectionProfile connectionProfile = connectionManager.getConnectionManager().getConnectionProfile(); transportService.handshake(connection, connectionProfile.getHandshakeTimeout().millis(), - getRemoteClusterNamePredicate(), handShakeStep); + getRemoteClusterNamePredicate(), handshakeStep); }, onFailure); final StepListener fullConnectionStep = new StepListener<>(); - handShakeStep.whenComplete(handshakeResponse -> { + handshakeStep.whenComplete(handshakeResponse -> { final DiscoveryNode handshakeNode = maybeAddProxyAddress(proxyAddress, handshakeResponse.getDiscoveryNode()); if (nodePredicate.test(handshakeNode) && shouldOpenMoreConnections()) { @@ -135,7 +135,7 @@ public class SniffConnectionStrategy extends RemoteConnectionStrategy { fullConnectionStep.whenComplete(aVoid -> { if (remoteClusterName.get() == null) { - TransportService.HandshakeResponse handshakeResponse = handShakeStep.result(); + TransportService.HandshakeResponse handshakeResponse = handshakeStep.result(); assert handshakeResponse.getClusterName().value() != null; remoteClusterName.set(handshakeResponse.getClusterName()); } diff --git a/server/src/test/java/org/elasticsearch/transport/SimpleConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/SimpleConnectionStrategyTests.java new file mode 100644 index 000000000000..68e2622c040f --- /dev/null +++ b/server/src/test/java/org/elasticsearch/transport/SimpleConnectionStrategyTests.java @@ -0,0 +1,268 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.test.transport.StubbableTransport; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +public class SimpleConnectionStrategyTests extends ESTestCase { + + private final String clusterAlias = "cluster-alias"; + private final ConnectionProfile profile = RemoteClusterService.buildConnectionProfileFromSettings(Settings.EMPTY, "cluster"); + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + + @Override + public void tearDown() throws Exception { + super.tearDown(); + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + } + + private MockTransportService startTransport(String id, Version version) { + return startTransport(id, version, Settings.EMPTY); + } + + public MockTransportService startTransport(final String id, final Version version, final Settings settings) { + boolean success = false; + final Settings s = Settings.builder() + .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), clusterAlias) + .put("node.name", id) + .put(settings) + .build(); + MockTransportService newService = MockTransportService.createNewService(settings, version, threadPool); + try { + newService.start(); + newService.acceptIncomingRequests(); + success = true; + return newService; + } finally { + if (success == false) { + newService.close(); + } + } + } + + public void testSimpleStrategyWillOpenExpectedNumberOfConnectionsToAddresses() { + try (MockTransportService transport1 = startTransport("node1", Version.CURRENT); + MockTransportService transport2 = startTransport("node2", Version.CURRENT)) { + TransportAddress address1 = transport1.boundAddress().publishAddress(); + TransportAddress address2 = transport2.boundAddress().publishAddress(); + + try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + localService.start(); + localService.acceptIncomingRequests(); + + ConnectionManager connectionManager = new ConnectionManager(profile, localService.transport); + int numOfConnections = randomIntBetween(4, 8); + try (RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + SimpleConnectionStrategy strategy = new SimpleConnectionStrategy(clusterAlias, localService, remoteConnectionManager, + numOfConnections, addresses(address1, address2))) { + assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address1))); + assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address2))); + + PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + strategy.connect(connectFuture); + connectFuture.actionGet(); + + assertTrue(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address1))); + 
assertTrue(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address2))); + assertEquals(numOfConnections, connectionManager.size()); + assertTrue(strategy.assertNoRunningConnections()); + } + } + } + } + + public void testSimpleStrategyWillOpenNewConnectionsOnDisconnect() throws Exception { + try (MockTransportService transport1 = startTransport("node1", Version.CURRENT); + MockTransportService transport2 = startTransport("node2", Version.CURRENT)) { + TransportAddress address1 = transport1.boundAddress().publishAddress(); + TransportAddress address2 = transport2.boundAddress().publishAddress(); + + try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + localService.start(); + localService.acceptIncomingRequests(); + + ConnectionManager connectionManager = new ConnectionManager(profile, localService.transport); + int numOfConnections = randomIntBetween(4, 8); + try (RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + SimpleConnectionStrategy strategy = new SimpleConnectionStrategy(clusterAlias, localService, remoteConnectionManager, + numOfConnections, addresses(address1, address2))) { + assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address1))); + assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address2))); + + PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + strategy.connect(connectFuture); + connectFuture.actionGet(); + + assertTrue(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address1))); + long initialConnectionsToTransport2 = connectionManager.getAllConnectedNodes().stream() + .filter(n -> n.getAddress().equals(address2)) + .count(); + assertNotEquals(0, initialConnectionsToTransport2); + assertEquals(numOfConnections, connectionManager.size()); + assertTrue(strategy.assertNoRunningConnections()); + + transport1.close(); + + assertBusy(() -> { + assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address1))); + // More connections now pointing to transport2 + long finalConnectionsToTransport2 = connectionManager.getAllConnectedNodes().stream() + .filter(n -> n.getAddress().equals(address2)) + .count(); + assertTrue(finalConnectionsToTransport2 > initialConnectionsToTransport2); + assertTrue(strategy.assertNoRunningConnections()); + }); + } + } + } + } + + public void testConnectWithSingleIncompatibleNode() { + Version incompatibleVersion = Version.CURRENT.minimumCompatibilityVersion().minimumCompatibilityVersion(); + try (MockTransportService transport1 = startTransport("compatible-node", Version.CURRENT); + MockTransportService transport2 = startTransport("incompatible-node", incompatibleVersion)) { + TransportAddress address1 = transport1.boundAddress().publishAddress(); + TransportAddress address2 = transport2.boundAddress().publishAddress(); + + try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + localService.start(); + localService.acceptIncomingRequests(); + + StubbableTransport stubbableTransport = new StubbableTransport(localService.transport); + ConnectionManager connectionManager = new ConnectionManager(profile, stubbableTransport); + AtomicInteger address1Attempts = new AtomicInteger(0); + AtomicInteger address2Attempts = new 
AtomicInteger(0); + stubbableTransport.setDefaultConnectBehavior((transport, discoveryNode, profile, listener) -> { + if (discoveryNode.getAddress().equals(address1)) { + address1Attempts.incrementAndGet(); + transport.openConnection(discoveryNode, profile, listener); + } else if (discoveryNode.getAddress().equals(address2)) { + address2Attempts.incrementAndGet(); + transport.openConnection(discoveryNode, profile, listener); + } else { + throw new AssertionError("Unexpected address"); + } + }); + int numOfConnections = 5; + try (RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + SimpleConnectionStrategy strategy = new SimpleConnectionStrategy(clusterAlias, localService, remoteConnectionManager, + numOfConnections, addresses(address1, address2))) { + assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address1))); + assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address2))); + + PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + strategy.connect(connectFuture); + connectFuture.actionGet(); + + assertEquals(4 ,connectionManager.size()); + assertEquals(4 ,connectionManager.getAllConnectedNodes().stream().map(n -> n.getAddress().equals(address1)).count()); + // Three attempts on first round, one attempts on second round, zero attempts on third round + assertEquals(4, address1Attempts.get()); + // Two attempts on first round, one attempt on second round, one attempt on third round + assertEquals(4, address2Attempts.get()); + assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address2))); + assertTrue(strategy.assertNoRunningConnections()); + } + } + } + } + + public void testConnectFailsWithIncompatibleNodes() { + Version incompatibleVersion = Version.CURRENT.minimumCompatibilityVersion().minimumCompatibilityVersion(); + try (MockTransportService transport1 = startTransport("incompatible-node", incompatibleVersion)) { + TransportAddress address1 = transport1.boundAddress().publishAddress(); + + try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + localService.start(); + localService.acceptIncomingRequests(); + + ConnectionManager connectionManager = new ConnectionManager(profile, localService.transport); + int numOfConnections = randomIntBetween(4, 8); + try (RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + SimpleConnectionStrategy strategy = new SimpleConnectionStrategy(clusterAlias, localService, remoteConnectionManager, + numOfConnections, addresses(address1))) { + + PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + strategy.connect(connectFuture); + expectThrows(Exception.class, connectFuture::actionGet); + + assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address1))); + assertEquals(0, connectionManager.size()); + assertTrue(strategy.assertNoRunningConnections()); + } + } + } + } + + public void testClusterNameValidationPreventConnectingToDifferentClusters() throws Exception { + Settings otherSettings = Settings.builder().put("cluster.name", "otherCluster").build(); + + try (MockTransportService transport1 = startTransport("cluster1", Version.CURRENT); + MockTransportService transport2 = startTransport("cluster2", Version.CURRENT, otherSettings)) { + TransportAddress 
address1 = transport1.boundAddress().publishAddress(); + TransportAddress address2 = transport2.boundAddress().publishAddress(); + + try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + localService.start(); + localService.acceptIncomingRequests(); + + ConnectionManager connectionManager = new ConnectionManager(profile, localService.transport); + int numOfConnections = randomIntBetween(4, 8); + try (RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + SimpleConnectionStrategy strategy = new SimpleConnectionStrategy(clusterAlias, localService, remoteConnectionManager, + numOfConnections, addresses(address1, address2))) { + assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address1))); + assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address2))); + + PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + strategy.connect(connectFuture); + connectFuture.actionGet(); + + if (connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address1))) { + assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address2))); + } else { + assertTrue(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address2))); + } + assertTrue(strategy.assertNoRunningConnections()); + } + } + } + } + + private static List> addresses(final TransportAddress... addresses) { + return Arrays.stream(addresses).map(s -> (Supplier) () -> s).collect(Collectors.toList()); + } +} From e221f8632f9d7397bbca181c4b20ac2f7a7ddfd1 Mon Sep 17 00:00:00 2001 From: Gordon Brown Date: Tue, 8 Oct 2019 15:13:52 -0600 Subject: [PATCH 52/55] Manage retention of failed snapshots in SLM (#47617) Failed snapshots will eventually build up unless they are deleted. While failures may not take up much space, they add noise to the list of snapshots and it's desirable to remove them when they are no longer useful. With this change, failed snapshots are deleted using the following strategy: `FAILED` snapshots will be kept until the configured `expire_after` period has passed, if present, and then be deleted. If there is no configured `expire_after` in the retention policy, then they will be deleted if there is at least one more recent successful snapshot from this policy (as they may otherwise be useful for troubleshooting purposes). Failed snapshots are not counted towards either `min_count` or `max_count`. 
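
As a hypothetical sketch of just this failed-snapshot rule (it is not
the actual `SnapshotRetentionConfiguration` predicate, and the types and
names below are invented):

    import java.util.List;

    final class FailedSnapshotRuleSketch {
        static final class Snap {
            final long startTime;
            final boolean failed;
            Snap(long startTime, boolean failed) {
                this.startTime = startTime;
                this.failed = failed;
            }
        }

        /** Whether a FAILED snapshot is eligible for deletion by retention. */
        static boolean failedSnapshotEligible(Snap snap, List<Snap> allFromPolicy,
                                              Long expireAfterMillis, long now) {
            if (snap.failed == false) {
                return false; // successful snapshots follow the normal rules
            }
            if (expireAfterMillis != null) {
                // Keep the failure until the expire_after period has passed.
                return now - snap.startTime > expireAfterMillis;
            }
            // No expire_after configured: deletable once at least one more
            // recent successful snapshot from this policy exists.
            return allFromPolicy.stream()
                .filter(s -> s.failed == false)
                .anyMatch(s -> s.startTime > snap.startTime);
        }
    }

Because failed snapshots never enter the `min_count`/`max_count`
bookkeeping, the sketch does not need to consult either limit.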
--- .../slm/SnapshotRetentionConfiguration.java | 64 +++++--- .../SnapshotRetentionConfigurationTests.java | 131 ++++++++++++++- .../xpack/slm/SnapshotRetentionTask.java | 7 +- .../slm/SLMSnapshotBlockingIntegTests.java | 155 ++++++++++++++++++ .../xpack/slm/SnapshotRetentionTaskTests.java | 2 +- 5 files changed, 335 insertions(+), 24 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotRetentionConfiguration.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotRetentionConfiguration.java index 1d3d4bd7a82f..970b54a7b584 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotRetentionConfiguration.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotRetentionConfiguration.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotState; import java.io.IOException; import java.util.Comparator; @@ -29,6 +30,7 @@ import java.util.Set; import java.util.function.LongSupplier; import java.util.function.Predicate; import java.util.stream.Collectors; +import java.util.stream.Stream; public class SnapshotRetentionConfiguration implements ToXContentObject, Writeable { @@ -113,33 +115,48 @@ public class SnapshotRetentionConfiguration implements ToXContentObject, Writeab * @param allSnapshots a list of all snapshot pertaining to this SLM policy and repository */ public Predicate getSnapshotDeletionPredicate(final List allSnapshots) { - final int snapCount = allSnapshots.size(); - List sortedSnapshots = allSnapshots.stream() + final int totalSnapshotCount = allSnapshots.size(); + final List sortedSnapshots = allSnapshots.stream() .sorted(Comparator.comparingLong(SnapshotInfo::startTime)) .collect(Collectors.toList()); + final long successfulSnapshotCount = allSnapshots.stream() + .filter(snap -> SnapshotState.SUCCESS.equals(snap.state())) + .count(); + final long newestSuccessfulTimestamp = allSnapshots.stream() + .filter(snap -> SnapshotState.SUCCESS.equals(snap.state())) + .mapToLong(SnapshotInfo::startTime) + .max() + .orElse(Long.MIN_VALUE); return si -> { final String snapName = si.snapshotId().getName(); - // First, enforce the maximum count, if the size is over the maximum number of + // First, if there's no expire_after and a more recent successful snapshot, we can delete all the failed ones + if (this.expireAfter == null && SnapshotState.FAILED.equals(si.state()) && newestSuccessfulTimestamp > si.startTime()) { + // There's no expire_after and there's a more recent successful snapshot, delete this failed one + logger.trace("[{}]: ELIGIBLE as it is FAILED and there is a more recent successful snapshot", snapName); + return true; + } + + // Next, enforce the maximum count, if the size is over the maximum number of // snapshots, then allow the oldest N (where N is the number over the maximum snapshot // count) snapshots to be eligible for deletion if (this.maximumSnapshotCount != null) { - if (allSnapshots.size() > this.maximumSnapshotCount) { - int snapsToDelete = allSnapshots.size() - this.maximumSnapshotCount; - boolean eligible = sortedSnapshots.stream() + if (successfulSnapshotCount > this.maximumSnapshotCount) { + final long snapsToDelete = successfulSnapshotCount - this.maximumSnapshotCount; + final boolean eligible = 
sortedSnapshots.stream() .limit(snapsToDelete) .anyMatch(s -> s.equals(si)); if (eligible) { logger.trace("[{}]: ELIGIBLE as it is one of the {} oldest snapshots with " + - "{} total snapshots, over the limit of {} maximum snapshots", - snapName, snapsToDelete, snapCount, this.maximumSnapshotCount); + "{} non-failed snapshots ({} total), over the limit of {} maximum snapshots", + snapName, snapsToDelete, successfulSnapshotCount, totalSnapshotCount, this.maximumSnapshotCount); return true; } else { logger.trace("[{}]: INELIGIBLE as it is not one of the {} oldest snapshots with " + - "{} total snapshots, over the limit of {} maximum snapshots", - snapName, snapsToDelete, snapCount, this.maximumSnapshotCount); + "{} non-failed snapshots ({} total), over the limit of {} maximum snapshots", + snapName, snapsToDelete, successfulSnapshotCount, totalSnapshotCount, this.maximumSnapshotCount); return false; } } @@ -149,25 +166,34 @@ public class SnapshotRetentionConfiguration implements ToXContentObject, Writeab // if we haven't hit the minimum then we need to keep the snapshot regardless of // expiration time if (this.minimumSnapshotCount != null) { - if (allSnapshots.size() <= this.minimumSnapshotCount) { - logger.trace("[{}]: INELIGIBLE as there are {} snapshots and {} minimum snapshots needed", - snapName, snapCount, this.minimumSnapshotCount); - return false; - } + if (successfulSnapshotCount <= this.minimumSnapshotCount) + if (SnapshotState.FAILED.equals(si.state()) == false) { + logger.trace("[{}]: INELIGIBLE as there are {} non-failed snapshots ({} total) and {} minimum snapshots needed", + snapName, successfulSnapshotCount, totalSnapshotCount, this.minimumSnapshotCount); + return false; + } else { + logger.trace("[{}]: SKIPPING minimum snapshot count check as this snapshot is {} and not counted " + + "towards the minimum snapshot count.", snapName, SnapshotState.FAILED); + } } // Finally, check the expiration time of the snapshot, if it is past, then it is // eligible for deletion if (this.expireAfter != null) { - TimeValue snapshotAge = new TimeValue(nowSupplier.getAsLong() - si.startTime()); + final TimeValue snapshotAge = new TimeValue(nowSupplier.getAsLong() - si.startTime()); if (this.minimumSnapshotCount != null) { - int eligibleForExpiration = snapCount - minimumSnapshotCount; + final long eligibleForExpiration = successfulSnapshotCount - minimumSnapshotCount; // Only the oldest N snapshots are actually eligible, since if we went below this we // would fall below the configured minimum number of snapshots to keep - Set snapsEligibleForExpiration = sortedSnapshots.stream() - .limit(eligibleForExpiration) + final Stream successfulSnapsEligibleForExpiration = sortedSnapshots.stream() + .filter(snap -> SnapshotState.SUCCESS.equals(snap.state())) + .limit(eligibleForExpiration); + final Stream failedSnaps = sortedSnapshots.stream() + .filter(snap -> SnapshotState.FAILED.equals(snap.state())); + + final Set snapsEligibleForExpiration = Stream.concat(successfulSnapsEligibleForExpiration, failedSnaps) .collect(Collectors.toSet()); if (snapsEligibleForExpiration.contains(si) == false) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionConfigurationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionConfigurationTests.java index 378fe0c2d774..50832553edf5 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionConfigurationTests.java +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionConfigurationTests.java @@ -7,8 +7,10 @@ package org.elasticsearch.xpack.slm; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotShardFailure; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; import org.elasticsearch.xpack.core.slm.SnapshotRetentionConfiguration; @@ -100,10 +102,137 @@ public class SnapshotRetentionConfigurationTests extends ESTestCase { assertThat(conf.getSnapshotDeletionPredicate(infos).test(s9), equalTo(false)); } + public void testFailuresDeletedIfExpired() { + SnapshotRetentionConfiguration conf = new SnapshotRetentionConfiguration( + () -> TimeValue.timeValueDays(1).millis() + 1, + TimeValue.timeValueDays(1), null, null); + SnapshotInfo oldInfo = makeFailureInfo(0); + assertThat(conf.getSnapshotDeletionPredicate(Collections.singletonList(oldInfo)).test(oldInfo), equalTo(true)); + + SnapshotInfo newInfo = makeFailureInfo(1); + assertThat(conf.getSnapshotDeletionPredicate(Collections.singletonList(newInfo)).test(newInfo), equalTo(false)); + + List infos = new ArrayList<>(); + infos.add(newInfo); + infos.add(oldInfo); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(newInfo), equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(oldInfo), equalTo(true)); + } + + public void testFailuresDeletedIfNoExpiryAndMoreRecentSuccessExists() { + SnapshotRetentionConfiguration conf = new SnapshotRetentionConfiguration(() -> 1, null, 2, 5); + SnapshotInfo s1 = makeInfo(1); + SnapshotInfo s2 = makeInfo(2); + SnapshotInfo s3 = makeFailureInfo(3); + SnapshotInfo s4 = makeInfo(4); + + List infos = Arrays.asList(s1 , s2, s3, s4); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s1), equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s2), equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s3), equalTo(true)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s4), equalTo(false)); + } + + public void testFailuresKeptIfNoExpiryAndNoMoreRecentSuccess() { + // Also tests that failures are not counted towards the maximum + SnapshotRetentionConfiguration conf = new SnapshotRetentionConfiguration(() -> 1, null, 2, 3); + SnapshotInfo s1 = makeInfo(1); + SnapshotInfo s2 = makeInfo(2); + SnapshotInfo s3 = makeInfo(3); + SnapshotInfo s4 = makeFailureInfo(4); + + List infos = Arrays.asList(s1 , s2, s3, s4); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s1), equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s2), equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s3), equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s4), equalTo(false)); + } + + public void testFailuresNotCountedTowardsMaximum() { + SnapshotRetentionConfiguration conf = new SnapshotRetentionConfiguration(() -> 1, TimeValue.timeValueDays(1), 2, 2); + SnapshotInfo s1 = makeInfo(1); + SnapshotInfo s2 = makeFailureInfo(2); + SnapshotInfo s3 = makeFailureInfo(3); + SnapshotInfo s4 = makeFailureInfo(4); + SnapshotInfo s5 = makeInfo(5); + + List infos = Arrays.asList(s1 , s2, s3, s4, s5); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s1), equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s2), 
equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s3), equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s4), equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s5), equalTo(false)); + } + + public void testFailuresNotCountedTowardsMinimum() { + SnapshotRetentionConfiguration conf = new SnapshotRetentionConfiguration(() -> TimeValue.timeValueDays(1).millis() + 1, + TimeValue.timeValueDays(1), 2, null); + SnapshotInfo oldInfo = makeInfo(0); + SnapshotInfo failureInfo = makeFailureInfo( 1); + SnapshotInfo newInfo = makeInfo(2); + + List infos = new ArrayList<>(); + infos.add(newInfo); + infos.add(failureInfo); + infos.add(oldInfo); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(newInfo), equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(failureInfo), equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(oldInfo), equalTo(false)); + + conf = new SnapshotRetentionConfiguration(() -> TimeValue.timeValueDays(1).millis() + 2, + TimeValue.timeValueDays(1), 1, null); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(newInfo), equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(failureInfo), equalTo(true)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(oldInfo), equalTo(true)); + } + + public void testMostRecentSuccessfulTimestampIsUsed() { + SnapshotRetentionConfiguration conf = new SnapshotRetentionConfiguration(() -> 1, null, 2, 2); + SnapshotInfo s1 = makeInfo(1); + SnapshotInfo s2 = makeInfo(2); + SnapshotInfo s3 = makeFailureInfo(3); + SnapshotInfo s4 = makeFailureInfo(4); + + List infos = Arrays.asList(s1 , s2, s3, s4); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s1), equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s2), equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s3), equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s4), equalTo(false)); + } + private SnapshotInfo makeInfo(long startTime) { final Map meta = new HashMap<>(); meta.put(SnapshotLifecyclePolicy.POLICY_ID_METADATA_FIELD, REPO); + final int totalShards = between(1,20); return new SnapshotInfo(new SnapshotId("snap-" + randomAlphaOfLength(3), "uuid"), - Collections.singletonList("foo"), startTime, false, meta); + Collections.singletonList("foo"), + startTime, + null, + startTime + between(1,10000), + totalShards, + new ArrayList<>(), + false, + meta); + } + + private SnapshotInfo makeFailureInfo(long startTime) { + final Map meta = new HashMap<>(); + meta.put(SnapshotLifecyclePolicy.POLICY_ID_METADATA_FIELD, REPO); + final int totalShards = between(1,20); + final List failures = new ArrayList<>(); + final int failureCount = between(1,totalShards); + for (int i = 0; i < failureCount; i++) { + failures.add(new SnapshotShardFailure("nodeId", new ShardId("index-name", "index-uuid", i), "failed")); + } + assert failureCount == failures.size(); + return new SnapshotInfo(new SnapshotId("snap-fail-" + randomAlphaOfLength(3), "uuid-fail"), + Collections.singletonList("foo-fail"), + startTime, + "forced-failure", + startTime + between(1,10000), + totalShards, + failures, + randomBoolean(), + meta); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java index a9e67c41db19..73759c033e3a 100644 --- 
a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java
@@ -131,7 +131,7 @@ public class SnapshotRetentionTask implements SchedulerEngine.Listener {
         // Finally, asynchronously retrieve all the snapshots, deleting them serially,
         // before updating the cluster state with the new metrics and setting 'running'
         // back to false
-        getAllSuccessfulSnapshots(repositioriesToFetch, new ActionListener<>() {
+        getAllRetainableSnapshots(repositioriesToFetch, new ActionListener<>() {
             @Override
             public void onResponse(Map<String, List<SnapshotInfo>> allSnapshots) {
                 try {
@@ -222,7 +222,7 @@ public class SnapshotRetentionTask implements SchedulerEngine.Listener {
         return eligible;
     }

-    void getAllSuccessfulSnapshots(Collection<String> repositories, ActionListener<Map<String, List<SnapshotInfo>>> listener,
+    void getAllRetainableSnapshots(Collection<String> repositories, ActionListener<Map<String, List<SnapshotInfo>>> listener,
                                    Consumer<Exception> errorHandler) {
         if (repositories.isEmpty()) {
             // Skip retrieving anything if there are no repositories to fetch
@@ -236,11 +236,12 @@ public class SnapshotRetentionTask implements SchedulerEngine.Listener {
                 @Override
                 public void onResponse(final GetSnapshotsResponse resp) {
                     Map<String, List<SnapshotInfo>> snapshots = new HashMap<>();
+                    final Set<SnapshotState> retainableStates = Set.of(SnapshotState.SUCCESS, SnapshotState.FAILED);
                     repositories.forEach(repo -> {
                         snapshots.put(repo,
                             // Only return snapshots in retainable states (SUCCESS or FAILED)
                             resp.getSnapshots(repo).stream()
-                                .filter(info -> info.state() == SnapshotState.SUCCESS)
+                                .filter(info -> retainableStates.contains(info.state()))
                                 .collect(Collectors.toList()));
                     });
                     listener.onResponse(snapshots);
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java
index 1d066a0d7f0c..aa13ec27a1ea 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java
@@ -6,11 +6,15 @@
 package org.elasticsearch.xpack.slm;

+import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
 import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus;
 import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.SnapshotsInProgress;
+import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
@@ -18,7 +22,9 @@ import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.repositories.RepositoriesService;
 import org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException;
+import org.elasticsearch.snapshots.SnapshotInfo;
 import org.elasticsearch.snapshots.SnapshotMissingException;
+import org.elasticsearch.snapshots.SnapshotState;
 import org.elasticsearch.snapshots.mockstore.MockRepository;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.junit.annotations.TestLogging;
@@ -38,8 +44,13 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -231,6 +242,137 @@ public class SLMSnapshotBlockingIntegTests extends ESIntegTestCase { } } + public void testBasicFailureRetention() throws Exception { + final String indexName = "test-idx"; + final String policyId = "test-policy"; + // Setup + logger.info("--> starting two master nodes and two data nodes"); + internalCluster().startMasterOnlyNodes(2); + internalCluster().startDataOnlyNodes(2); + + createAndPopulateIndex(indexName); + + // Create a snapshot repo + initializeRepo(REPO); + + createSnapshotPolicy(policyId, "snap", "1 2 3 4 5 ?", REPO, indexName, true, + new SnapshotRetentionConfiguration(null, 1, 2)); + + // Create a failed snapshot + AtomicReference failedSnapshotName = new AtomicReference<>(); + { + logger.info("--> stopping random data node, which should cause shards to go missing"); + internalCluster().stopRandomDataNode(); + assertBusy(() -> + assertEquals(ClusterHealthStatus.RED, client().admin().cluster().prepareHealth().get().getStatus()), + 30, TimeUnit.SECONDS); + + final String masterNode = blockMasterFromFinalizingSnapshotOnIndexFile(REPO); + + logger.info("--> start snapshot"); + ActionFuture snapshotFuture = client() + .execute(ExecuteSnapshotLifecycleAction.INSTANCE, new ExecuteSnapshotLifecycleAction.Request(policyId)); + + logger.info("--> waiting for block to kick in on " + masterNode); + waitForBlock(masterNode, REPO, TimeValue.timeValueSeconds(60)); + + logger.info("--> stopping master node"); + internalCluster().stopCurrentMasterNode(); + + logger.info("--> wait until the snapshot is done"); + failedSnapshotName.set(snapshotFuture.get().getSnapshotName()); + assertNotNull(failedSnapshotName.get()); + + logger.info("--> verify that snapshot [{}] failed", failedSnapshotName.get()); + assertBusy(() -> { + GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster() + .prepareGetSnapshots(REPO).setSnapshots(failedSnapshotName.get()).get(); + SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots(REPO).get(0); + assertEquals(SnapshotState.FAILED, snapshotInfo.state()); + }); + } + + // Run retention - we'll check the results later to make sure it's had time to run. 
+ { + logger.info("--> executing SLM retention"); + assertAcked(client().execute(ExecuteSnapshotRetentionAction.INSTANCE, new ExecuteSnapshotRetentionAction.Request()).get()); + } + + // Take a successful snapshot + AtomicReference successfulSnapshotName = new AtomicReference<>(); + { + logger.info("--> deleting old index [{}], as it is now missing shards", indexName); + assertAcked(client().admin().indices().prepareDelete(indexName).get()); + createAndPopulateIndex(indexName); + + logger.info("--> unblocking snapshots"); + unblockRepo(REPO); + unblockAllDataNodes(REPO); + + logger.info("--> taking new snapshot"); + + ActionFuture snapshotResponse = client() + .execute(ExecuteSnapshotLifecycleAction.INSTANCE, new ExecuteSnapshotLifecycleAction.Request(policyId)); + logger.info("--> waiting for snapshot to complete"); + successfulSnapshotName.set(snapshotResponse.get().getSnapshotName()); + assertNotNull(successfulSnapshotName.get()); + Thread.sleep(TimeValue.timeValueSeconds(10).millis()); + logger.info("--> verify that snapshot [{}] succeeded", successfulSnapshotName.get()); + assertBusy(() -> { + GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster() + .prepareGetSnapshots(REPO).setSnapshots(successfulSnapshotName.get()).get(); + SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots(REPO).get(0); + assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); + }); + } + + // Check that the failed snapshot from before still exists, now that retention has run + { + logger.info("--> verify that snapshot [{}] still exists", failedSnapshotName.get()); + GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster() + .prepareGetSnapshots(REPO).setSnapshots(failedSnapshotName.get()).get(); + SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots(REPO).get(0); + assertEquals(SnapshotState.FAILED, snapshotInfo.state()); + } + + // Run retention again and make sure the failure was deleted + { + logger.info("--> executing SLM retention"); + assertAcked(client().execute(ExecuteSnapshotRetentionAction.INSTANCE, new ExecuteSnapshotRetentionAction.Request()).get()); + logger.info("--> waiting for failed snapshot [{}] to be deleted", failedSnapshotName.get()); + assertBusy(() -> { + try { + GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster() + .prepareGetSnapshots(REPO).setSnapshots(failedSnapshotName.get()).get(); + assertThat(snapshotsStatusResponse.getSnapshots(REPO), empty()); + } catch (SnapshotMissingException e) { + // This is what we want to happen + } + logger.info("--> failed snapshot [{}] has been deleted, checking successful snapshot [{}] still exists", + failedSnapshotName.get(), successfulSnapshotName.get()); + GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster() + .prepareGetSnapshots(REPO).setSnapshots(successfulSnapshotName.get()).get(); + SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots(REPO).get(0); + assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); + }); + } + } + + private void createAndPopulateIndex(String indexName) throws InterruptedException { + logger.info("--> creating and populating index [{}]", indexName); + assertAcked(prepareCreate(indexName, 0, Settings.builder() + .put("number_of_shards", 6).put("number_of_replicas", 0))); + ensureGreen(); + + final int numdocs = randomIntBetween(50, 100); + IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs]; + for (int i = 0; i < builders.length; i++) { + builders[i] = client().prepareIndex(indexName, 
SINGLE_MAPPING_NAME, Integer.toString(i)).setSource("field1", "bar " + i);
+        }
+        indexRandom(true, builders);
+        flushAndRefresh();
+    }
+
     private void initializeRepo(String repoName) {
         client().admin().cluster().preparePutRepository(repoName)
             .setType("mock")
@@ -314,4 +456,17 @@ public class SLMSnapshotBlockingIntegTests extends ESIntegTestCase {
             ((MockRepository)repositoriesService.repository(repository)).unblock();
         }
     }
+
+    public void waitForBlock(String node, String repository, TimeValue timeout) throws InterruptedException {
+        long start = System.currentTimeMillis();
+        RepositoriesService repositoriesService = internalCluster().getInstance(RepositoriesService.class, node);
+        MockRepository mockRepository = (MockRepository) repositoriesService.repository(repository);
+        while (System.currentTimeMillis() - start < timeout.millis()) {
+            if (mockRepository.blocked()) {
+                return;
+            }
+            Thread.sleep(100);
+        }
+        fail("Timeout waiting for node [" + node + "] to be blocked");
+    }
 }
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java
index 53c85c5e2302..24d137f6839e 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java
@@ -442,7 +442,7 @@ public class SnapshotRetentionTaskTests extends ESTestCase {
         }

         @Override
-        void getAllSuccessfulSnapshots(Collection<String> repositories,
+        void getAllRetainableSnapshots(Collection<String> repositories,
                                        ActionListener<Map<String, List<SnapshotInfo>>> listener,
                                        Consumer<Exception> errorHandler) {
             listener.onResponse(this.snapshotRetriever.get());

From 8b201e64ffeffa8dfe8c5849a81decb2a62dbe06 Mon Sep 17 00:00:00 2001
From: Jake Landis
Date: Tue, 8 Oct 2019 17:20:12 -0500
Subject: [PATCH 53/55] Fix cluster alert for watcher/monitoring
 IndexOutOfBoundsExcep… (#45308)

If a cluster sending monitoring data is unhealthy and triggers an
alert, and then stops sending data, the following exception [1] can
occur. This exception stops the current Watch, and the behavior is
actually correct in part due to the exception. Simply fixing the
exception introduces some incorrect behavior: now that the Watch does
not error in this case, it will result in an incorrectly "resolved"
alert.

The fix here has two parts: a) fix the exception, and b) fix the
incorrect behavior that follows.

a) Fixing the exception is as easy as checking the size of the array
before accessing it.

b) Fixing the incorrect behavior that follows is a bit more intrusive.
Note that the UI depends on the success/met state of each condition to
determine an "OK" or "FIRING" status. In this scenario, where an
unhealthy cluster triggers an alert and then goes silent, the Watch
should keep "FIRING" until it hears back that the cluster is green. To
keep the Watch "FIRING", either the index action or the email action
needs to fire. Since the Watch is neither a "new" alert nor a
"resolved" alert, we do not want to keep sending an email (that would
also be too noisy). Without completely changing the logic of how an
alert is resolved, allowing the index action to take place would result
in the alert being resolved.
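For reference, part (a) amounts to a bounds check before dereferencing
the first search hit. The sketch below is only an illustration with
hypothetical types and names, not the actual watch code; the real guard
lives in the Painless transform of the cluster-status watch (see the
elasticsearch_cluster_status.json hunk below), which records a
`found_state` flag that the actions can condition on:

```java
// Sketch only (hypothetical names): avoid hits[0] when there are no hits.
static String clusterStateStatus(java.util.List<String> checkHits) {
    if (checkHits.isEmpty()) {
        // Previously this case threw an IndexOutOfBoundsException and killed the Watch.
        return "unknown";
    }
    return checkHits.get(0);
}
```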
Since we cannot keep the Watch "FIRING" via either the email or the
index action (we don't want to resolve the alert, nor re-write the
logic for alert resolution), we introduce a third action: a logging
action that WILL fire when the cluster is unhealthy. Specifically, it
fires when there is an unresolved alert and the cluster state cannot be
found. This logging action is logged at debug level, so it should not
be noticed much. It serves as an 'anchor' for the UI to keep the state
in a "FIRING" status until the alert is resolved.

This does present a possible scenario where a cluster starts firing and
then goes completely silent forever; in that case the Watch will be
"FIRING" forever. This is an edge case that already exists in some
scenarios and requires manual intervention to remove that Watch.

This change also switches to a template-like method to populate the
version_created for the default monitoring watches. The version is set
to 7.5 since that is where this is first introduced.

Fixes #43184
---
 .../monitoring/exporter/ClusterAlertsUtil.java | 11 ++++++++++-
 .../watches/elasticsearch_cluster_status.json  | 18 +++++++++++++++---
 .../watches/elasticsearch_nodes.json           |  2 +-
 .../elasticsearch_version_mismatch.json        |  2 +-
 .../watches/kibana_version_mismatch.json       |  2 +-
 .../watches/logstash_version_mismatch.json     |  2 +-
 .../watches/xpack_license_expiration.json      |  2 +-
 .../exporter/ClusterAlertsUtilTests.java       |  1 +
 8 files changed, 31 insertions(+), 9 deletions(-)

diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/ClusterAlertsUtil.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/ClusterAlertsUtil.java
index 2fe7e983a7a5..0aae71944875 100644
--- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/ClusterAlertsUtil.java
+++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/ClusterAlertsUtil.java
@@ -49,11 +49,19 @@ public class ClusterAlertsUtil {
     private static final Pattern UNIQUE_WATCH_ID_PROPERTY =
         Pattern.compile(Pattern.quote("${monitoring.watch.unique_id}"));

+    /**
+     * Replace the ${monitoring.version_created} field in the watches.
+     */
+    private static final Pattern VERSION_CREATED_PROPERTY =
+        Pattern.compile(Pattern.quote("${monitoring.version_created}"));
+
     /**
      * The last time that all watches were updated. For now, all watches have been updated in the same version and should all be replaced
      * together.
      */
-    public static final int LAST_UPDATED_VERSION = Version.V_7_0_0.id;
+    public static final int LAST_UPDATED_VERSION = Version.V_7_5_0.id;

     /**
      * An unsorted list of Watch IDs representing resource files for Monitoring Cluster Alerts.
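To make the new placeholder concrete, here is a minimal sketch of the
substitution, assuming a String `source` that holds the raw watch JSON;
the actual call site appears in the next hunk:

```java
// The watch resources now ship a placeholder instead of a hard-coded version id:
//   "version_created": "${monitoring.version_created}"
// which is filled in from the single LAST_UPDATED_VERSION constant:
String source = "{ \"version_created\": \"${monitoring.version_created}\" }";
source = VERSION_CREATED_PROPERTY.matcher(source)
    .replaceAll(Integer.toString(LAST_UPDATED_VERSION));
// With LAST_UPDATED_VERSION == Version.V_7_5_0.id this should yield:
//   { "version_created": "7050099" }
```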
@@ -113,6 +121,7 @@ public class ClusterAlertsUtil { source = CLUSTER_UUID_PROPERTY.matcher(source).replaceAll(clusterUuid); source = WATCH_ID_PROPERTY.matcher(source).replaceAll(watchId); source = UNIQUE_WATCH_ID_PROPERTY.matcher(source).replaceAll(uniqueWatchId); + source = VERSION_CREATED_PROPERTY.matcher(source).replaceAll(Integer.toString(LAST_UPDATED_VERSION)); return source; } catch (final IOException e) { diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_cluster_status.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_cluster_status.json index 4e250d5d743b..16e52bce019b 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_cluster_status.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_cluster_status.json @@ -7,7 +7,7 @@ "link": "elasticsearch/indices", "severity": 2100, "type": "monitoring", - "version_created": 7000099, + "version_created": "${monitoring.version_created}", "watch": "${monitoring.watch.id}" } }, @@ -134,11 +134,23 @@ }, "transform": { "script": { - "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0 && ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack != null) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;def state = ctx.payload.check.hits.hits[0]._source.cluster_state.status;if (ctx.vars.not_resolved){ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check == false) {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = ['timestamp': ctx.execution_time, 'metadata': ctx.metadata.xpack];}if (ctx.vars.fails_check) {ctx.payload.prefix = 'Elasticsearch cluster status is ' + state + '.';if (state == 'red') {ctx.payload.message = 'Allocate missing primary shards and replica shards.';ctx.payload.metadata.severity = 2100;} else {ctx.payload.message = 'Allocate missing replica shards.';ctx.payload.metadata.severity = 1100;}}ctx.vars.state = state.toUpperCase();ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;" + "source": "ctx.vars.email_recipient = (ctx.payload.kibana_settings.hits.total > 0 && ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack != null) ? ctx.payload.kibana_settings.hits.hits[0]._source.kibana_settings.xpack.default_admin_email : null;ctx.vars.is_new = ctx.vars.fails_check && !ctx.vars.not_resolved;ctx.vars.is_resolved = !ctx.vars.fails_check && ctx.vars.not_resolved;ctx.vars.found_state = ctx.payload.check.hits.total != 0;def state = ctx.vars.found_state ? 
ctx.payload.check.hits.hits[0]._source.cluster_state.status : 'unknown';if (ctx.vars.not_resolved){ctx.payload = ctx.payload.alert.hits.hits[0]._source;if (ctx.vars.fails_check == false) {ctx.payload.resolved_timestamp = ctx.execution_time;}} else {ctx.payload = ['timestamp': ctx.execution_time, 'metadata': ctx.metadata.xpack];}if (ctx.vars.fails_check) {ctx.payload.prefix = 'Elasticsearch cluster status is ' + state + '.';if (state == 'red') {ctx.payload.message = 'Allocate missing primary shards and replica shards.';ctx.payload.metadata.severity = 2100;} else {ctx.payload.message = 'Allocate missing replica shards.';ctx.payload.metadata.severity = 1100;}}ctx.vars.state = state.toUpperCase();ctx.payload.update_timestamp = ctx.execution_time;return ctx.payload;" } }, "actions": { + "log_state_not_found": { + "condition": { + "script": "!ctx.vars.found_state" + }, + "logging" : { + "text" : "Watch [{{ctx.metadata.xpack.watch}}] could not determine cluster state for cluster [{{ctx.metadata.xpack.cluster_uuid}}]. This likely means the cluster has not sent any monitoring data recently.", + "level" : "debug" + } + }, "add_to_alerts_index": { + "condition": { + "script": "ctx.vars.found_state" + }, "index": { "index": ".monitoring-alerts-7", "doc_id": "${monitoring.watch.unique_id}" @@ -146,7 +158,7 @@ }, "send_email_to_admin": { "condition": { - "script": "return ctx.vars.email_recipient != null && (ctx.vars.is_new || ctx.vars.is_resolved)" + "script": "return ctx.vars.email_recipient != null && ctx.vars.found_state && (ctx.vars.is_new || ctx.vars.is_resolved)" }, "email": { "to": "X-Pack Admin <{{ctx.vars.email_recipient}}>", diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_nodes.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_nodes.json index d79cb786267d..4347801fa2a4 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_nodes.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_nodes.json @@ -7,7 +7,7 @@ "link": "elasticsearch/nodes", "severity": 1999, "type": "monitoring", - "version_created": 7000099, + "version_created": "${monitoring.version_created}", "watch": "${monitoring.watch.id}" } }, diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_version_mismatch.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_version_mismatch.json index 37132a03c7b6..05fa83966237 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_version_mismatch.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/elasticsearch_version_mismatch.json @@ -7,7 +7,7 @@ "link": "elasticsearch/nodes", "severity": 1000, "type": "monitoring", - "version_created": 7000099, + "version_created": "${monitoring.version_created}", "watch": "${monitoring.watch.id}" } }, diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/kibana_version_mismatch.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/kibana_version_mismatch.json index 3e08fd98843d..b35137ad1405 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/kibana_version_mismatch.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/kibana_version_mismatch.json @@ -7,7 +7,7 @@ "link": "kibana/instances", "severity": 1000, "type": "monitoring", - "version_created": 7000099, + "version_created": "${monitoring.version_created}", 
"watch": "${monitoring.watch.id}" } }, diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/logstash_version_mismatch.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/logstash_version_mismatch.json index 8bb5b5efe9d7..8417ef4d069f 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/logstash_version_mismatch.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/logstash_version_mismatch.json @@ -7,7 +7,7 @@ "link": "logstash/instances", "severity": 1000, "type": "monitoring", - "version_created": 7000099, + "version_created": "${monitoring.version_created}", "watch": "${monitoring.watch.id}" } }, diff --git a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/xpack_license_expiration.json b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/xpack_license_expiration.json index 3f1f49e0240d..350419191411 100644 --- a/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/xpack_license_expiration.json +++ b/x-pack/plugin/monitoring/src/main/resources/monitoring/watches/xpack_license_expiration.json @@ -8,7 +8,7 @@ "alert_index": ".monitoring-alerts-7", "cluster_uuid": "${monitoring.watch.cluster_uuid}", "type": "monitoring", - "version_created": 7000099, + "version_created": "${monitoring.version_created}", "watch": "${monitoring.watch.id}" } }, diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/ClusterAlertsUtilTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/ClusterAlertsUtilTests.java index 868cd17b3eb8..7ce728e2582a 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/ClusterAlertsUtilTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/ClusterAlertsUtilTests.java @@ -68,6 +68,7 @@ public class ClusterAlertsUtilTests extends ESTestCase { assertThat(watch, notNullValue()); assertThat(watch, containsString(clusterUuid)); assertThat(watch, containsString(watchId)); + assertThat(watch, containsString(String.valueOf(ClusterAlertsUtil.LAST_UPDATED_VERSION))); if ("elasticsearch_nodes".equals(watchId) == false) { assertThat(watch, containsString(clusterUuid + "_" + watchId)); From 4aa6a9e6b0097204b0f071792bbf4ebef37d40bd Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Tue, 8 Oct 2019 17:15:49 -0600 Subject: [PATCH 54/55] Separate SLM stop/start/status API from ILM (#47710) * Separate SLM stop/start/status API from ILM This separates a start/stop/status API for SLM from being tied to ILM's operation mode. These APIs look like: ``` POST /_slm/stop POST /_slm/start GET /_slm/status ``` This allows administrators to have fine-grained control over preventing periodic snapshots and deletions while performing cluster maintenance. 
Relates to #43663 * Allow going from RUNNING to STOPPED * Align with the OperationMode rules * Fix slmStopping method * Make OperationModeUpdateTask constructor private * Wipe snapshots better in test --- .../core/slm/action/GetSLMStatusAction.java | 75 +++++++++++++++ .../xpack/core/slm/action/StartSLMAction.java | 55 +++++++++++ .../xpack/core/slm/action/StopSLMAction.java | 55 +++++++++++ .../xpack/slm/SnapshotLifecycleRestIT.java | 92 +++++++++++++++++++ .../xpack/ilm/IndexLifecycle.java | 19 +++- .../xpack/ilm/IndexLifecycleService.java | 3 +- .../xpack/ilm/OperationModeUpdateTask.java | 53 ++++++++--- .../ilm/action/TransportStartILMAction.java | 2 +- .../ilm/action/TransportStopILMAction.java | 2 +- .../xpack/slm/SnapshotLifecycleService.java | 28 +++++- .../xpack/slm/SnapshotRetentionTask.java | 8 +- .../slm/action/RestGetSLMStatusAction.java | 34 +++++++ .../xpack/slm/action/RestStartSLMAction.java | 34 +++++++ .../xpack/slm/action/RestStopSLMAction.java | 34 +++++++ .../action/TransportGetSLMStatusAction.java | 66 +++++++++++++ .../slm/action/TransportStartSLMAction.java | 70 ++++++++++++++ .../slm/action/TransportStopSLMAction.java | 70 ++++++++++++++ .../xpack/ilm/IndexLifecycleServiceTests.java | 2 +- .../ilm/OperationModeUpdateTaskTests.java | 2 +- .../xpack/slm/SnapshotRetentionTaskTests.java | 42 +++++++++ 20 files changed, 719 insertions(+), 27 deletions(-) create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/GetSLMStatusAction.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/StartSLMAction.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/StopSLMAction.java create mode 100644 x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSLMStatusAction.java create mode 100644 x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestStartSLMAction.java create mode 100644 x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestStopSLMAction.java create mode 100644 x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSLMStatusAction.java create mode 100644 x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportStartSLMAction.java create mode 100644 x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportStopSLMAction.java diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/GetSLMStatusAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/GetSLMStatusAction.java new file mode 100644 index 000000000000..83a7d8da5890 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/GetSLMStatusAction.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.slm.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ilm.OperationMode; + +import java.io.IOException; + +public class GetSLMStatusAction extends ActionType { + public static final GetSLMStatusAction INSTANCE = new GetSLMStatusAction(); + public static final String NAME = "cluster:admin/slm/status"; + + protected GetSLMStatusAction() { + super(NAME, GetSLMStatusAction.Response::new); + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private OperationMode mode; + + public Response(StreamInput in) throws IOException { + super(in); + this.mode = in.readEnum(OperationMode.class); + } + + public Response(OperationMode mode) { + this.mode = mode; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(this.mode); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("operation_mode", this.mode); + builder.endObject(); + return builder; + } + } + + public static class Request extends AcknowledgedRequest { + + public Request(StreamInput in) throws IOException { + super(in); + } + + public Request() { + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/StartSLMAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/StartSLMAction.java new file mode 100644 index 000000000000..faea61c379ef --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/StartSLMAction.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.slm.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + +public class StartSLMAction extends ActionType { + public static final StartSLMAction INSTANCE = new StartSLMAction(); + public static final String NAME = "cluster:admin/slm/start"; + + protected StartSLMAction() { + super(NAME, AcknowledgedResponse::new); + } + + public static class Request extends AcknowledgedRequest { + + public Request(StreamInput in) throws IOException { + super(in); + } + + public Request() { + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public int hashCode() { + return 86; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + return true; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/StopSLMAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/StopSLMAction.java new file mode 100644 index 000000000000..a1bbbafa070c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/StopSLMAction.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.slm.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + +public class StopSLMAction extends ActionType { + public static final StopSLMAction INSTANCE = new StopSLMAction(); + public static final String NAME = "cluster:admin/slm/stop"; + + protected StopSLMAction() { + super(NAME, AcknowledgedResponse::new); + } + + public static class Request extends AcknowledgedRequest { + + public Request(StreamInput in) throws IOException { + super(in); + } + + public Request() { + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public int hashCode() { + return 85; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + return true; + } + } +} diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java index 1d82bc77b115..caccbe6e4280 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java @@ -243,6 +243,98 @@ public class SnapshotLifecycleRestIT extends ESRestTestCase { }); } + + @SuppressWarnings("unchecked") + public void testStartStopStatus() throws 
Exception {
+        final String indexName = "test";
+        final String policyName = "start-stop-policy";
+        final String repoId = "start-stop-repo";
+        int docCount = randomIntBetween(10, 50);
+        for (int i = 0; i < docCount; i++) {
+            index(client(), indexName, "" + i, "foo", "bar");
+        }
+
+        // Create a snapshot repo
+        initializeRepo(repoId);
+
+        // Stop SLM so nothing happens
+        client().performRequest(new Request("POST", "/_slm/stop"));
+
+        assertBusy(() -> {
+            logger.info("--> waiting for SLM to stop");
+            assertThat(EntityUtils.toString(client().performRequest(new Request("GET", "/_slm/status")).getEntity()),
+                containsString("STOPPED"));
+        });
+
+        try {
+            createSnapshotPolicy(policyName, "snap", "*/1 * * * * ?", repoId, indexName, true,
+                new SnapshotRetentionConfiguration(TimeValue.ZERO, null, null));
+            long start = System.currentTimeMillis();
+            final String snapshotName = executePolicy(policyName);
+
+            // Check that the executed snapshot is created
+            assertBusy(() -> {
+                try {
+                    logger.info("--> checking for snapshot creation...");
+                    Response response = client().performRequest(new Request("GET", "/_snapshot/" + repoId + "/" + snapshotName));
+                    Map<String, Object> snapshotResponseMap;
+                    try (InputStream is = response.getEntity().getContent()) {
+                        snapshotResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true);
+                    }
+                    assertThat(snapshotResponseMap.size(), greaterThan(0));
+                    final Map<String, Object> metadata = extractMetadata(snapshotResponseMap, snapshotName);
+                    assertNotNull(metadata);
+                    assertThat(metadata.get("policy"), equalTo(policyName));
+                    assertHistoryIsPresent(policyName, true, repoId, CREATE_OPERATION);
+                } catch (ResponseException e) {
+                    fail("expected snapshot to exist but it does not: " + EntityUtils.toString(e.getResponse().getEntity()));
+                }
+            });
+
+            // Wait until at least 1 second has passed since we scheduled the policy, so we can
+            // ensure it *would* have run if SLM were running
+            Thread.sleep(Math.max(0, TimeValue.timeValueSeconds(1).millis() - (System.currentTimeMillis() - start)));
+
+            client().performRequest(new Request("POST", "/_slm/_execute_retention"));
+
+            // Retention and the manually executed policy should still have run (manual execution
+            // works even while SLM is stopped), but only the snapshot we ran manually was taken.
+ assertBusy(() -> { + logger.info("--> checking for stats updates..."); + Map stats = getSLMStats(); + Map policyStats = policyStatsAsMap(stats); + Map policyIdStats = (Map) policyStats.get(policyName); + int snapsTaken = (int) policyIdStats.get(SnapshotLifecycleStats.SnapshotPolicyStats.SNAPSHOTS_TAKEN.getPreferredName()); + int totalTaken = (int) stats.get(SnapshotLifecycleStats.TOTAL_TAKEN.getPreferredName()); + int totalFailed = (int) stats.get(SnapshotLifecycleStats.TOTAL_FAILED.getPreferredName()); + int totalDeleted = (int) stats.get(SnapshotLifecycleStats.TOTAL_DELETIONS.getPreferredName()); + assertThat(snapsTaken, equalTo(1)); + assertThat(totalTaken, equalTo(1)); + assertThat(totalDeleted, equalTo(1)); + assertThat(totalFailed, equalTo(0)); + }); + + assertBusy(() -> { + try { + Map>> snaps = wipeSnapshots(); + logger.info("--> checking for wiped snapshots: {}", snaps); + assertThat(snaps.size(), equalTo(0)); + } catch (ResponseException e) { + logger.error("got exception wiping snapshots", e); + fail("got exception: " + EntityUtils.toString(e.getResponse().getEntity())); + } + }); + } finally { + client().performRequest(new Request("POST", "/_slm/start")); + + assertBusy(() -> { + logger.info("--> waiting for SLM to start"); + assertThat(EntityUtils.toString(client().performRequest(new Request("GET", "/_slm/status")).getEntity()), + containsString("RUNNING")); + }); + } + } + @SuppressWarnings("unchecked") public void testBasicTimeBasedRetenion() throws Exception { final String indexName = "test"; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java index f3cb03775614..397bc0afe6c9 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java @@ -66,9 +66,12 @@ import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; import org.elasticsearch.xpack.core.slm.action.DeleteSnapshotLifecycleAction; import org.elasticsearch.xpack.core.slm.action.ExecuteSnapshotLifecycleAction; import org.elasticsearch.xpack.core.slm.action.ExecuteSnapshotRetentionAction; +import org.elasticsearch.xpack.core.slm.action.GetSLMStatusAction; import org.elasticsearch.xpack.core.slm.action.GetSnapshotLifecycleAction; import org.elasticsearch.xpack.core.slm.action.GetSnapshotLifecycleStatsAction; import org.elasticsearch.xpack.core.slm.action.PutSnapshotLifecycleAction; +import org.elasticsearch.xpack.core.slm.action.StartSLMAction; +import org.elasticsearch.xpack.core.slm.action.StopSLMAction; import org.elasticsearch.xpack.core.slm.history.SnapshotHistoryStore; import org.elasticsearch.xpack.core.slm.history.SnapshotLifecycleTemplateRegistry; import org.elasticsearch.xpack.ilm.action.RestDeleteLifecycleAction; @@ -98,15 +101,21 @@ import org.elasticsearch.xpack.slm.SnapshotRetentionTask; import org.elasticsearch.xpack.slm.action.RestDeleteSnapshotLifecycleAction; import org.elasticsearch.xpack.slm.action.RestExecuteSnapshotLifecycleAction; import org.elasticsearch.xpack.slm.action.RestExecuteSnapshotRetentionAction; +import org.elasticsearch.xpack.slm.action.RestGetSLMStatusAction; import org.elasticsearch.xpack.slm.action.RestGetSnapshotLifecycleAction; import org.elasticsearch.xpack.slm.action.RestGetSnapshotLifecycleStatsAction; import org.elasticsearch.xpack.slm.action.RestPutSnapshotLifecycleAction; +import 
org.elasticsearch.xpack.slm.action.RestStartSLMAction; +import org.elasticsearch.xpack.slm.action.RestStopSLMAction; import org.elasticsearch.xpack.slm.action.TransportDeleteSnapshotLifecycleAction; import org.elasticsearch.xpack.slm.action.TransportExecuteSnapshotLifecycleAction; import org.elasticsearch.xpack.slm.action.TransportExecuteSnapshotRetentionAction; +import org.elasticsearch.xpack.slm.action.TransportGetSLMStatusAction; import org.elasticsearch.xpack.slm.action.TransportGetSnapshotLifecycleAction; import org.elasticsearch.xpack.slm.action.TransportGetSnapshotLifecycleStatsAction; import org.elasticsearch.xpack.slm.action.TransportPutSnapshotLifecycleAction; +import org.elasticsearch.xpack.slm.action.TransportStartSLMAction; +import org.elasticsearch.xpack.slm.action.TransportStopSLMAction; import java.io.IOException; import java.time.Clock; @@ -234,7 +243,10 @@ public class IndexLifecycle extends Plugin implements ActionPlugin { new RestGetSnapshotLifecycleAction(restController), new RestExecuteSnapshotLifecycleAction(restController), new RestGetSnapshotLifecycleStatsAction(restController), - new RestExecuteSnapshotRetentionAction(restController) + new RestExecuteSnapshotRetentionAction(restController), + new RestStopSLMAction(restController), + new RestStartSLMAction(restController), + new RestGetSLMStatusAction(restController) )); } return handlers; @@ -270,7 +282,10 @@ public class IndexLifecycle extends Plugin implements ActionPlugin { new ActionHandler<>(GetSnapshotLifecycleAction.INSTANCE, TransportGetSnapshotLifecycleAction.class), new ActionHandler<>(ExecuteSnapshotLifecycleAction.INSTANCE, TransportExecuteSnapshotLifecycleAction.class), new ActionHandler<>(GetSnapshotLifecycleStatsAction.INSTANCE, TransportGetSnapshotLifecycleStatsAction.class), - new ActionHandler<>(ExecuteSnapshotRetentionAction.INSTANCE, TransportExecuteSnapshotRetentionAction.class) + new ActionHandler<>(ExecuteSnapshotRetentionAction.INSTANCE, TransportExecuteSnapshotRetentionAction.class), + new ActionHandler<>(StartSLMAction.INSTANCE, TransportStartSLMAction.class), + new ActionHandler<>(StopSLMAction.INSTANCE, TransportStopSLMAction.class), + new ActionHandler<>(GetSLMStatusAction.INSTANCE, TransportGetSLMStatusAction.class) )); } return actions; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java index 1cf860bf5c7c..7ab0d5a7e584 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java @@ -307,8 +307,7 @@ public class IndexLifecycleService } public void submitOperationModeUpdate(OperationMode mode) { - clusterService.submitStateUpdateTask("ilm_operation_mode_update", - new OperationModeUpdateTask(mode)); + clusterService.submitStateUpdateTask("ilm_operation_mode_update", OperationModeUpdateTask.ilmMode(mode)); } /** diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/OperationModeUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/OperationModeUpdateTask.java index 53d4a5307b0d..9bb4a8df3d41 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/OperationModeUpdateTask.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/OperationModeUpdateTask.java @@ -10,20 +10,37 @@ import org.apache.logging.log4j.Logger; import 
org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Nullable; import org.elasticsearch.xpack.core.ilm.OperationMode; import org.elasticsearch.xpack.core.ilm.IndexLifecycleMetadata; import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; public class OperationModeUpdateTask extends ClusterStateUpdateTask { private static final Logger logger = LogManager.getLogger(OperationModeUpdateTask.class); - private final OperationMode mode; + @Nullable + private final OperationMode ilmMode; + @Nullable + private final OperationMode slmMode; - public OperationModeUpdateTask(OperationMode mode) { - this.mode = mode; + private OperationModeUpdateTask(OperationMode ilmMode, OperationMode slmMode) { + this.ilmMode = ilmMode; + this.slmMode = slmMode; } - OperationMode getOperationMode() { - return mode; + public static OperationModeUpdateTask ilmMode(OperationMode mode) { + return new OperationModeUpdateTask(mode, null); + } + + public static OperationModeUpdateTask slmMode(OperationMode mode) { + return new OperationModeUpdateTask(null, mode); + } + + OperationMode getILMOperationMode() { + return ilmMode; + } + + OperationMode getSLMOperationMode() { + return slmMode; } @Override @@ -35,20 +52,26 @@ public class OperationModeUpdateTask extends ClusterStateUpdateTask { } private ClusterState updateILMState(final ClusterState currentState) { + if (ilmMode == null) { + return currentState; + } IndexLifecycleMetadata currentMetadata = currentState.metaData().custom(IndexLifecycleMetadata.TYPE); - if (currentMetadata != null && currentMetadata.getOperationMode().isValidChange(mode) == false) { + if (currentMetadata != null && currentMetadata.getOperationMode().isValidChange(ilmMode) == false) { return currentState; } else if (currentMetadata == null) { currentMetadata = IndexLifecycleMetadata.EMPTY; } final OperationMode newMode; - if (currentMetadata.getOperationMode().isValidChange(mode)) { - newMode = mode; + if (currentMetadata.getOperationMode().isValidChange(ilmMode)) { + newMode = ilmMode; } else { newMode = currentMetadata.getOperationMode(); } + if (newMode.equals(ilmMode) == false) { + logger.info("updating ILM operation mode to {}", newMode); + } return ClusterState.builder(currentState) .metaData(MetaData.builder(currentState.metaData()) .putCustom(IndexLifecycleMetadata.TYPE, @@ -57,20 +80,26 @@ public class OperationModeUpdateTask extends ClusterStateUpdateTask { } private ClusterState updateSLMState(final ClusterState currentState) { + if (slmMode == null) { + return currentState; + } SnapshotLifecycleMetadata currentMetadata = currentState.metaData().custom(SnapshotLifecycleMetadata.TYPE); - if (currentMetadata != null && currentMetadata.getOperationMode().isValidChange(mode) == false) { + if (currentMetadata != null && currentMetadata.getOperationMode().isValidChange(slmMode) == false) { return currentState; } else if (currentMetadata == null) { currentMetadata = SnapshotLifecycleMetadata.EMPTY; } final OperationMode newMode; - if (currentMetadata.getOperationMode().isValidChange(mode)) { - newMode = mode; + if (currentMetadata.getOperationMode().isValidChange(slmMode)) { + newMode = slmMode; } else { newMode = currentMetadata.getOperationMode(); } + if (newMode.equals(slmMode) == false) { + logger.info("updating SLM operation mode to {}", newMode); + } return ClusterState.builder(currentState) .metaData(MetaData.builder(currentState.metaData()) 
.putCustom(SnapshotLifecycleMetadata.TYPE, @@ -81,6 +110,6 @@ public class OperationModeUpdateTask extends ClusterStateUpdateTask { @Override public void onFailure(String source, Exception e) { - logger.error("unable to update lifecycle metadata with new mode [" + mode + "]", e); + logger.error("unable to update lifecycle metadata with new ilm mode [" + ilmMode + "], slm mode [" + slmMode + "]", e); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportStartILMAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportStartILMAction.java index cf7836c35b41..341aa98373f3 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportStartILMAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportStartILMAction.java @@ -53,7 +53,7 @@ public class TransportStartILMAction extends TransportMasterNodeAction(request, listener) { @Override public ClusterState execute(ClusterState currentState) { - return (new OperationModeUpdateTask(OperationMode.RUNNING)).execute(currentState); + return (OperationModeUpdateTask.ilmMode(OperationMode.RUNNING)).execute(currentState); } @Override diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportStopILMAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportStopILMAction.java index 308b2dcef34b..c6c37c0fa074 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportStopILMAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportStopILMAction.java @@ -53,7 +53,7 @@ public class TransportStopILMAction extends TransportMasterNodeAction(request, listener) { @Override public ClusterState execute(ClusterState currentState) { - return (new OperationModeUpdateTask(OperationMode.STOPPING)).execute(currentState); + return (OperationModeUpdateTask.ilmMode(OperationMode.STOPPING)).execute(currentState); } @Override diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java index 0d27584d83eb..c0f8e6515877 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java @@ -23,6 +23,7 @@ import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadata; +import org.elasticsearch.xpack.ilm.OperationModeUpdateTask; import java.io.Closeable; import java.time.Clock; @@ -65,10 +66,13 @@ public class SnapshotLifecycleService implements LocalNodeMasterListener, Closea if (this.isMaster) { final ClusterState state = event.state(); - if (ilmStoppedOrStopping(state)) { + if (slmStoppedOrStopping(state)) { if (scheduler.scheduledJobIds().size() > 0) { cancelSnapshotJobs(); } + if (slmStopping(state)) { + submitOperationModeUpdate(OperationMode.STOPPED); + } return; } @@ -82,8 +86,8 @@ public class SnapshotLifecycleService implements LocalNodeMasterListener, Closea this.isMaster = true; scheduler.register(snapshotTask); final ClusterState state = clusterService.state(); - if (ilmStoppedOrStopping(state)) { - // ILM is currently stopped, so don't schedule jobs 
+ if (slmStoppedOrStopping(state)) { + // SLM is currently stopped, so don't schedule jobs return; } scheduleSnapshotJobs(state); @@ -102,15 +106,29 @@ public class SnapshotLifecycleService implements LocalNodeMasterListener, Closea } /** - * Returns true if ILM is in the stopped or stopped state + * Returns true if SLM is in the stopping or stopped state */ - static boolean ilmStoppedOrStopping(ClusterState state) { + static boolean slmStoppedOrStopping(ClusterState state) { return Optional.ofNullable((SnapshotLifecycleMetadata) state.metaData().custom(SnapshotLifecycleMetadata.TYPE)) .map(SnapshotLifecycleMetadata::getOperationMode) .map(mode -> OperationMode.STOPPING == mode || OperationMode.STOPPED == mode) .orElse(false); } + /** + * Returns true if SLM is in the stopping state + */ + static boolean slmStopping(ClusterState state) { + return Optional.ofNullable((SnapshotLifecycleMetadata) state.metaData().custom(SnapshotLifecycleMetadata.TYPE)) + .map(SnapshotLifecycleMetadata::getOperationMode) + .map(mode -> OperationMode.STOPPING == mode) + .orElse(false); + } + + public void submitOperationModeUpdate(OperationMode mode) { + clusterService.submitStateUpdateTask("slm_operation_mode_update", OperationModeUpdateTask.slmMode(mode)); + } + /** * Schedule all non-scheduled snapshot jobs contained in the cluster state */ diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java index 73759c033e3a..c62538fed1b7 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java @@ -90,8 +90,12 @@ public class SnapshotRetentionTask implements SchedulerEngine.Listener { SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID + " but it was " + event.getJobName(); final ClusterState state = clusterService.state(); - if (SnapshotLifecycleService.ilmStoppedOrStopping(state)) { - logger.debug("skipping SLM retention as ILM is currently stopped or stopping"); + + // Skip running retention if SLM is disabled, however, even if it's + // disabled we allow manual running. + if (SnapshotLifecycleService.slmStoppedOrStopping(state) && + event.getJobName().equals(SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID) == false) { + logger.debug("skipping SLM retention as SLM is currently stopped or stopping"); return; } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSLMStatusAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSLMStatusAction.java new file mode 100644 index 000000000000..26f3197a7612 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSLMStatusAction.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+
+package org.elasticsearch.xpack.slm.action;
+
+import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.action.RestToXContentListener;
+import org.elasticsearch.xpack.core.slm.action.GetSLMStatusAction;
+
+public class RestGetSLMStatusAction extends BaseRestHandler {
+
+    public RestGetSLMStatusAction(RestController controller) {
+        controller.registerHandler(RestRequest.Method.GET, "/_slm/status", this);
+    }
+
+    @Override
+    public String getName() {
+        return "slm_get_operation_mode_action";
+    }
+
+    @Override
+    protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) {
+        GetSLMStatusAction.Request request = new GetSLMStatusAction.Request();
+        request.timeout(restRequest.paramAsTime("timeout", request.timeout()));
+        request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout()));
+        return channel -> client.execute(GetSLMStatusAction.INSTANCE, request, new RestToXContentListener<>(channel));
+    }
+}
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestStartSLMAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestStartSLMAction.java
new file mode 100644
index 000000000000..87dc7d2bb227
--- /dev/null
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestStartSLMAction.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.slm.action;
+
+import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.action.RestToXContentListener;
+import org.elasticsearch.xpack.core.slm.action.StartSLMAction;
+
+public class RestStartSLMAction extends BaseRestHandler {
+
+    public RestStartSLMAction(RestController controller) {
+        controller.registerHandler(RestRequest.Method.POST, "/_slm/start", this);
+    }
+
+    @Override
+    public String getName() {
+        return "slm_start_action";
+    }
+
+    @Override
+    protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) {
+        StartSLMAction.Request request = new StartSLMAction.Request();
+        request.timeout(restRequest.paramAsTime("timeout", request.timeout()));
+        request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout()));
+        return channel -> client.execute(StartSLMAction.INSTANCE, request, new RestToXContentListener<>(channel));
+    }
+}
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestStopSLMAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestStopSLMAction.java
new file mode 100644
index 000000000000..ac74b37d5875
--- /dev/null
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestStopSLMAction.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.slm.action;
+
+import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.action.RestToXContentListener;
+import org.elasticsearch.xpack.core.slm.action.StopSLMAction;
+
+public class RestStopSLMAction extends BaseRestHandler {
+
+    public RestStopSLMAction(RestController controller) {
+        controller.registerHandler(RestRequest.Method.POST, "/_slm/stop", this);
+    }
+
+    @Override
+    public String getName() {
+        return "slm_stop_action";
+    }
+
+    @Override
+    protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) {
+        StopSLMAction.Request request = new StopSLMAction.Request();
+        request.timeout(restRequest.paramAsTime("timeout", request.timeout()));
+        request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout()));
+        return channel -> client.execute(StopSLMAction.INSTANCE, request, new RestToXContentListener<>(channel));
+    }
+}
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSLMStatusAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSLMStatusAction.java
new file mode 100644
index 000000000000..ca8548ca31f1
--- /dev/null
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSLMStatusAction.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.slm.action;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.master.TransportMasterNodeAction;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.ilm.OperationMode;
+import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata;
+import org.elasticsearch.xpack.core.slm.action.GetSLMStatusAction;
+
+import java.io.IOException;
+
+public class TransportGetSLMStatusAction extends TransportMasterNodeAction {
+
+    @Inject
+    public TransportGetSLMStatusAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
+                                       ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
+        super(GetSLMStatusAction.NAME, transportService, clusterService, threadPool, actionFilters,
+            GetSLMStatusAction.Request::new, indexNameExpressionResolver);
+    }
+
+    @Override
+    protected String executor() {
+        return ThreadPool.Names.SAME;
+    }
+
+    @Override
+    protected GetSLMStatusAction.Response read(StreamInput in) throws IOException {
+        return new GetSLMStatusAction.Response(in);
+    }
+
+    @Override
+    protected void masterOperation(Task task, GetSLMStatusAction.Request request,
+                                   ClusterState state, ActionListener listener) {
+        SnapshotLifecycleMetadata metadata = state.metaData().custom(SnapshotLifecycleMetadata.TYPE);
+        final GetSLMStatusAction.Response response;
+        if (metadata == null) {
+            // no need to actually install metadata just yet, but safe to say it is not stopped
+            response = new GetSLMStatusAction.Response(OperationMode.RUNNING);
+        } else {
+            response = new GetSLMStatusAction.Response(metadata.getOperationMode());
+        }
+        listener.onResponse(response);
+    }
+
+    @Override
+    protected ClusterBlockException checkBlock(GetSLMStatusAction.Request request, ClusterState state) {
+        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
+    }
+}
+
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportStartSLMAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportStartSLMAction.java
new file mode 100644
index 000000000000..17fdb62be381
--- /dev/null
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportStartSLMAction.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.slm.action;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.action.support.master.TransportMasterNodeAction;
+import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.ilm.OperationMode;
+import org.elasticsearch.xpack.core.slm.action.StartSLMAction;
+import org.elasticsearch.xpack.ilm.OperationModeUpdateTask;
+
+import java.io.IOException;
+
+public class TransportStartSLMAction extends TransportMasterNodeAction {
+
+    @Inject
+    public TransportStartSLMAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
+                                   ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
+        super(StartSLMAction.NAME, transportService, clusterService, threadPool, actionFilters, StartSLMAction.Request::new,
+            indexNameExpressionResolver);
+    }
+
+    @Override
+    protected String executor() {
+        return ThreadPool.Names.SAME;
+    }
+
+    @Override
+    protected AcknowledgedResponse read(StreamInput in) throws IOException {
+        return new AcknowledgedResponse(in);
+    }
+
+    @Override
+    protected void masterOperation(Task task, StartSLMAction.Request request, ClusterState state,
+                                   ActionListener listener) {
+        clusterService.submitStateUpdateTask("slm_operation_mode_update",
+            new AckedClusterStateUpdateTask(request, listener) {
+                @Override
+                public ClusterState execute(ClusterState currentState) {
+                    return (OperationModeUpdateTask.slmMode(OperationMode.RUNNING)).execute(currentState);
+                }
+
+                @Override
+                protected AcknowledgedResponse newResponse(boolean acknowledged) {
+                    return new AcknowledgedResponse(acknowledged);
+                }
+            });
+    }
+
+    @Override
+    protected ClusterBlockException checkBlock(StartSLMAction.Request request, ClusterState state) {
+        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
+    }
+}
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportStopSLMAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportStopSLMAction.java
new file mode 100644
index 000000000000..15765d069fef
--- /dev/null
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportStopSLMAction.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.slm.action;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.action.support.master.TransportMasterNodeAction;
+import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.ilm.OperationMode;
+import org.elasticsearch.xpack.core.slm.action.StopSLMAction;
+import org.elasticsearch.xpack.ilm.OperationModeUpdateTask;
+
+import java.io.IOException;
+
+public class TransportStopSLMAction extends TransportMasterNodeAction {
+
+    @Inject
+    public TransportStopSLMAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
+                                  ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
+        super(StopSLMAction.NAME, transportService, clusterService, threadPool, actionFilters, StopSLMAction.Request::new,
+            indexNameExpressionResolver);
+    }
+
+    @Override
+    protected String executor() {
+        return ThreadPool.Names.SAME;
+    }
+
+    @Override
+    protected AcknowledgedResponse read(StreamInput in) throws IOException {
+        return new AcknowledgedResponse(in);
+    }
+
+    @Override
+    protected void masterOperation(Task task, StopSLMAction.Request request, ClusterState state,
+                                   ActionListener listener) {
+        clusterService.submitStateUpdateTask("slm_operation_mode_update",
+            new AckedClusterStateUpdateTask(request, listener) {
+                @Override
+                public ClusterState execute(ClusterState currentState) {
+                    return (OperationModeUpdateTask.slmMode(OperationMode.STOPPING)).execute(currentState);
+                }
+
+                @Override
+                protected AcknowledgedResponse newResponse(boolean acknowledged) {
+                    return new AcknowledgedResponse(acknowledged);
+                }
+            });
+    }
+
+    @Override
+    protected ClusterBlockException checkBlock(StopSLMAction.Request request, ClusterState state) {
+        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
+    }
+}
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java
index 87151627b02c..996d208aae91 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java
@@ -289,7 +289,7 @@ public class IndexLifecycleServiceTests extends ESTestCase {
 
         doAnswer(invocationOnMock -> {
             OperationModeUpdateTask task = (OperationModeUpdateTask) invocationOnMock.getArguments()[1];
-            assertThat(task.getOperationMode(), equalTo(OperationMode.STOPPED));
+            assertThat(task.getILMOperationMode(), equalTo(OperationMode.STOPPED));
             moveToMaintenance.set(true);
             return null;
         }).when(clusterService).submitStateUpdateTask(eq("ilm_operation_mode_update"), any(OperationModeUpdateTask.class));
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/OperationModeUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/OperationModeUpdateTaskTests.java
index f3ed5924cfeb..7e361ca9b87b 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/OperationModeUpdateTaskTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/OperationModeUpdateTaskTests.java
@@ -71,7 +71,7 @@ public class OperationModeUpdateTaskTests extends ESTestCase {
                 .build());
         }
         ClusterState state = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).build();
-        OperationModeUpdateTask task = new OperationModeUpdateTask(requestMode);
+        OperationModeUpdateTask task = OperationModeUpdateTask.ilmMode(requestMode);
         ClusterState newState = task.execute(state);
         if (assertSameClusterState) {
             assertSame(state, newState);
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java
index 24d137f6839e..dff01f8e9b0b 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java
@@ -50,6 +50,7 @@ import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.Consumer;
 import java.util.function.Function;
@@ -402,6 +403,47 @@
         }
     }
 
+    public void testRunManuallyWhileStopping() throws Exception {
+        doTestRunManuallyDuringMode(OperationMode.STOPPING);
+    }
+
+    public void testRunManuallyWhileStopped() throws Exception {
+        doTestRunManuallyDuringMode(OperationMode.STOPPED);
+    }
+
+    private void doTestRunManuallyDuringMode(OperationMode mode) throws Exception {
+        try (ThreadPool threadPool = new TestThreadPool("slm-test");
+             ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
+             Client noOpClient = new NoOpClient("slm-test")) {
+            final String policyId = "policy";
+            final String repoId = "repo";
+            SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy(policyId, "snap", "1 * * * * ?",
+                repoId, null, new SnapshotRetentionConfiguration(TimeValue.timeValueDays(30), null, null));
+
+            ClusterState state = createState(mode, policy);
+            ClusterServiceUtils.setState(clusterService, state);
+
+            AtomicBoolean retentionWasRun = new AtomicBoolean(false);
+            MockSnapshotRetentionTask task = new MockSnapshotRetentionTask(noOpClient, clusterService,
+                new SnapshotLifecycleTaskTests.VerifyingHistoryStore(noOpClient, ZoneOffset.UTC, (historyItem) -> { }),
+                threadPool,
+                () -> {
+                    retentionWasRun.set(true);
+                    return Collections.emptyMap();
+                },
+                (deletionPolicyId, repo, snapId, slmStats, listener) -> { },
+                System::nanoTime);
+
+            long time = System.currentTimeMillis();
+            task.triggered(new SchedulerEngine.Event(SnapshotRetentionService.SLM_RETENTION_MANUAL_JOB_ID, time, time));
+
+            assertTrue("retention should be run manually even if SLM is disabled", retentionWasRun.get());
+
+            threadPool.shutdownNow();
+            threadPool.awaitTermination(10, TimeUnit.SECONDS);
+        }
+    }
+
     public ClusterState createState(SnapshotLifecyclePolicy... policies) {
         return createState(OperationMode.RUNNING, policies);
     }

From 603c3e6211b05061589f3e6ee21752f72a993618 Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Tue, 8 Oct 2019 16:59:26 -0700
Subject: [PATCH 55/55] Filter out special gradle threads from leak control (#47713)

This commit adds a thread filter for gradle unit tests that exempts
from leak control any threads gradle creates and that we cannot shut
down ourselves. The current example is the thread pool gradle uses for
project.exec; a brief usage sketch follows this patch.

closes #47417
---
 .../gradle/test/GradleThreadsFilter.java      | 35 +++++++++++++++++++
 .../gradle/test/GradleUnitTestCase.java       |  4 +++
 2 files changed, 39 insertions(+)
 create mode 100644 buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleThreadsFilter.java

diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleThreadsFilter.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleThreadsFilter.java
new file mode 100644
index 000000000000..179cce23bce7
--- /dev/null
+++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleThreadsFilter.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gradle.test;
+
+import com.carrotsearch.randomizedtesting.ThreadFilter;
+
+/**
+ * Filter out threads controlled by gradle that may be created during unit tests.
+ *
+ * Currently this is only the pooled threads for Exec.
+ */
+public class GradleThreadsFilter implements ThreadFilter {
+
+    @Override
+    public boolean reject(Thread t) {
+        return t.getName().startsWith("Exec process");
+    }
+}
diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleUnitTestCase.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleUnitTestCase.java
index b24624c7854b..58852230bb7e 100644
--- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleUnitTestCase.java
+++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleUnitTestCase.java
@@ -3,6 +3,7 @@ package org.elasticsearch.gradle.test;
 import com.carrotsearch.randomizedtesting.JUnit4MethodProvider;
 import com.carrotsearch.randomizedtesting.RandomizedRunner;
 import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 import org.junit.runner.RunWith;
 
 @RunWith(RandomizedRunner.class)
@@ -10,5 +11,8 @@ import org.junit.runner.RunWith;
     JUnit4MethodProvider.class,
     JUnit3MethodProvider.class
 })
+@ThreadLeakFilters(defaultFilters = true, filters = {
+    GradleThreadsFilter.class
+})
 public abstract class GradleUnitTestCase extends BaseTestCase {
 }
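
To make the thread-filter mechanism above concrete, here is a minimal, self-contained usage sketch. Only the ThreadFilter interface and the ThreadLeakFilters annotation are taken from the patch; the package, class, and thread names are invented for illustration. The test deliberately leaks a daemon thread whose name matches the filtered prefix, and the suite still passes because reject(...) returning true exempts that thread from randomizedtesting's leak control.

    package org.example.test;

    import com.carrotsearch.randomizedtesting.RandomizedRunner;
    import com.carrotsearch.randomizedtesting.ThreadFilter;
    import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
    import org.junit.Test;
    import org.junit.runner.RunWith;

    import java.util.concurrent.CountDownLatch;

    @RunWith(RandomizedRunner.class)
    @ThreadLeakFilters(defaultFilters = true, filters = { ThreadFilterExampleTests.ExecProcessThreadsFilter.class })
    public class ThreadFilterExampleTests {

        /** Threads for which reject(...) returns true are exempt from leak detection. */
        public static class ExecProcessThreadsFilter implements ThreadFilter {
            @Override
            public boolean reject(Thread t) {
                // Same convention as the patch: match on the name prefix of the
                // pool we cannot shut down ourselves.
                return t.getName().startsWith("Exec process");
            }
        }

        @Test
        public void leakedPooledThreadIsIgnored() throws Exception {
            CountDownLatch started = new CountDownLatch(1);
            Thread pooled = new Thread(() -> {
                started.countDown();
                try {
                    Thread.sleep(60_000); // deliberately outlives the test
                } catch (InterruptedException ignored) {
                }
            }, "Exec process [example]");
            pooled.setDaemon(true);
            pooled.start();
            started.await();
            // Without the filter above, RandomizedRunner's thread-leak control
            // would fail the suite because this thread is still alive afterwards.
        }
    }

Filtering on a thread-name prefix, as the patch does, is the pragmatic choice here: the pooled threads are created by code the test suite does not own, so a naming convention is the only stable handle on them.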
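
Returning to the OperationModeUpdateTask changes earlier in this series: replacing the single-argument constructor with ilmMode(...) and slmMode(...) factories lets one task type update either component's operation mode while leaving the other untouched. The sketch below is hypothetical and simplified (invented names; the real class operates on cluster-state metadata rather than plain strings), but it shows the shape of that two-factory pattern:

    // Simplified illustration of a task that can carry a mode change for one
    // of two subsystems; a null field means "leave that subsystem as-is".
    public final class ModeUpdateTask {

        public enum Mode { RUNNING, STOPPING, STOPPED }

        private final Mode ilmMode; // null: leave the ILM mode unchanged
        private final Mode slmMode; // null: leave the SLM mode unchanged

        private ModeUpdateTask(Mode ilmMode, Mode slmMode) {
            this.ilmMode = ilmMode;
            this.slmMode = slmMode;
        }

        public static ModeUpdateTask ilmMode(Mode mode) {
            return new ModeUpdateTask(mode, null);
        }

        public static ModeUpdateTask slmMode(Mode mode) {
            return new ModeUpdateTask(null, mode);
        }

        /** Compute the resulting pair of modes from the current ones. */
        public String apply(Mode currentIlm, Mode currentSlm) {
            Mode newIlm = ilmMode != null ? ilmMode : currentIlm;
            Mode newSlm = slmMode != null ? slmMode : currentSlm;
            return "ilm=" + newIlm + ", slm=" + newSlm;
        }

        public static void main(String[] args) {
            // Stopping SLM leaves ILM's mode alone:
            System.out.println(ModeUpdateTask.slmMode(Mode.STOPPING).apply(Mode.RUNNING, Mode.RUNNING));
            // -> ilm=RUNNING, slm=STOPPING
        }
    }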