From d7278188703622f075a33b389d26c2d787cf7065 Mon Sep 17 00:00:00 2001 From: Dimitris Rempapis Date: Thu, 16 Jan 2025 11:42:50 +0200 Subject: [PATCH 01/30] Test/standard token filter archive indices (#120107) Create tests for archive indices for custom analyzers including a standard token filter ES v5/6 --- x-pack/qa/repository-old-versions/README.md | 16 +++++ .../elasticsearch/oldrepos/OldMappingsIT.java | 60 ++++++++++++++++--- .../oldrepos/standard_token_filter.json | 8 +++ 3 files changed, 76 insertions(+), 8 deletions(-) create mode 100644 x-pack/qa/repository-old-versions/README.md create mode 100644 x-pack/qa/repository-old-versions/src/test/resources/org/elasticsearch/oldrepos/standard_token_filter.json diff --git a/x-pack/qa/repository-old-versions/README.md b/x-pack/qa/repository-old-versions/README.md new file mode 100644 index 000000000000..057877ecc99d --- /dev/null +++ b/x-pack/qa/repository-old-versions/README.md @@ -0,0 +1,16 @@ + +### Project repository-old-versions + +Test project, for Lucene indices backward compatibility with versions before N-2 +(Archive-indices). + +The project aims to do the following +1. Deploy a cluster in version 5 / 6 +2. Create an index, add a document, verify index integrity, create a snapshot +3. Deploy a cluster in the Current version +4. 
Restore an index and verify index integrity + + + + + diff --git a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldMappingsIT.java b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldMappingsIT.java index 95bc92d4f185..e594655ed21a 100644 --- a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldMappingsIT.java +++ b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldMappingsIT.java @@ -17,6 +17,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.WarningsHandler; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; @@ -25,6 +26,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Booleans; import org.elasticsearch.core.PathUtils; import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; @@ -36,6 +38,7 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasSize; @@ -80,9 +83,9 @@ public class OldMappingsIT extends ESRestTestCase { String snapshotName = "snap"; List indices; if (oldVersion.before(Version.fromString("6.0.0"))) { - indices = Arrays.asList("filebeat", "winlogbeat", "custom", "nested"); + indices = Arrays.asList("filebeat", "winlogbeat", "custom", "nested", "standard_token_filter"); } else { 
- indices = Arrays.asList("filebeat", "custom", "nested"); + indices = Arrays.asList("filebeat", "custom", "nested", "standard_token_filter"); } int oldEsPort = Integer.parseInt(System.getProperty("tests.es.port")); @@ -92,6 +95,20 @@ public class OldMappingsIT extends ESRestTestCase { if (oldVersion.before(Version.fromString("6.0.0"))) { assertOK(oldEs.performRequest(createIndex("winlogbeat", "winlogbeat.json"))); } + assertOK( + oldEs.performRequest( + createIndex( + "standard_token_filter", + "standard_token_filter.json", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.analysis.analyzer.custom_analyzer.type", "custom") + .put("index.analysis.analyzer.custom_analyzer.tokenizer", "standard") + .put("index.analysis.analyzer.custom_analyzer.filter", "standard") + .build() + ) + ) + ); assertOK(oldEs.performRequest(createIndex("custom", "custom.json"))); assertOK(oldEs.performRequest(createIndex("nested", "nested.json"))); @@ -143,6 +160,12 @@ public class OldMappingsIT extends ESRestTestCase { doc3.setJsonEntity(Strings.toString(bodyDoc3)); assertOK(oldEs.performRequest(doc3)); + Request doc4 = new Request("POST", "/" + "standard_token_filter" + "/" + "doc"); + doc4.addParameter("refresh", "true"); + XContentBuilder bodyDoc4 = XContentFactory.jsonBuilder().startObject().field("content", "Doc 1").endObject(); + doc4.setJsonEntity(Strings.toString(bodyDoc4)); + assertOK(oldEs.performRequest(doc4)); + // register repo on old ES and take snapshot Request createRepoRequest = new Request("PUT", "/_snapshot/" + repoName); createRepoRequest.setJsonEntity(Strings.format(""" @@ -174,15 +197,21 @@ public class OldMappingsIT extends ESRestTestCase { } private Request createIndex(String indexName, String file) throws IOException { + return createIndex(indexName, file, Settings.EMPTY); + } + + private Request createIndex(String indexName, String file, Settings settings) throws IOException { Request createIndex = new Request("PUT", "/" + 
indexName); int numberOfShards = randomIntBetween(1, 3); - XContentBuilder builder = XContentFactory.jsonBuilder() - .startObject() - .startObject("settings") - .field("index.number_of_shards", numberOfShards) - .endObject() - .startObject("mappings"); + XContentBuilder builder = XContentFactory.jsonBuilder().startObject(); + + builder.startObject("settings"); + builder.field(SETTING_NUMBER_OF_SHARDS, numberOfShards); + settings.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + builder.startObject("mappings"); builder.rawValue(OldMappingsIT.class.getResourceAsStream(file), XContentType.JSON); builder.endObject().endObject(); @@ -202,6 +231,21 @@ public class OldMappingsIT extends ESRestTestCase { } } + public void testStandardTokenFilter() throws IOException { + Request search = new Request("POST", "/" + "standard_token_filter" + "/_search"); + XContentBuilder query = XContentBuilder.builder(XContentType.JSON.xContent()) + .startObject() + .startObject("query") + .startObject("match_all") + .endObject() + .endObject() + .endObject(); + search.setJsonEntity(Strings.toString(query)); + Map response = entityAsMap(client().performRequest(search)); + List hits = (List) (XContentMapValues.extractValue("hits.hits", response)); + assertThat(hits, hasSize(1)); + } + public void testSearchKeyword() throws IOException { Request search = new Request("POST", "/" + "custom" + "/_search"); XContentBuilder query = XContentBuilder.builder(XContentType.JSON.xContent()) diff --git a/x-pack/qa/repository-old-versions/src/test/resources/org/elasticsearch/oldrepos/standard_token_filter.json b/x-pack/qa/repository-old-versions/src/test/resources/org/elasticsearch/oldrepos/standard_token_filter.json new file mode 100644 index 000000000000..dfaab0dfd60e --- /dev/null +++ b/x-pack/qa/repository-old-versions/src/test/resources/org/elasticsearch/oldrepos/standard_token_filter.json @@ -0,0 +1,8 @@ +"_default_": { + "properties": { + "content": { + "type": "text", + 
"analyzer": "custom_analyzer" + } + } +} From cd6d5e5cf4623d862d07705107a7ce4a21373dcc Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Thu, 16 Jan 2025 11:22:57 +0100 Subject: [PATCH 02/30] Fix {lookup-join.MvJoinKeyFromRow ASYNC} (#120259) The test was failing due to the different order of rows in the result. This change fixes the test by explicitly sorting the rows. --- muted-tests.yml | 3 --- .../qa/testFixtures/src/main/resources/lookup-join.csv-spec | 5 +++-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 69e6ba22b84b..053b5e5c06dd 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -251,9 +251,6 @@ tests: - class: org.elasticsearch.oldrepos.OldRepositoryAccessIT method: testOldSourceOnlyRepoAccess issue: https://github.com/elastic/elasticsearch/issues/120080 -- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT - method: test {lookup-join.MvJoinKeyFromRow ASYNC} - issue: https://github.com/elastic/elasticsearch/issues/120242 # Examples: # diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec index 0bad68cd9b28..8d24ddb45602 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec @@ -450,13 +450,14 @@ required_capability: join_lookup_v11 ROW language_code = [4, 5, 6, 7] | LOOKUP JOIN languages_lookup_non_unique_key ON language_code | KEEP language_code, language_name, country +| SORT language_code, language_name, country ; language_code:integer | language_name:keyword | country:text -[4, 5, 6, 7] | Quenya | null -[4, 5, 6, 7] | null | Atlantis [4, 5, 6, 7] | Mv-Lang | Mv-Land [4, 5, 6, 7] | Mv-Lang2 | Mv-Land2 +[4, 5, 6, 7] | Quenya | null +[4, 5, 6, 7] | null | Atlantis ; mvJoinKeyFromRowExpanded From 6b1112d5ea8f77206c9037f454a79c9b9291f909 Mon Sep 17 00:00:00 2001 
From: Martijn van Groningen Date: Thu, 16 Jan 2025 11:59:54 +0100 Subject: [PATCH 03/30] Use getMaxDataNodeCompatibleIndexVersion() in LogsdbIndexModeSettingsProvider (#120123) This matches with the version being used when actually creating the index in cluster state on elected master node. Avoids creating `MappingService` instance in `LogsdbIndexModeSettingsProvider ` with settings that aren't supported with minimum support index version. Co-authored-by: Kostas Krikellas --- .../xpack/logsdb/LogsdbRestIT.java | 53 +++++++++++++++++++ .../xpack/logsdb/LogsDBPlugin.java | 6 ++- .../LogsdbIndexModeSettingsProvider.java | 2 +- 3 files changed, 59 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java index bd8093c0a01c..177858b84ad4 100644 --- a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java @@ -281,4 +281,57 @@ public class LogsdbRestIT extends ESRestTestCase { assertEquals("true", settings.get(IndexSettings.LOGSDB_ROUTE_ON_SORT_FIELDS.getKey())); assertEquals(List.of("host.name", "message"), settings.get(IndexMetadata.INDEX_ROUTING_PATH.getKey())); } + + public void testLogsdbDefaultWithRecoveryUseSyntheticSource() throws IOException { + Request request = new Request("PUT", "/_cluster/settings"); + request.setJsonEntity("{ \"transient\": { \"cluster.logsdb.enabled\": true } }"); + assertOK(client().performRequest(request)); + + request = new Request("POST", "/_index_template/1"); + request.setJsonEntity(""" + { + "index_patterns": ["my-log-*"], + "data_stream": { + }, + "template": { + "settings":{ + "index": { + "mode": "logsdb", + "recovery.use_synthetic_source" : "true" + } + }, + "mappings": { + "properties": { + "@timestamp" : { + "type": "date" + }, 
+ "host.name": { + "type": "keyword" + }, + "message": { + "type": "keyword" + } + } + } + } + } + """); + assertOK(client().performRequest(request)); + + request = new Request("POST", "/my-log-foo/_doc"); + request.setJsonEntity(""" + { + "@timestamp": "2020-01-01T00:00:00.000Z", + "host.name": "foo", + "message": "bar" + } + """); + assertOK(client().performRequest(request)); + + String index = DataStream.getDefaultBackingIndexName("my-log-foo", 1); + var settings = (Map) ((Map) getIndexSettings(index).get(index)).get("settings"); + assertEquals("logsdb", settings.get("index.mode")); + assertNull(settings.get("index.mapping.source.mode")); + assertEquals("true", settings.get(IndexSettings.LOGSDB_SORT_ON_HOST_NAME.getKey())); + } } diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java index 266847209f49..6c18626edfb7 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsDBPlugin.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettingProvider; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.license.LicenseService; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.ActionPlugin; @@ -69,7 +70,10 @@ public class LogsDBPlugin extends Plugin implements ActionPlugin { public Collection getAdditionalIndexSettingProviders(IndexSettingProvider.Parameters parameters) { logsdbIndexModeSettingsProvider.init( parameters.mapperServiceFactory(), - () -> parameters.clusterService().state().nodes().getMinSupportedIndexVersion(), + () -> IndexVersion.min( + IndexVersion.current(), + 
parameters.clusterService().state().nodes().getMaxDataNodeCompatibleIndexVersion() + ), DiscoveryNode.isStateless(settings) == false ); return List.of(logsdbIndexModeSettingsProvider); diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java index b2bdfb459981..29b3a80ce289 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/LogsdbIndexModeSettingsProvider.java @@ -268,7 +268,7 @@ final class LogsdbIndexModeSettingsProvider implements IndexSettingProvider { // In case invalid mappings or setting are provided, then mapper service creation can fail. // In that case it is ok to return false here. The index creation will fail anyway later, so no need to fallback to stored // source. - LOGGER.info(() -> Strings.format("unable to create mapper service for index [%s]", indexName), e); + LOGGER.warn(() -> Strings.format("unable to create mapper service for index [%s]", indexName), e); return MappingHints.EMPTY; } } From 4b59fa7e6e9f5955a19bcd7bf552263065ae842b Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Thu, 16 Jan 2025 12:00:57 +0100 Subject: [PATCH 04/30] Stop retaining reference to intermediary aggregation results in QueryPhaseResultConsumer (#119984) We retained a reference to the partial `MergeResult` until after the search response has been sent. This can waste a lot of memory in some cases where partial merges don't do much to reduce memory consumption. Lets `null` out all the fields that may retain heavyweight references on `reduce`. Also, creating new lists saves churn and makes it easier to reason about things for the 2 mutable lists this makes non-final and saves some copying. 
--- .../search/QueryPhaseResultConsumer.java | 114 +++++++++--------- 1 file changed, 55 insertions(+), 59 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java index 37d5065fdd03..9a8dd94dcd32 100644 --- a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java +++ b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java @@ -29,7 +29,6 @@ import org.elasticsearch.search.rank.context.QueryPhaseRankCoordinatorContext; import java.util.ArrayDeque; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.List; @@ -67,8 +66,8 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults onPartialMergeFailure; private final int batchReduceSize; - private final List buffer = new ArrayList<>(); - private final List emptyResults = new ArrayList<>(); + private List buffer = new ArrayList<>(); + private List emptyResults = new ArrayList<>(); // the memory that is accounted in the circuit breaker for this consumer private volatile long circuitBreakerBytes; // the memory that is currently used in the buffer @@ -159,32 +158,40 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults buffer; + synchronized (this) { + // final reduce, we're done with the buffer so we just null it out and continue with a local variable to + // save field references. The synchronized block is never contended but needed to have a memory barrier and sync buffer's + // contents with all the previous writers to it + buffer = this.buffer; + buffer = buffer == null ? 
Collections.emptyList() : buffer; + this.buffer = null; + } // ensure consistent ordering - sortBuffer(); + buffer.sort(RESULT_COMPARATOR); final TopDocsStats topDocsStats = this.topDocsStats; + var mergeResult = this.mergeResult; + this.mergeResult = null; final int resultSize = buffer.size() + (mergeResult == null ? 0 : 1); final List topDocsList = hasTopDocs ? new ArrayList<>(resultSize) : null; final List> aggsList = hasAggs ? new ArrayList<>(resultSize) : null; - synchronized (this) { - if (mergeResult != null) { - if (topDocsList != null) { - topDocsList.add(mergeResult.reducedTopDocs); - } - if (aggsList != null) { - aggsList.add(DelayableWriteable.referencing(mergeResult.reducedAggs)); - } + if (mergeResult != null) { + if (topDocsList != null) { + topDocsList.add(mergeResult.reducedTopDocs); } - for (QuerySearchResult result : buffer) { - topDocsStats.add(result.topDocs(), result.searchTimedOut(), result.terminatedEarly()); - if (topDocsList != null) { - TopDocsAndMaxScore topDocs = result.consumeTopDocs(); - setShardIndex(topDocs.topDocs, result.getShardIndex()); - topDocsList.add(topDocs.topDocs); - } - if (aggsList != null) { - aggsList.add(result.getAggs()); - } + if (aggsList != null) { + aggsList.add(DelayableWriteable.referencing(mergeResult.reducedAggs)); + } + } + for (QuerySearchResult result : buffer) { + topDocsStats.add(result.topDocs(), result.searchTimedOut(), result.terminatedEarly()); + if (topDocsList != null) { + TopDocsAndMaxScore topDocs = result.consumeTopDocs(); + setShardIndex(topDocs.topDocs, result.getShardIndex()); + topDocsList.add(topDocs.topDocs); + } + if (aggsList != null) { + aggsList.add(result.getAggs()); } } SearchPhaseController.ReducedQueryPhase reducePhase; @@ -206,7 +213,7 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults RESULT_COMPARATOR = Comparator.comparingInt(QuerySearchResult::getShardIndex); private MergeResult partialReduce( - QuerySearchResult[] toConsume, - List emptyResults, + List 
toConsume, + List processedShards, TopDocsStats topDocsStats, MergeResult lastMerge, int numReducePhases ) { // ensure consistent ordering - Arrays.sort(toConsume, RESULT_COMPARATOR); + toConsume.sort(RESULT_COMPARATOR); - final List processedShards = new ArrayList<>(emptyResults); final TopDocs newTopDocs; final InternalAggregations newAggs; final List> aggsList; - final int resultSetSize = toConsume.length + (lastMerge != null ? 1 : 0); + final int resultSetSize = toConsume.size() + (lastMerge != null ? 1 : 0); if (hasAggs) { aggsList = new ArrayList<>(resultSetSize); if (lastMerge != null) { @@ -307,12 +314,6 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults 0) { - buffer.sort(RESULT_COMPARATOR); - } - } - private synchronized void addWithoutBreaking(long size) { circuitBreaker.addWithoutBreaking(size); circuitBreakerBytes += size; @@ -376,21 +377,21 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults= batchReduceSize) { hasPartialReduce = true; executeNextImmediately = false; - QuerySearchResult[] clone = buffer.toArray(QuerySearchResult[]::new); - MergeTask task = new MergeTask(clone, aggsCurrentBufferSize, new ArrayList<>(emptyResults), next); + MergeTask task = new MergeTask(b, aggsCurrentBufferSize, emptyResults, next); + b = buffer = new ArrayList<>(); + emptyResults = new ArrayList<>(); aggsCurrentBufferSize = 0; - buffer.clear(); - emptyResults.clear(); queue.add(task); tryExecuteNext(); } - buffer.add(result); + b.add(result); } } } @@ -404,10 +405,13 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults toConsume = mergeTask.consumeBuffer(); while (mergeTask != null) { final MergeResult thisMergeResult = mergeResult; long estimatedTotalSize = (thisMergeResult != null ? 
thisMergeResult.estimatedSize : 0) + mergeTask.aggsBufferSize; @@ -512,15 +516,7 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults toConsume) { for (QuerySearchResult result : toConsume) { result.releaseAggs(); } @@ -535,19 +531,19 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults emptyResults; - private QuerySearchResult[] buffer; + private List buffer; private final long aggsBufferSize; private Runnable next; - private MergeTask(QuerySearchResult[] buffer, long aggsBufferSize, List emptyResults, Runnable next) { + private MergeTask(List buffer, long aggsBufferSize, List emptyResults, Runnable next) { this.buffer = buffer; this.aggsBufferSize = aggsBufferSize; this.emptyResults = emptyResults; this.next = next; } - public synchronized QuerySearchResult[] consumeBuffer() { - QuerySearchResult[] toRet = buffer; + public synchronized List consumeBuffer() { + List toRet = buffer; buffer = null; return toRet; } @@ -559,7 +555,7 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults buffer = consumeBuffer(); if (buffer != null) { releaseAggs(buffer); } From 7f3e7773d692813b506a3d13a31d0e70e55d20b4 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Thu, 16 Jan 2025 22:01:14 +1100 Subject: [PATCH 05/30] [Test] Fix testcontainers wait for Minio (#120175) This PR changes the wait strategy from the default HostPortWaitStrategy to LogMessageWaitStrategy to accommodate the change in the latest Minio docker image. The default HostPortWaitStrategy has two issues: 1. It assumes certain Linux commands such as grep and nc available inside the container. This is not the case for the latest version of Minio docker image which does not have either of the commands. 2. 
The first item on their own is not fatal since the check also falls back on just reading the listening port as a file with `/bin/bash -c ' Date: Thu, 16 Jan 2025 12:47:49 +0100 Subject: [PATCH 06/30] Revert "Remove deprecated tracing.apm.* settings for v9 (#119926)" (#120268) Temporarily reverts elastic/elasticsearch#119926 due to unforeseen usage. Relates to ES-10293 --- .../server/cli/APMJvmOptions.java | 50 ++++++- .../server/cli/APMJvmOptionsTests.java | 139 ++++++++++++------ docs/changelog/119926.yaml | 11 -- .../org/elasticsearch/telemetry/apm/APM.java | 9 +- .../apm/internal/APMAgentSettings.java | 96 ++++++++++-- .../apm/internal/APMAgentSettingsTests.java | 116 ++++++++++++++- 6 files changed, 342 insertions(+), 79 deletions(-) delete mode 100644 docs/changelog/119926.yaml diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java index 1e57d9fab7cf..c3b976894676 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java @@ -187,12 +187,20 @@ class APMJvmOptions { static void extractSecureSettings(SecureSettings secrets, Map propertiesMap) { final Set settingNames = secrets.getSettingNames(); for (String key : List.of("api_key", "secret_token")) { - String prefix = "telemetry."; - if (settingNames.contains(prefix + key)) { - try (SecureString token = secrets.getString(prefix + key)) { - propertiesMap.put(key, token.toString()); + for (String prefix : List.of("telemetry.", "tracing.apm.")) { + if (settingNames.contains(prefix + key)) { + if (propertiesMap.containsKey(key)) { + throw new IllegalStateException( + Strings.format("Duplicate telemetry setting: [telemetry.%s] and [tracing.apm.%s]", key, key) + ); + } + + try (SecureString token = secrets.getString(prefix + key)) { + 
propertiesMap.put(key, token.toString()); + } } } + } } @@ -219,12 +227,44 @@ class APMJvmOptions { static Map extractApmSettings(Settings settings) throws UserException { final Map propertiesMap = new HashMap<>(); + // tracing.apm.agent. is deprecated by telemetry.agent. final String telemetryAgentPrefix = "telemetry.agent."; + final String deprecatedTelemetryAgentPrefix = "tracing.apm.agent."; final Settings telemetryAgentSettings = settings.getByPrefix(telemetryAgentPrefix); telemetryAgentSettings.keySet().forEach(key -> propertiesMap.put(key, String.valueOf(telemetryAgentSettings.get(key)))); + final Settings apmAgentSettings = settings.getByPrefix(deprecatedTelemetryAgentPrefix); + for (String key : apmAgentSettings.keySet()) { + if (propertiesMap.containsKey(key)) { + throw new IllegalStateException( + Strings.format( + "Duplicate telemetry setting: [%s%s] and [%s%s]", + telemetryAgentPrefix, + key, + deprecatedTelemetryAgentPrefix, + key + ) + ); + } + propertiesMap.put(key, String.valueOf(apmAgentSettings.get(key))); + } + StringJoiner globalLabels = extractGlobalLabels(telemetryAgentPrefix, propertiesMap, settings); + if (globalLabels.length() == 0) { + globalLabels = extractGlobalLabels(deprecatedTelemetryAgentPrefix, propertiesMap, settings); + } else { + StringJoiner tracingGlobalLabels = extractGlobalLabels(deprecatedTelemetryAgentPrefix, propertiesMap, settings); + if (tracingGlobalLabels.length() != 0) { + throw new IllegalArgumentException( + "Cannot have global labels with tracing.agent prefix [" + + globalLabels + + "] and telemetry.apm.agent prefix [" + + tracingGlobalLabels + + "]" + ); + } + } if (globalLabels.length() > 0) { propertiesMap.put("global_labels", globalLabels.toString()); } @@ -234,7 +274,7 @@ class APMJvmOptions { if (propertiesMap.containsKey(key)) { throw new UserException( ExitCodes.CONFIG, - "Do not set a value for [telemetry.agent." 
+ key + "], as this is configured automatically by Elasticsearch" + "Do not set a value for [tracing.apm.agent." + key + "], as this is configured automatically by Elasticsearch" ); } } diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/APMJvmOptionsTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/APMJvmOptionsTests.java index 0e067afc1aa7..a7ba8eb11fbc 100644 --- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/APMJvmOptionsTests.java +++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/APMJvmOptionsTests.java @@ -25,15 +25,18 @@ import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.function.Function; import static org.elasticsearch.test.MapMatcher.matchesMap; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; @@ -79,63 +82,109 @@ public class APMJvmOptionsTests extends ESTestCase { } public void testExtractSecureSettings() { - MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("telemetry.secret_token", "token"); - secureSettings.setString("telemetry.api_key", "key"); + MockSecureSettings duplicateSecureSettings = new MockSecureSettings(); - Map propertiesMap = new HashMap<>(); - APMJvmOptions.extractSecureSettings(secureSettings, propertiesMap); + for (String prefix : List.of("telemetry.", "tracing.apm.")) { + MockSecureSettings secureSettings = new 
MockSecureSettings(); + secureSettings.setString(prefix + "secret_token", "token"); + secureSettings.setString(prefix + "api_key", "key"); + + duplicateSecureSettings.setString(prefix + "api_key", "secret"); + + Map propertiesMap = new HashMap<>(); + APMJvmOptions.extractSecureSettings(secureSettings, propertiesMap); + + assertThat(propertiesMap, matchesMap(Map.of("secret_token", "token", "api_key", "key"))); + } + + Exception exception = expectThrows( + IllegalStateException.class, + () -> APMJvmOptions.extractSecureSettings(duplicateSecureSettings, new HashMap<>()) + ); + assertThat(exception.getMessage(), containsString("Duplicate telemetry setting")); + assertThat(exception.getMessage(), containsString("telemetry.api_key")); + assertThat(exception.getMessage(), containsString("tracing.apm.api_key")); - assertThat(propertiesMap, matchesMap(Map.of("secret_token", "token", "api_key", "key"))); } public void testExtractSettings() throws UserException { - Settings defaults = Settings.builder() - .put("telemetry.agent.server_url", "https://myurl:443") - .put("telemetry.agent.service_node_name", "instance-0000000001") - .build(); + Function buildSettings = (prefix) -> Settings.builder() + .put(prefix + "server_url", "https://myurl:443") + .put(prefix + "service_node_name", "instance-0000000001"); - var name = "APM Tracing"; - var deploy = "123"; - var org = "456"; - var extracted = APMJvmOptions.extractApmSettings( - Settings.builder() - .put(defaults) - .put("telemetry.agent.global_labels.deployment_name", name) - .put("telemetry.agent.global_labels.deployment_id", deploy) - .put("telemetry.agent.global_labels.organization_id", org) - .build() - ); + for (String prefix : List.of("tracing.apm.agent.", "telemetry.agent.")) { + var name = "APM Tracing"; + var deploy = "123"; + var org = "456"; + var extracted = APMJvmOptions.extractApmSettings( + buildSettings.apply(prefix) + .put(prefix + "global_labels.deployment_name", name) + .put(prefix + 
"global_labels.deployment_id", deploy) + .put(prefix + "global_labels.organization_id", org) + .build() + ); - assertThat( - extracted, - allOf( - hasEntry("server_url", "https://myurl:443"), - hasEntry("service_node_name", "instance-0000000001"), - hasEntry(equalTo("global_labels"), not(endsWith(","))), // test that we have collapsed all global labels into one - not(hasKey("global_labels.organization_id")) // tests that we strip out the top level label keys + assertThat( + extracted, + allOf( + hasEntry("server_url", "https://myurl:443"), + hasEntry("service_node_name", "instance-0000000001"), + hasEntry(equalTo("global_labels"), not(endsWith(","))), // test that we have collapsed all global labels into one + not(hasKey("global_labels.organization_id")) // tests that we strip out the top level label keys + ) + ); + + List labels = Arrays.stream(extracted.get("global_labels").split(",")).toList(); + assertThat(labels, hasSize(3)); + assertThat(labels, containsInAnyOrder("deployment_name=APM Tracing", "organization_id=" + org, "deployment_id=" + deploy)); + + // test replacing with underscores and skipping empty + name = "APM=Tracing"; + deploy = ""; + org = ",456"; + extracted = APMJvmOptions.extractApmSettings( + buildSettings.apply(prefix) + .put(prefix + "global_labels.deployment_name", name) + .put(prefix + "global_labels.deployment_id", deploy) + .put(prefix + "global_labels.organization_id", org) + .build() + ); + labels = Arrays.stream(extracted.get("global_labels").split(",")).toList(); + assertThat(labels, hasSize(2)); + assertThat(labels, containsInAnyOrder("deployment_name=APM_Tracing", "organization_id=_456")); + } + + IllegalStateException err = expectThrows( + IllegalStateException.class, + () -> APMJvmOptions.extractApmSettings( + Settings.builder() + .put("tracing.apm.agent.server_url", "https://myurl:443") + .put("telemetry.agent.server_url", "https://myurl-2:443") + .build() ) ); + assertThat(err.getMessage(), is("Duplicate telemetry setting: 
[telemetry.agent.server_url] and [tracing.apm.agent.server_url]")); + } - List labels = Arrays.stream(extracted.get("global_labels").split(",")).toList(); - assertThat(labels, hasSize(3)); - assertThat(labels, containsInAnyOrder("deployment_name=APM Tracing", "organization_id=" + org, "deployment_id=" + deploy)); + public void testNoMixedLabels() { + String telemetryAgent = "telemetry.agent."; + String tracingAgent = "tracing.apm.agent."; + Settings settings = Settings.builder() + .put("tracing.apm.enabled", true) + .put(telemetryAgent + "server_url", "https://myurl:443") + .put(telemetryAgent + "service_node_name", "instance-0000000001") + .put(tracingAgent + "global_labels.deployment_id", "123") + .put(telemetryAgent + "global_labels.organization_id", "456") + .build(); - // test replacing with underscores and skipping empty - name = "APM=Tracing"; - deploy = ""; - org = ",456"; - extracted = APMJvmOptions.extractApmSettings( - Settings.builder() - .put(defaults) - .put("telemetry.agent.global_labels.deployment_name", name) - .put("telemetry.agent.global_labels.deployment_id", deploy) - .put("telemetry.agent.global_labels.organization_id", org) - .build() + IllegalArgumentException err = assertThrows(IllegalArgumentException.class, () -> APMJvmOptions.extractApmSettings(settings)); + assertThat( + err.getMessage(), + is( + "Cannot have global labels with tracing.agent prefix [organization_id=456] and" + + " telemetry.apm.agent prefix [deployment_id=123]" + ) ); - labels = Arrays.stream(extracted.get("global_labels").split(",")).toList(); - assertThat(labels, hasSize(2)); - assertThat(labels, containsInAnyOrder("deployment_name=APM_Tracing", "organization_id=_456")); } private Path makeFakeAgentJar() throws IOException { diff --git a/docs/changelog/119926.yaml b/docs/changelog/119926.yaml deleted file mode 100644 index 3afafd5b2117..000000000000 --- a/docs/changelog/119926.yaml +++ /dev/null @@ -1,11 +0,0 @@ -pr: 119926 -summary: "Deprecated tracing.apm.* settings 
got removed." -area: Infra/Metrics -type: breaking -issues: [] -breaking: - title: "Deprecated tracing.apm.* settings got removed." - area: Cluster and node setting - details: Deprecated `tracing.apm.*` settings got removed, use respective `telemetry.*` / `telemetry.tracing.*` settings instead. - impact: 9.x nodes will refuse to start if any such setting (including secret settings) is still present. - notable: false diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java index 43447cfa21a6..339a4ec24ca1 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java @@ -92,7 +92,14 @@ public class APM extends Plugin implements NetworkPlugin, TelemetryPlugin { APMAgentSettings.TELEMETRY_TRACING_ENABLED_SETTING, APMAgentSettings.TELEMETRY_TRACING_NAMES_INCLUDE_SETTING, APMAgentSettings.TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING, - APMAgentSettings.TELEMETRY_TRACING_SANITIZE_FIELD_NAMES + APMAgentSettings.TELEMETRY_TRACING_SANITIZE_FIELD_NAMES, + // The settings below are deprecated and are currently kept as fallback. 
+ APMAgentSettings.TRACING_APM_SECRET_TOKEN_SETTING, + APMAgentSettings.TRACING_APM_API_KEY_SETTING, + APMAgentSettings.TRACING_APM_ENABLED_SETTING, + APMAgentSettings.TRACING_APM_NAMES_INCLUDE_SETTING, + APMAgentSettings.TRACING_APM_NAMES_EXCLUDE_SETTING, + APMAgentSettings.TRACING_APM_SANITIZE_FIELD_NAMES ); } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java index 8647761e2def..f66683a787bc 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java @@ -25,7 +25,9 @@ import java.security.PrivilegedAction; import java.util.List; import java.util.Objects; import java.util.Set; +import java.util.function.Function; +import static org.elasticsearch.common.settings.Setting.Property.Deprecated; import static org.elasticsearch.common.settings.Setting.Property.NodeScope; import static org.elasticsearch.common.settings.Setting.Property.OperatorDynamic; @@ -99,6 +101,9 @@ public class APMAgentSettings { private static final String TELEMETRY_SETTING_PREFIX = "telemetry."; + // The old legacy prefix + private static final String LEGACY_TRACING_APM_SETTING_PREFIX = "tracing.apm."; + /** * Allow-list of APM agent config keys users are permitted to configure. * @see APM Java Agent Configuration @@ -243,24 +248,56 @@ public class APMAgentSettings { public static final Setting.AffixSetting APM_AGENT_SETTINGS = Setting.prefixKeySetting( TELEMETRY_SETTING_PREFIX + "agent.", - null, // no fallback - (namespace, qualifiedKey) -> concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic) + LEGACY_TRACING_APM_SETTING_PREFIX + "agent.", + (namespace, qualifiedKey) -> qualifiedKey.startsWith(LEGACY_TRACING_APM_SETTING_PREFIX) + ? 
concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic, Deprecated) + : concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic) ); - public static final Setting> TELEMETRY_TRACING_NAMES_INCLUDE_SETTING = Setting.stringListSetting( + /** + * @deprecated in favor of TELEMETRY_TRACING_NAMES_INCLUDE_SETTING. + */ + @Deprecated + public static final Setting> TRACING_APM_NAMES_INCLUDE_SETTING = Setting.stringListSetting( + LEGACY_TRACING_APM_SETTING_PREFIX + "names.include", + OperatorDynamic, + NodeScope, + Deprecated + ); + + public static final Setting> TELEMETRY_TRACING_NAMES_INCLUDE_SETTING = Setting.listSetting( TELEMETRY_SETTING_PREFIX + "tracing.names.include", + TRACING_APM_NAMES_INCLUDE_SETTING, + Function.identity(), OperatorDynamic, NodeScope ); - public static final Setting> TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING = Setting.stringListSetting( + /** + * @deprecated in favor of TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING. + */ + @Deprecated + public static final Setting> TRACING_APM_NAMES_EXCLUDE_SETTING = Setting.stringListSetting( + LEGACY_TRACING_APM_SETTING_PREFIX + "names.exclude", + OperatorDynamic, + NodeScope, + Deprecated + ); + + public static final Setting> TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING = Setting.listSetting( TELEMETRY_SETTING_PREFIX + "tracing.names.exclude", + TRACING_APM_NAMES_EXCLUDE_SETTING, + Function.identity(), OperatorDynamic, NodeScope ); - public static final Setting> TELEMETRY_TRACING_SANITIZE_FIELD_NAMES = Setting.stringListSetting( - TELEMETRY_SETTING_PREFIX + "tracing.sanitize_field_names", + /** + * @deprecated in favor of TELEMETRY_TRACING_SANITIZE_FIELD_NAMES. 
+ */ + @Deprecated + public static final Setting> TRACING_APM_SANITIZE_FIELD_NAMES = Setting.stringListSetting( + LEGACY_TRACING_APM_SETTING_PREFIX + "sanitize_field_names", List.of( "password", "passwd", @@ -276,12 +313,33 @@ public class APMAgentSettings { "set-cookie" ), OperatorDynamic, + NodeScope, + Deprecated + ); + + public static final Setting> TELEMETRY_TRACING_SANITIZE_FIELD_NAMES = Setting.listSetting( + TELEMETRY_SETTING_PREFIX + "tracing.sanitize_field_names", + TRACING_APM_SANITIZE_FIELD_NAMES, + Function.identity(), + OperatorDynamic, NodeScope ); + /** + * @deprecated in favor of TELEMETRY_TRACING_ENABLED_SETTING. + */ + @Deprecated + public static final Setting TRACING_APM_ENABLED_SETTING = Setting.boolSetting( + LEGACY_TRACING_APM_SETTING_PREFIX + "enabled", + false, + OperatorDynamic, + NodeScope, + Deprecated + ); + public static final Setting TELEMETRY_TRACING_ENABLED_SETTING = Setting.boolSetting( TELEMETRY_SETTING_PREFIX + "tracing.enabled", - false, + TRACING_APM_ENABLED_SETTING, OperatorDynamic, NodeScope ); @@ -293,13 +351,33 @@ public class APMAgentSettings { NodeScope ); + /** + * @deprecated in favor of TELEMETRY_SECRET_TOKEN_SETTING. + */ + @Deprecated + public static final Setting TRACING_APM_SECRET_TOKEN_SETTING = SecureSetting.secureString( + LEGACY_TRACING_APM_SETTING_PREFIX + "secret_token", + null, + Deprecated + ); + public static final Setting TELEMETRY_SECRET_TOKEN_SETTING = SecureSetting.secureString( TELEMETRY_SETTING_PREFIX + "secret_token", - null + TRACING_APM_SECRET_TOKEN_SETTING + ); + + /** + * @deprecated in favor of TELEMETRY_API_KEY_SETTING. 
+ */ + @Deprecated + public static final Setting TRACING_APM_API_KEY_SETTING = SecureSetting.secureString( + LEGACY_TRACING_APM_SETTING_PREFIX + "api_key", + null, + Deprecated ); public static final Setting TELEMETRY_API_KEY_SETTING = SecureSetting.secureString( TELEMETRY_SETTING_PREFIX + "api_key", - null + TRACING_APM_API_KEY_SETTING ); } diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java index 551667242092..a60048c82a3c 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java @@ -11,6 +11,8 @@ package org.elasticsearch.telemetry.apm.internal; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.mockito.Mockito; @@ -19,13 +21,21 @@ import java.util.List; import java.util.Set; import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.APM_AGENT_SETTINGS; +import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_API_KEY_SETTING; import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING; +import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_SECRET_TOKEN_SETTING; import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_ENABLED_SETTING; import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING; import static 
org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_NAMES_INCLUDE_SETTING; import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_SANITIZE_FIELD_NAMES; +import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_API_KEY_SETTING; +import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_ENABLED_SETTING; +import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_NAMES_EXCLUDE_SETTING; +import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_NAMES_INCLUDE_SETTING; +import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_SANITIZE_FIELD_NAMES; +import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_SECRET_TOKEN_SETTING; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasItem; import static org.mockito.Mockito.clearInvocations; import static org.mockito.Mockito.mock; @@ -60,6 +70,14 @@ public class APMAgentSettingsTests extends ESTestCase { } } + public void testEnableTracingUsingLegacySetting() { + Settings settings = Settings.builder().put(TRACING_APM_ENABLED_SETTING.getKey(), true).build(); + apmAgentSettings.initAgentSystemProperties(settings); + + verify(apmAgentSettings).setAgentSetting("recording", "true"); + assertWarnings("[tracing.apm.enabled] setting was deprecated in Elasticsearch and will be removed in a future release."); + } + public void testEnableMetrics() { for (boolean tracingEnabled : List.of(true, false)) { clearInvocations(apmAgentSettings, apmTelemetryProvider.getMeterService()); @@ -103,6 +121,14 @@ public class APMAgentSettingsTests extends ESTestCase { } } + public void testDisableTracingUsingLegacySetting() { + Settings settings = 
Settings.builder().put(TRACING_APM_ENABLED_SETTING.getKey(), false).build(); + apmAgentSettings.initAgentSystemProperties(settings); + + verify(apmAgentSettings).setAgentSetting("recording", "false"); + assertWarnings("[tracing.apm.enabled] setting was deprecated in Elasticsearch and will be removed in a future release."); + } + public void testDisableMetrics() { for (boolean tracingEnabled : List.of(true, false)) { clearInvocations(apmAgentSettings, apmTelemetryProvider.getMeterService()); @@ -155,18 +181,70 @@ public class APMAgentSettingsTests extends ESTestCase { verify(apmAgentSettings).setAgentSetting("span_compression_enabled", "true"); } + public void testSetAgentsSettingsWithLegacyPrefix() { + Settings settings = Settings.builder() + .put(TELEMETRY_TRACING_ENABLED_SETTING.getKey(), true) + .put("tracing.apm.agent.span_compression_enabled", "true") + .build(); + apmAgentSettings.initAgentSystemProperties(settings); + + verify(apmAgentSettings).setAgentSetting("recording", "true"); + verify(apmAgentSettings).setAgentSetting("span_compression_enabled", "true"); + assertWarnings( + "[tracing.apm.agent.span_compression_enabled] setting was deprecated in Elasticsearch and will be removed in a future release." + ); + } + /** * Check that invalid or forbidden APM agent settings are rejected. 
*/ public void testRejectForbiddenOrUnknownAgentSettings() { - String prefix = APM_AGENT_SETTINGS.getKey(); - Settings settings = Settings.builder().put(prefix + "unknown", "true").build(); - Exception exception = expectThrows(IllegalArgumentException.class, () -> APM_AGENT_SETTINGS.getAsMap(settings)); - assertThat(exception.getMessage(), containsString("[" + prefix + "unknown]")); - + List prefixes = List.of(APM_AGENT_SETTINGS.getKey(), "tracing.apm.agent."); + for (String prefix : prefixes) { + Settings settings = Settings.builder().put(prefix + "unknown", "true").build(); + Exception exception = expectThrows(IllegalArgumentException.class, () -> APM_AGENT_SETTINGS.getAsMap(settings)); + assertThat(exception.getMessage(), containsString("[" + prefix + "unknown]")); + } // though, accept / ignore nested global_labels - var map = APMAgentSettings.APM_AGENT_SETTINGS.getAsMap(Settings.builder().put(prefix + "global_labels.abc", "123").build()); - assertThat(map, hasEntry("global_labels.abc", "123")); + for (String prefix : prefixes) { + Settings settings = Settings.builder().put(prefix + "global_labels.abc", "123").build(); + APMAgentSettings.APM_AGENT_SETTINGS.getAsMap(settings); + + if (prefix.startsWith("tracing.apm.agent.")) { + assertWarnings( + "[tracing.apm.agent.global_labels.abc] setting was deprecated in Elasticsearch and will be removed in a future release." 
+ ); + } + } + } + + public void testTelemetryTracingNamesIncludeFallback() { + Settings settings = Settings.builder().put(TRACING_APM_NAMES_INCLUDE_SETTING.getKey(), "abc,xyz").build(); + + List included = TELEMETRY_TRACING_NAMES_INCLUDE_SETTING.get(settings); + + assertThat(included, containsInAnyOrder("abc", "xyz")); + assertWarnings("[tracing.apm.names.include] setting was deprecated in Elasticsearch and will be removed in a future release."); + } + + public void testTelemetryTracingNamesExcludeFallback() { + Settings settings = Settings.builder().put(TRACING_APM_NAMES_EXCLUDE_SETTING.getKey(), "abc,xyz").build(); + + List included = TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING.get(settings); + + assertThat(included, containsInAnyOrder("abc", "xyz")); + assertWarnings("[tracing.apm.names.exclude] setting was deprecated in Elasticsearch and will be removed in a future release."); + } + + public void testTelemetryTracingSanitizeFieldNamesFallback() { + Settings settings = Settings.builder().put(TRACING_APM_SANITIZE_FIELD_NAMES.getKey(), "abc,xyz").build(); + + List included = TELEMETRY_TRACING_SANITIZE_FIELD_NAMES.get(settings); + + assertThat(included, containsInAnyOrder("abc", "xyz")); + assertWarnings( + "[tracing.apm.sanitize_field_names] setting was deprecated in Elasticsearch and will be removed in a future release." 
+ ); } public void testTelemetryTracingSanitizeFieldNamesFallbackDefault() { @@ -174,6 +252,28 @@ public class APMAgentSettingsTests extends ESTestCase { assertThat(included, hasItem("password")); // and more defaults } + public void testTelemetrySecretTokenFallback() { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(TRACING_APM_SECRET_TOKEN_SETTING.getKey(), "verysecret"); + Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); + + try (SecureString secureString = TELEMETRY_SECRET_TOKEN_SETTING.get(settings)) { + assertEquals("verysecret", secureString.toString()); + } + assertWarnings("[tracing.apm.secret_token] setting was deprecated in Elasticsearch and will be removed in a future release."); + } + + public void testTelemetryApiKeyFallback() { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(TRACING_APM_API_KEY_SETTING.getKey(), "abc"); + Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); + + try (SecureString secureString = TELEMETRY_API_KEY_SETTING.get(settings)) { + assertEquals("abc", secureString.toString()); + } + assertWarnings("[tracing.apm.api_key] setting was deprecated in Elasticsearch and will be removed in a future release."); + } + /** * Check that invalid or forbidden APM agent settings are rejected if their last part resembles an allowed setting. */ From 7b6bdfa323fcc7460dc252b3dbc84c6b1314fb66 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Thu, 16 Jan 2025 13:23:04 +0100 Subject: [PATCH 07/30] [Gradle] Make rolling upgrade tests configuration cache compatible (#119577) With this, all rolling upgrade tests that involve a `nextNodeToNextVersion` update are gradle configuration cache compatible. Simplify API around test cluster registry and cc compatible usage of test cluster in TestClusterAware tasks. 
--- .../testclusters/TestClustersAware.java | 10 +++- .../testclusters/TestClustersPlugin.java | 2 +- .../testclusters/TestClustersRegistry.java | 17 ++++++ .../build.gradle | 16 +++--- qa/mixed-cluster/build.gradle | 52 ++++++------------- qa/rolling-upgrade-legacy/build.gradle | 18 +++---- .../downgrade-to-basic-license/build.gradle | 19 +------ .../plugin/ccr/qa/multi-cluster/build.gradle | 37 +++---------- .../ccr/qa/non-compliant-license/build.gradle | 12 +---- x-pack/plugin/ccr/qa/restart/build.gradle | 34 ++---------- x-pack/plugin/ccr/qa/security/build.gradle | 11 +--- .../eql/qa/ccs-rolling-upgrade/build.gradle | 15 +++--- x-pack/plugin/eql/qa/mixed-node/build.gradle | 17 +++--- .../shutdown/qa/rolling-upgrade/build.gradle | 27 +++++----- .../plugin/sql/qa/jdbc/security/build.gradle | 13 +---- x-pack/plugin/sql/qa/mixed-node/build.gradle | 17 +++--- .../sql/qa/server/security/build.gradle | 7 +-- x-pack/qa/mixed-tier-cluster/build.gradle | 10 ++-- x-pack/qa/rolling-upgrade-basic/build.gradle | 14 ++--- .../build.gradle | 22 ++++---- x-pack/qa/rolling-upgrade/build.gradle | 22 ++++---- 21 files changed, 149 insertions(+), 243 deletions(-) diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java index 9e5fc1f09ac9..2e313fa73c4e 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java @@ -26,7 +26,7 @@ public interface TestClustersAware extends Task { Collection getClusters(); @ServiceReference(REGISTRY_SERVICE_NAME) - Property getRegistery(); + Property getRegistry(); @ServiceReference(TEST_CLUSTER_TASKS_SERVICE) Property getTasksService(); @@ -47,6 +47,14 @@ public interface TestClustersAware extends Task { getClusters().add(cluster); } + default Provider getClusterInfo(String clusterName) { + 
return getProject().getProviders().of(TestClusterValueSource.class, source -> { + source.getParameters().getService().set(getRegistry()); + source.getParameters().getClusterName().set(clusterName); + source.getParameters().getPath().set(getProject().getIsolated().getPath()); + }); + } + default void useCluster(Provider cluster) { useCluster(cluster.get()); } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java index ada31bc11a65..c3dc49a2683f 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java @@ -249,7 +249,7 @@ public class TestClustersPlugin implements Plugin { .forEach(awareTask -> { awareTask.doFirst(task -> { awareTask.beforeStart(); - awareTask.getClusters().forEach(awareTask.getRegistery().get()::maybeStartCluster); + awareTask.getClusters().forEach(awareTask.getRegistry().get()::maybeStartCluster); }); }); }); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java index 8d2a9217e7d0..dcfe7c29a52b 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java @@ -109,6 +109,23 @@ public abstract class TestClustersRegistry implements BuildService cluster) { + nextNodeToNextVersion(cluster.get()); + } + + public void nextNodeToNextVersion(ElasticsearchCluster cluster) { + nextNodeToNextVersion(cluster.getPath(), cluster.getName()); + } + + public void nextNodeToNextVersion(String path, String clusterName) { + ElasticsearchCluster cluster = runningClusters.stream() + .filter(c -> c.getPath().equals(path)) + 
.filter(c -> c.getName().equals(clusterName)) + .findFirst() + .orElseThrow(); + cluster.nextNodeToNextVersion(); + } + public void storeProcess(String id, Process esProcess) { nodeProcesses.put(id, esProcess); } diff --git a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle index 5bbade8cf6fc..1e9788c69dba 100644 --- a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle +++ b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle @@ -45,11 +45,11 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> useCluster localCluster useCluster remoteCluster systemProperty 'tests.upgrade_from_version', bwcVersion.toString().replace('-SNAPSHOT', '') + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(localCluster.name).map { it.allHttpSocketURI.join(",") }) + nonInputProperties.systemProperty('tests.rest.remote_cluster', getClusterInfo(remoteCluster.name).map { it.allHttpSocketURI.join(",") }) - doFirst { - nonInputProperties.systemProperty('tests.rest.cluster', localCluster.map(c -> c.allHttpSocketURI.join(","))) - nonInputProperties.systemProperty('tests.rest.remote_cluster', remoteCluster.map(c -> c.allHttpSocketURI.join(","))) - } + def fipsDisabled = buildParams.inFipsJvm == false + onlyIf("FIPS mode disabled") { fipsDisabled } } tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { @@ -60,28 +60,28 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> cluster.nodes.forEach { node -> node.getAllTransportPortURI() } - cluster.nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(cluster) } } tasks.register("${baseName}#oneThirdUpgraded", StandaloneRestIntegTestTask) { dependsOn "${baseName}#oldClusterTest" doFirst { - remoteCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(remoteCluster) } } tasks.register("${baseName}#twoThirdUpgraded", StandaloneRestIntegTestTask) { dependsOn 
"${baseName}#oneThirdUpgraded" doFirst { - remoteCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(remoteCluster) } } tasks.register("${baseName}#fullUpgraded", StandaloneRestIntegTestTask) { dependsOn "${baseName}#twoThirdUpgraded" doFirst { - remoteCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(remoteCluster) } } diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index 8dd5031c0782..088983745728 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -82,7 +82,7 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> def baseCluster = testClusters.register(baseName) { versions = [bwcVersion.toString(), project.version] numberOfNodes = 4 - setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" + setting 'path.repo', "${layout.buildDirectory.asFile.get()}/cluster/shared/repo/${baseName}" setting 'xpack.security.enabled', 'false' setting "xpack.license.self_generated.type", "trial" /* There is a chance we have more master changes than "normal", so to avoid this test from failing, @@ -96,50 +96,32 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> tasks.register("${baseName}#mixedClusterTest", StandaloneRestIntegTestTask) { useCluster baseCluster mustRunAfter("precommit") - Provider serviceProvider = GradleUtils.getBuildService( - project.gradle.sharedServices, - TestClustersPlugin.REGISTRY_SERVICE_NAME - ) - - def baseInfo = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set(baseName) - it.parameters.service = serviceProvider - }.map { it.getAllHttpSocketURI() } - - def baseInfoAfterOneNodeUpdate = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set(baseName) - it.parameters.service = serviceProvider - }.map { it.getAllHttpSocketURI() } - - def 
baseInfoAfterTwoNodesUpdate = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set(baseName) - it.parameters.service = serviceProvider - }.map { it.getAllHttpSocketURI() } - def nonInputProps = nonInputProperties - def sharedRepoFolder = new File(buildDir, "cluster/shared/repo/${baseName}") + def baseInfo = getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") } + def baseInfoAfterOneNodeUpdate = getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") } + def baseInfoAfterTwoNodesUpdate = getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") } + def sharedRepoFolder = layout.buildDirectory.file("cluster/shared/repo/${baseName}").get().asFile doFirst { delete(sharedRepoFolder) // Getting the endpoints causes a wait for the cluster println "Test cluster endpoints are: ${-> baseInfo.get().join(",")}" println "Upgrading one node to create a mixed cluster" - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) + // Getting the endpoints causes a wait for the cluster - println "Upgrade complete, endpoints are: ${-> baseInfoAfterOneNodeUpdate.get().join(",")}" + println "Upgrade complete, endpoints are: ${-> baseInfoAfterOneNodeUpdate.get()}" println "Upgrading another node to create a mixed cluster" - baseCluster.get().nextNodeToNextVersion() - nonInputProps.systemProperty('tests.rest.cluster', baseInfoAfterTwoNodesUpdate.map(c -> c.join(","))) - nonInputProps.systemProperty('tests.clustername', baseName) - if (excludeList.isEmpty() == false) { - systemProperty 'tests.rest.blacklist', excludeList.join(',') - } + getRegistry().get().nextNodeToNextVersion(baseCluster) } - systemProperty 'tests.path.repo', "${buildDir}/cluster/shared/repo/${baseName}" + if (excludeList.isEmpty() == false) { + systemProperty 'tests.rest.blacklist', excludeList.join(',') + } + nonInputProperties.systemProperty('tests.rest.cluster', 
baseInfoAfterTwoNodesUpdate) + nonInputProperties.systemProperty('tests.clustername', baseName) + systemProperty 'tests.path.repo', "${layout.buildDirectory.file("cluster/shared/repo/${baseName}").get().asFile}" systemProperty 'tests.bwc_nodes_version', bwcVersion.toString().replace('-SNAPSHOT', '') systemProperty 'tests.new_nodes_version', project.version.toString().replace('-SNAPSHOT', '') -// onlyIf("BWC tests disabled") { project.bwc_tests_enabled } + def bwcEnabled = project.bwc_tests_enabled + onlyIf("BWC tests disabled") { bwcEnabled } } tasks.register(bwcTaskName(bwcVersion)) { diff --git a/qa/rolling-upgrade-legacy/build.gradle b/qa/rolling-upgrade-legacy/build.gradle index e1c31fd50c0d..839daaf1a949 100644 --- a/qa/rolling-upgrade-legacy/build.gradle +++ b/qa/rolling-upgrade-legacy/build.gradle @@ -40,7 +40,7 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> numberOfNodes = 3 setting 'repositories.url.allowed_urls', 'http://snapshot.test*' - setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" + setting 'path.repo', "${layout.buildDirectory.get().asFile}/cluster/shared/repo/${baseName}" setting 'xpack.security.enabled', 'false' requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") } @@ -52,12 +52,12 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> useCluster baseCluster mustRunAfter("precommit") doFirst { - delete("${buildDir}/cluster/shared/repo/${baseName}") + delete("${layout.buildDirectory.get().asFile}/cluster/shared/repo/${baseName}") } def excludeList = [] systemProperty 'tests.rest.suite', 'old_cluster' systemProperty 'tests.upgrade_from_version', oldVersion - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) if 
(excludeList.isEmpty() == false) { systemProperty 'tests.rest.blacklist', excludeList.join(',') @@ -68,12 +68,12 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> dependsOn "${baseName}#oldClusterTest" useCluster baseCluster doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.upgrade_from_version', oldVersion systemProperty 'tests.first_round', 'true' - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) def excludeList = [] if (excludeList.isEmpty() == false) { @@ -85,12 +85,12 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> dependsOn "${baseName}#oneThirdUpgradedTest" useCluster baseCluster doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.upgrade_from_version', oldVersion systemProperty 'tests.first_round', 'false' - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) def excludeList = [] if (excludeList.isEmpty() == false) { @@ -101,12 +101,12 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> tasks.register("${baseName}#upgradedClusterTest", StandaloneRestIntegTestTask) { dependsOn "${baseName}#twoThirdsUpgradedTest" doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } useCluster 
testClusters.named(baseName) systemProperty 'tests.rest.suite', 'upgraded_cluster' systemProperty 'tests.upgrade_from_version', oldVersion - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) def excludeList = [] if (excludeList.isEmpty() == false) { diff --git a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle index 86f974ed1335..b3721ab3ac93 100644 --- a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle +++ b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle @@ -78,24 +78,7 @@ tasks.register("follow-cluster", RestIntegTestTask) { useCluster leaderCluster systemProperty 'tests.target_cluster', 'follow' nonInputProperties.systemProperty 'java.security.policy', "file://${policyFile}" - Provider serviceProvider = GradleUtils.getBuildService( - project.gradle.sharedServices, - TestClustersPlugin.REGISTRY_SERVICE_NAME - ) - def leaderInfo = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set("leader-cluster") - it.parameters.service = serviceProvider - } - def followInfo = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set("follow-cluster") - it.parameters.service = serviceProvider - } - def leaderUri = leaderInfo.map { it.getAllHttpSocketURI().get(0) } - def followerUri = followInfo.map { it.getAllHttpSocketURI().get(0) } - - nonInputProperties.systemProperty 'tests.leader_host', leaderUri + nonInputProperties.systemProperty 'tests.leader_host', getClusterInfo('leader-cluster').map { it.getAllHttpSocketURI().get(0) } nonInputProperties.systemProperty 'log', followCluster.map(c -> 
c.getFirstNode().getServerLog()) } diff --git a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle index 61678784e6b3..d5bc9395fc9c 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle @@ -29,7 +29,7 @@ def leaderCluster = testClusters.register('leader-cluster') { setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.enabled', 'true' user username: 'admin', password: 'admin-password', role: 'superuser' - setting 'path.repo', "${buildDir}/cluster/shared/repo/leader-cluster" + setting 'path.repo', "${layout.buildDirectory.asFile.get()}/cluster/shared/repo/leader-cluster" } def middleCluster = testClusters.register('middle-cluster') { @@ -55,25 +55,16 @@ def middleCluster = testClusters.register('middle-cluster') { tasks.register("leader-cluster", RestIntegTestTask) { mustRunAfter("precommit") systemProperty 'tests.target_cluster', 'leader' - systemProperty 'tests.leader_cluster_repository_path', "${buildDir}/cluster/shared/repo/leader-cluster" + systemProperty 'tests.leader_cluster_repository_path', "${layout.buildDirectory.asFile.get()}/cluster/shared/repo/leader-cluster" } tasks.register("middle-cluster", RestIntegTestTask) { dependsOn "leader-cluster" useCluster testClusters.named("leader-cluster") systemProperty 'tests.target_cluster', 'middle' - systemProperty 'tests.leader_cluster_repository_path', "${buildDir}/cluster/shared/repo/leader-cluster" + systemProperty 'tests.leader_cluster_repository_path', "${layout.buildDirectory.asFile.get()}/cluster/shared/repo/leader-cluster" - Provider serviceProvider = GradleUtils.getBuildService( - project.gradle.sharedServices, - TestClustersPlugin.REGISTRY_SERVICE_NAME - ) - - def leaderUri = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set("leader-cluster") - it.parameters.service = serviceProvider - }.map { 
it.allHttpSocketURI.get(0) } + def leaderUri = getClusterInfo('leader-cluster').map { it.allHttpSocketURI.get(0) } nonInputProperties.systemProperty 'tests.leader_host', leaderUri } @@ -82,24 +73,10 @@ tasks.register('follow-cluster', RestIntegTestTask) { useCluster leaderCluster useCluster middleCluster systemProperty 'tests.target_cluster', 'follow' - systemProperty 'tests.leader_cluster_repository_path', "${buildDir}/cluster/shared/repo/leader-cluster" + systemProperty 'tests.leader_cluster_repository_path', "${layout.buildDirectory.asFile.get()}/cluster/shared/repo/leader-cluster" - Provider serviceProvider = GradleUtils.getBuildService( - project.gradle.sharedServices, - TestClustersPlugin.REGISTRY_SERVICE_NAME - ) - - def leaderUri = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set("leader-cluster") - it.parameters.service = serviceProvider - }.map { it.allHttpSocketURI.get(0) } - - def middleUri = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set("middle-cluster") - it.parameters.service = serviceProvider - }.map { it.allHttpSocketURI.get(0) } + def leaderUri = getClusterInfo('leader-cluster').map { it.allHttpSocketURI.get(0) } + def middleUri = getClusterInfo('middle-cluster').map { it.allHttpSocketURI.get(0) } nonInputProperties.systemProperty 'tests.leader_host', leaderUri nonInputProperties.systemProperty 'tests.middle_host', middleUri } diff --git a/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle b/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle index ff342accef27..ad4d2cb5afc7 100644 --- a/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle +++ b/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle @@ -53,17 +53,7 @@ tasks.register('follow-cluster', RestIntegTestTask) { useCluster leaderCluster systemProperty 'tests.target_cluster', 'follow' - Provider serviceProvider = 
GradleUtils.getBuildService( - project.gradle.sharedServices, - TestClustersPlugin.REGISTRY_SERVICE_NAME - ) - def followInfo = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set("follow-cluster") - it.parameters.service = serviceProvider - } - def followUri = followInfo.map { it.allHttpSocketURI.get(0) } - + def followUri = getClusterInfo('follow-cluster').map { it.allHttpSocketURI.get(0) } nonInputProperties.systemProperty 'tests.leader_host', followUri } diff --git a/x-pack/plugin/ccr/qa/restart/build.gradle b/x-pack/plugin/ccr/qa/restart/build.gradle index 848beb1da10a..89ad8cad8498 100644 --- a/x-pack/plugin/ccr/qa/restart/build.gradle +++ b/x-pack/plugin/ccr/qa/restart/build.gradle @@ -55,18 +55,8 @@ tasks.register('follow-cluster', RestIntegTestTask) { useCluster leaderCluster systemProperty 'tests.target_cluster', 'follow' - Provider serviceProvider = GradleUtils.getBuildService( - project.gradle.sharedServices, - TestClustersPlugin.REGISTRY_SERVICE_NAME - ) - def leaderUri = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set("leader-cluster") - it.parameters.service = serviceProvider - }.map { it.allHttpSocketURI.get(0) } - - nonInputProperties.systemProperty 'tests.leader_host', - "${-> leaderUri.get() }" + def leaderUri = getClusterInfo("leader-cluster").map { it.allHttpSocketURI.get(0) } + nonInputProperties.systemProperty 'tests.leader_host', leaderUri } tasks.register("followClusterRestartTest", StandaloneRestIntegTestTask) { @@ -76,27 +66,13 @@ tasks.register("followClusterRestartTest", StandaloneRestIntegTestTask) { systemProperty 'tests.rest.load_packaged', 'false' systemProperty 'tests.target_cluster', 'follow-restart' - Provider serviceProvider = GradleUtils.getBuildService( - project.gradle.sharedServices, - TestClustersPlugin.REGISTRY_SERVICE_NAME - ) - def leaderUri = 
project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set("leader-cluster") - it.parameters.service = serviceProvider - }.map { it.allHttpSocketURI.get(0) } - - def followUris = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set("follow-cluster") - it.parameters.service = serviceProvider - }.map { it.allHttpSocketURI.join(",") } - + def leaderUri = getClusterInfo('leader-cluster').map { it.allHttpSocketURI.get(0) } + def followUris = getClusterInfo('follow-cluster').map { it.allHttpSocketURI.join(",") } nonInputProperties.systemProperty 'tests.leader_host', leaderUri nonInputProperties.systemProperty 'tests.rest.cluster', followUris doFirst { - serviceProvider.get().restart(clusterPath, "follow-cluster") + getRegistry().get().restart(clusterPath, "follow-cluster") } } diff --git a/x-pack/plugin/ccr/qa/security/build.gradle b/x-pack/plugin/ccr/qa/security/build.gradle index 454a9ae72173..3ceb86a632e0 100644 --- a/x-pack/plugin/ccr/qa/security/build.gradle +++ b/x-pack/plugin/ccr/qa/security/build.gradle @@ -58,16 +58,7 @@ def followerClusterTestTask = tasks.register('follow-cluster', RestIntegTestTask dependsOn 'leader-cluster' useCluster leadCluster systemProperty 'tests.target_cluster', 'follow' - Provider serviceProvider = GradleUtils.getBuildService( - project.gradle.sharedServices, - TestClustersPlugin.REGISTRY_SERVICE_NAME - ) - def leaderUri = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set("leader-cluster") - it.parameters.service = serviceProvider - }.map { it.allHttpSocketURI.get(0) } - + def leaderUri = getClusterInfo('leader-cluster').map { it.allHttpSocketURI.get(0) } nonInputProperties.systemProperty 'tests.leader_host', leaderUri } diff --git a/x-pack/plugin/eql/qa/ccs-rolling-upgrade/build.gradle 
b/x-pack/plugin/eql/qa/ccs-rolling-upgrade/build.gradle index bc1a44f94d18..50c7d756f43e 100644 --- a/x-pack/plugin/eql/qa/ccs-rolling-upgrade/build.gradle +++ b/x-pack/plugin/eql/qa/ccs-rolling-upgrade/build.gradle @@ -42,39 +42,36 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> useCluster localCluster useCluster remoteCluster systemProperty 'tests.upgrade_from_version', bwcVersion.toString().replace('-SNAPSHOT', '') - - doFirst { - nonInputProperties.systemProperty('tests.rest.cluster', localCluster.map(c -> c.allHttpSocketURI.join(","))) - nonInputProperties.systemProperty('tests.rest.remote_cluster', remoteCluster.map(c -> c.allHttpSocketURI.join(","))) - } + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo("${baseName}-local").map { it.allHttpSocketURI.join(",") }) + nonInputProperties.systemProperty('tests.rest.remote_cluster', getClusterInfo("${baseName}-remote").map { it.allHttpSocketURI.join(",") }) } tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { dependsOn "processTestResources" mustRunAfter("precommit") doFirst { - localCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(localCluster) } } tasks.register("${baseName}#oneThirdUpgraded", StandaloneRestIntegTestTask) { dependsOn "${baseName}#oldClusterTest" doFirst { - remoteCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(remoteCluster) } } tasks.register("${baseName}#twoThirdUpgraded", StandaloneRestIntegTestTask) { dependsOn "${baseName}#oneThirdUpgraded" doFirst { - remoteCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(remoteCluster) } } tasks.register("${baseName}#fullUpgraded", StandaloneRestIntegTestTask) { dependsOn "${baseName}#twoThirdUpgraded" doFirst { - remoteCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(remoteCluster) } } diff --git a/x-pack/plugin/eql/qa/mixed-node/build.gradle 
b/x-pack/plugin/eql/qa/mixed-node/build.gradle index bbeb439ab615..b13f42ea533f 100644 --- a/x-pack/plugin/eql/qa/mixed-node/build.gradle +++ b/x-pack/plugin/eql/qa/mixed-node/build.gradle @@ -39,19 +39,22 @@ buildParams.bwcVersions.withWireCompatible(v -> v.onOrAfter("7.10.0") && mustRunAfter("precommit") classpath = sourceSets.javaRestTest.runtimeClasspath testClassesDirs = sourceSets.javaRestTest.output.classesDirs + def socketsProvider1 = getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") } + def socketsProvider2 = getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") } doFirst { // Getting the endpoints causes a wait for the cluster - println "Endpoints are: ${-> testClusters."${baseName}".allHttpSocketURI.join(",")}" + println "Endpoints are: ${-> socketsProvider1.get()}" println "Upgrading one node to create a mixed cluster" - cluster.get().nextNodeToNextVersion() - - println "Upgrade complete, endpoints are: ${-> testClusters.named(baseName).get().allHttpSocketURI.join(",")}" - nonInputProperties.systemProperty('tests.rest.cluster', cluster.map(c -> c.allHttpSocketURI.join(","))) - nonInputProperties.systemProperty('tests.clustername', baseName) + getRegistry().get().nextNodeToNextVersion(cluster) + println "Upgrade complete, endpoints are: ${-> socketsProvider2.get()} }" } + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) + nonInputProperties.systemProperty('tests.clustername', baseName) systemProperty 'tests.bwc_nodes_version', bwcVersion.toString().replace('-SNAPSHOT', '') systemProperty 'tests.new_nodes_version', project.version.toString().replace('-SNAPSHOT', '') - onlyIf("BWC tests disabled") { project.bwc_tests_enabled } + + def bwcEnabled = project.bwc_tests_enabled + onlyIf("BWC tests disabled") { bwcEnabled } } tasks.register(bwcTaskName(bwcVersion)) { diff --git a/x-pack/plugin/shutdown/qa/rolling-upgrade/build.gradle 
b/x-pack/plugin/shutdown/qa/rolling-upgrade/build.gradle index 17996ce82a45..5668375403da 100644 --- a/x-pack/plugin/shutdown/qa/rolling-upgrade/build.gradle +++ b/x-pack/plugin/shutdown/qa/rolling-upgrade/build.gradle @@ -28,7 +28,8 @@ tasks.named("forbiddenPatterns").configure { exclude '**/system_key' } -String outputDir = "${buildDir}/generated-resources/${project.name}" +def buildDirectory = layout.buildDirectory +String outputDir = "${buildDirectory.file("generated-resources/${project.name}").get().asFile}" tasks.register("copyTestNodeKeyMaterial", Copy) { from project(':x-pack:plugin:core').files('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem', @@ -40,15 +41,15 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> String oldVersion = bwcVersion.toString() // SearchableSnapshotsRollingUpgradeIT uses a specific repository to not interfere with other tests - String searchableSnapshotRepository = "${buildDir}/cluster/shared/searchable-snapshots-repo/${baseName}" - + String searchableSnapshotRepository = "${buildDirectory.file("cluster/shared/searchable-snapshots-repo/${baseName}").get().asFile}" + File repoFolder = buildDirectory.file("cluster/shared/repo/${baseName}").get().asFile def baseCluster = testClusters.register(baseName) { testDistribution = "DEFAULT" versions = [oldVersion, project.version] numberOfNodes = 3 setting 'repositories.url.allowed_urls', 'http://snapshot.test*' - setting 'path.repo', "['${buildDir}/cluster/shared/repo/${baseName}', '${searchableSnapshotRepository}']" + setting 'path.repo', "['${repoFolder}', '${searchableSnapshotRepository}']" setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.enabled', 'true' setting 'xpack.security.transport.ssl.enabled', 'true' @@ -107,15 +108,15 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> useCluster baseCluster mustRunAfter("precommit") dependsOn "copyTestNodeKeyMaterial" + def repoDir = 
buildDirectory.file("cluster/shared/repo/${baseName}").get().asFile doFirst { - delete("${buildDir}/cluster/shared/repo/${baseName}") + delete(repoDir) delete("${searchableSnapshotRepository}") } - systemProperty 'tests.rest.suite', 'old_cluster' systemProperty 'tests.upgrade_from_version', oldVersion systemProperty 'tests.path.searchable.snapshots.repo', searchableSnapshotRepository - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c->c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) } @@ -123,9 +124,9 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> dependsOn "${baseName}#oldClusterTest" useCluster baseCluster doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c->c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.first_round', 'true' @@ -137,9 +138,9 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> dependsOn "${baseName}#oneThirdUpgradedTest" useCluster baseCluster doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } - nonInputProperties.systemProperty('tests.rest.cluster', "${-> baseCluster.get().allHttpSocketURI.join(",")}") + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.first_round', 
'false' @@ -151,9 +152,9 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> dependsOn "${baseName}#twoThirdsUpgradedTest" useCluster baseCluster doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } - nonInputProperties.systemProperty('tests.rest.cluster', "${-> baseCluster.get().allHttpSocketURI.join(",")}") + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) systemProperty 'tests.rest.suite', 'upgraded_cluster' systemProperty 'tests.upgrade_from_version', oldVersion diff --git a/x-pack/plugin/sql/qa/jdbc/security/build.gradle b/x-pack/plugin/sql/qa/jdbc/security/build.gradle index 4248423d4ff4..bed7ff60107b 100644 --- a/x-pack/plugin/sql/qa/jdbc/security/build.gradle +++ b/x-pack/plugin/sql/qa/jdbc/security/build.gradle @@ -54,18 +54,7 @@ subprojects { dependsOn copyTestClasses classpath += configurations.testArtifacts testClassesDirs = project.files(testArtifactsDir) - - Provider serviceProvider = GradleUtils.getBuildService( - project.gradle.sharedServices, - TestClustersPlugin.REGISTRY_SERVICE_NAME - ) - - def clusterInfo = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set(taskName) - it.parameters.service = serviceProvider - } - + def clusterInfo = getClusterInfo(taskName); nonInputProperties.systemProperty 'tests.audit.logfile', clusterInfo.map { it.auditLogs.get(0) } nonInputProperties.systemProperty 'tests.audit.yesterday.logfile', clusterInfo.map { it.auditLogs.get(0).getParentFile().toString() + "/javaRestTest_audit-${new Date().format('yyyy-MM-dd')}-1.json" } diff --git a/x-pack/plugin/sql/qa/mixed-node/build.gradle b/x-pack/plugin/sql/qa/mixed-node/build.gradle index 35600fda0eb3..e08179098193 100644 --- a/x-pack/plugin/sql/qa/mixed-node/build.gradle +++ 
b/x-pack/plugin/sql/qa/mixed-node/build.gradle @@ -45,22 +45,23 @@ buildParams.bwcVersions.withWireCompatible(v -> v.onOrAfter("7.10.3") && mustRunAfter("precommit") testClassesDirs = sourceSets.javaRestTest.output.classesDirs classpath = sourceSets.javaRestTest.runtimeClasspath + def beforeUpdateInfo = getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") } + def afterUpdateInfo = getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") } doFirst { def cluster = baseCluster.get() // Getting the endpoints causes a wait for the cluster - println "Endpoints are: ${-> cluster.allHttpSocketURI.join(",")}" + println "Endpoints are: ${-> beforeUpdateInfo.get()}" println "Upgrading one node to create a mixed cluster" - cluster.nextNodeToNextVersion() - - println "Upgrade complete, endpoints are: ${-> cluster.allHttpSocketURI.join(",")}" - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c->c.allHttpSocketURI.join(","))) - nonInputProperties.systemProperty('tests.clustername', baseName) - + getRegistry().get().nextNodeToNextVersion(cluster) + println "Upgrade complete, endpoints are: ${-> afterUpdateInfo.get() }" } + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) + nonInputProperties.systemProperty('tests.clustername', baseName) systemProperty 'tests.bwc_nodes_version', bwcVersion.toString().replace('-SNAPSHOT', '') systemProperty 'tests.new_nodes_version', project.version.toString().replace('-SNAPSHOT', '') - onlyIf("BWC tests disabled") { project.bwc_tests_enabled } + def bwcEnabled = project.bwc_tests_enabled + onlyIf("BWC tests disabled") { bwcEnabled } } tasks.register(bwcTaskName(bwcVersion)) { diff --git a/x-pack/plugin/sql/qa/server/security/build.gradle b/x-pack/plugin/sql/qa/server/security/build.gradle index 37ae3edaf51d..e00989cbaa89 100644 --- a/x-pack/plugin/sql/qa/server/security/build.gradle +++ b/x-pack/plugin/sql/qa/server/security/build.gradle @@ 
-64,12 +64,7 @@ subprojects { TestClustersPlugin.REGISTRY_SERVICE_NAME ) - def clusterInfo = project.getProviders().of(TestClusterValueSource.class) { - it.parameters.path.set(clusterPath) - it.parameters.clusterName.set("javaRestTest") - it.parameters.service = serviceProvider - } - + def clusterInfo = getClusterInfo('javaRestTest') testClassesDirs += project.files(testArtifactsDir) classpath += configurations.testArtifacts nonInputProperties.systemProperty 'tests.audit.logfile', clusterInfo.map { it.auditLogs.get(0) } diff --git a/x-pack/qa/mixed-tier-cluster/build.gradle b/x-pack/qa/mixed-tier-cluster/build.gradle index bee28c47dc86..40454b2a290c 100644 --- a/x-pack/qa/mixed-tier-cluster/build.gradle +++ b/x-pack/qa/mixed-tier-cluster/build.gradle @@ -40,14 +40,14 @@ buildParams.bwcVersions.withWireCompatible(v -> v.onOrAfter("7.9.0") && tasks.register("${baseName}#mixedClusterTest", StandaloneRestIntegTestTask) { useCluster baseCluster mustRunAfter("precommit") + def beforeEndpoints = getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") } doFirst { // Getting the endpoints causes a wait for the cluster - println "Endpoints are: ${-> baseCluster.get().allHttpSocketURI.join(",")}" - baseCluster.get().nextNodeToNextVersion() - - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c->c.allHttpSocketURI.join(","))) - nonInputProperties.systemProperty('tests.clustername', baseName) + println "Endpoints are: ${-> beforeEndpoints.get()}" + getRegistry().get().nextNodeToNextVersion(baseCluster) } + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) + nonInputProperties.systemProperty('tests.clustername', baseName) onlyIf("BWC tests disabled") { project.bwc_tests_enabled } } diff --git a/x-pack/qa/rolling-upgrade-basic/build.gradle b/x-pack/qa/rolling-upgrade-basic/build.gradle index 9a447f35eb13..18feb654804b 100644 --- a/x-pack/qa/rolling-upgrade-basic/build.gradle +++ 
b/x-pack/qa/rolling-upgrade-basic/build.gradle @@ -33,7 +33,7 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> mustRunAfter("precommit") systemProperty 'tests.rest.suite', 'old_cluster' systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c->c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) } @@ -42,9 +42,9 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> dependsOn "${baseName}#oldClusterTest" useCluster baseCluster doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c->c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.first_round', 'true' @@ -55,9 +55,9 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> dependsOn "${baseName}#oneThirdUpgradedTest" useCluster baseCluster doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c->c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.first_round', 'false' @@ -68,9 +68,9 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> 
dependsOn "${baseName}#twoThirdsUpgradedTest" useCluster baseCluster doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c->c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) systemProperty 'tests.rest.suite', 'upgraded_cluster' systemProperty 'tests.upgrade_from_version', oldVersion diff --git a/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle b/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle index ebcb4cd9760f..b7b46d432823 100644 --- a/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle +++ b/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle @@ -38,48 +38,44 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> useCluster baseLeaderCluster useCluster baseFollowerCluster systemProperty 'tests.upgrade_from_version', bwcVersion.toString().replace('-SNAPSHOT', '') - + def baseClusterName = getName().substring(0, getName().lastIndexOf("#")).replace('#', '-') + def baseCluster = testClusters.named(baseClusterName) doFirst { - def baseCluster = testClusters.named("${baseName}-${kindExt}").get() if (name.endsWith("#clusterTest") == false) { println "Upgrade node $it" - baseCluster.nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.allHttpSocketURI.join(",")) - nonInputProperties.systemProperty('tests.clustername', baseName) - nonInputProperties.systemProperty('tests.leader_host', baseLeaderCluster.map(c->c.allHttpSocketURI.last())) - nonInputProperties.systemProperty('tests.leader_remote_cluster_seed', baseLeaderCluster.map(c -> c.allTransportPortURI.last())) - nonInputProperties.systemProperty('tests.follower_host', baseFollowerCluster.map(c 
-> c.allHttpSocketURI.last())) - nonInputProperties.systemProperty('tests.follower_remote_cluster_seed', baseFollowerCluster.map(c -> c.allTransportPortURI.last())) } + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseCluster.name).map { it.allHttpSocketURI.join(",") }) + nonInputProperties.systemProperty('tests.clustername', baseName) + nonInputProperties.systemProperty('tests.leader_host', getClusterInfo(baseLeaderCluster.name).map { c->c.allHttpSocketURI.last() }) + nonInputProperties.systemProperty('tests.leader_remote_cluster_seed', getClusterInfo(baseLeaderCluster.name).map { c -> c.allTransportPortURI.last() }) + nonInputProperties.systemProperty('tests.follower_host', getClusterInfo(baseFollowerCluster.name).map { c->c.allHttpSocketURI.last() }) + nonInputProperties.systemProperty('tests.follower_remote_cluster_seed', getClusterInfo(baseFollowerCluster.name).map { c -> c.allTransportPortURI.last() }) } ["follower", "leader"].each { kind -> tasks.register("${baseName}#${kind}#clusterTest", StandaloneRestIntegTestTask) { systemProperty 'tests.rest.upgrade_state', 'none' systemProperty 'tests.rest.cluster_name', kind - ext.kindExt = kind } tasks.register("${baseName}#${kind}#oneThirdUpgradedTest", StandaloneRestIntegTestTask) { systemProperty 'tests.rest.upgrade_state', 'one_third' systemProperty 'tests.rest.cluster_name', kind dependsOn "${baseName}#leader#clusterTest", "${baseName}#follower#clusterTest" - ext.kindExt = kind } tasks.register("${baseName}#${kind}#twoThirdsUpgradedTest", StandaloneRestIntegTestTask) { systemProperty 'tests.rest.upgrade_state', 'two_third' systemProperty 'tests.rest.cluster_name', kind dependsOn "${baseName}#${kind}#oneThirdUpgradedTest" - ext.kindExt = kind } tasks.register("${baseName}#${kind}#upgradedClusterTest", StandaloneRestIntegTestTask) { systemProperty 'tests.rest.upgrade_state', 'all' systemProperty 'tests.rest.cluster_name', kind dependsOn "${baseName}#${kind}#twoThirdsUpgradedTest" - 
ext.kindExt = kind } } diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index f4c9f8f7ea2b..e45571fd7056 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -29,7 +29,7 @@ tasks.named("forbiddenPatterns").configure { exclude '**/system_key' } -String outputDir = "${buildDir}/generated-resources/${project.name}" +String outputDir = "${layout.buildDirectory.get().asFile}/generated-resources/${project.name}" tasks.register("copyTestNodeKeyMaterial", Copy) { from project(':x-pack:plugin:core').files('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem', @@ -41,7 +41,7 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> String oldVersion = bwcVersion.toString() // SearchableSnapshotsRollingUpgradeIT uses a specific repository to not interfere with other tests - String searchableSnapshotRepository = "${buildDir}/cluster/shared/searchable-snapshots-repo/${baseName}" + String searchableSnapshotRepository = "${layout.buildDirectory.get().asFile}/cluster/shared/searchable-snapshots-repo/${baseName}" def baseCluster = testClusters.register(baseName) { testDistribution = "DEFAULT" @@ -56,7 +56,7 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> } setting 'repositories.url.allowed_urls', 'http://snapshot.test*' - setting 'path.repo', "['${buildDir}/cluster/shared/repo/${baseName}', '${searchableSnapshotRepository}']" + setting 'path.repo', "['${layout.buildDirectory.get().asFile}/cluster/shared/repo/${baseName}', '${searchableSnapshotRepository}']" setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.enabled', 'true' setting 'xpack.security.transport.ssl.enabled', 'true' @@ -125,14 +125,14 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> mustRunAfter("precommit") dependsOn "copyTestNodeKeyMaterial" doFirst { - 
delete("${buildDir}/cluster/shared/repo/${baseName}") + delete("${layout.buildDirectory.get().asFile}/cluster/shared/repo/${baseName}") delete("${searchableSnapshotRepository}") } systemProperty 'tests.rest.suite', 'old_cluster' systemProperty 'tests.upgrade_from_version', oldVersion systemProperty 'tests.path.searchable.snapshots.repo', searchableSnapshotRepository - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c->c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) // Disable ML tests for incompatible systems @@ -146,9 +146,9 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> dependsOn "${baseName}#oldClusterTest" useCluster baseCluster doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c->c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.first_round', 'true' @@ -183,9 +183,9 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> dependsOn "${baseName}#oneThirdUpgradedTest" useCluster baseCluster doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } - nonInputProperties.systemProperty('tests.rest.cluster', "${-> baseCluster.get().allHttpSocketURI.join(",")}") + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 
'tests.first_round', 'false' @@ -203,9 +203,9 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> dependsOn "${baseName}#twoThirdsUpgradedTest" useCluster baseCluster doFirst { - baseCluster.get().nextNodeToNextVersion() + getRegistry().get().nextNodeToNextVersion(baseCluster) } - nonInputProperties.systemProperty('tests.rest.cluster', "${-> baseCluster.get().allHttpSocketURI.join(",")}") + nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }) nonInputProperties.systemProperty('tests.clustername', baseName) systemProperty 'tests.rest.suite', 'upgraded_cluster' systemProperty 'tests.upgrade_from_version', oldVersion From 7972d85acab1d2aa4699cd470da40c3984d3a799 Mon Sep 17 00:00:00 2001 From: Bogdan Pintea Date: Thu, 16 Jan 2025 13:24:45 +0100 Subject: [PATCH 08/30] Minor logging improvements (#120164) This applies minor logging changes removing some duplication and making it obvious in the log message that the entry is produced by ESQL (EsqlResponseListener). It also removes some code duplication in the way query actions produce the RestChannelConsumer. Closes #119692. 
--- .../esql/action/EsqlResponseListener.java | 24 +++++++++---------- .../esql/action/RestEsqlAsyncQueryAction.java | 15 +----------- .../esql/action/RestEsqlQueryAction.java | 5 ++-- 3 files changed, 15 insertions(+), 29 deletions(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java index fb7e0f651458..3d38b697dc5b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java @@ -31,6 +31,7 @@ import org.elasticsearch.xpack.esql.plugin.EsqlMediaTypeParser; import java.io.IOException; import java.util.Locale; import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; import static org.elasticsearch.xpack.esql.formatter.TextFormat.CSV; import static org.elasticsearch.xpack.esql.formatter.TextFormat.URL_PARAM_DELIMITER; @@ -188,29 +189,26 @@ public final class EsqlResponseListener extends RestRefCountedChunkedToXContentL if (LOGGER.isDebugEnabled() == false) { return listener; } + Consumer logger = response -> LOGGER.debug( + "ESQL query execution {}.\nQuery string or async ID: [{}]\nExecution time: {}ms", + response == null ? "failed" : "finished", + esqlQueryOrId, + getTook(response, TimeUnit.MILLISECONDS) + ); return ActionListener.wrap(r -> { listener.onResponse(r); - // At this point, the StopWatch should already have been stopped, so we log a consistent time. - LOGGER.debug( - "Finished execution of ESQL query.\nQuery string or async ID: [{}]\nExecution time: [{}]ms", - esqlQueryOrId, - getTook(r, TimeUnit.MILLISECONDS) - ); + logger.accept(r); }, ex -> { // In case of failure, stop the time manually before sending out the response. 
- long timeMillis = getTook(null, TimeUnit.MILLISECONDS); - LOGGER.debug( - "Failed execution of ESQL query.\nQuery string or async ID: [{}]\nExecution time: [{}]ms", - esqlQueryOrId, - timeMillis - ); + logger.accept(null); listener.onFailure(ex); }); } static void logOnFailure(Throwable throwable) { RestStatus status = ExceptionsHelper.status(throwable); - LOGGER.log(status.getStatus() >= 500 ? Level.WARN : Level.DEBUG, () -> "Request failed with status [" + status + "]: ", throwable); + var level = status.getStatus() >= 500 ? Level.WARN : Level.DEBUG; + LOGGER.log(level, () -> "ESQL request failed with status [" + status + "]: ", throwable); } /* diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java index bd2f8eb38f96..0fd35bc3c455 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java @@ -14,7 +14,6 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestCancellableNodeClient; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; @@ -46,21 +45,9 @@ public class RestEsqlAsyncQueryAction extends BaseRestHandler { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - EsqlQueryRequest esqlRequest; try (XContentParser parser = request.contentOrSourceParamParser()) { - esqlRequest = RequestXContent.parseAsync(parser); + return RestEsqlQueryAction.restChannelConsumer(RequestXContent.parseAsync(parser), request, client); } - - LOGGER.debug("Beginning execution of ESQL async query.\nQuery string: [{}]", 
esqlRequest.query()); - - return channel -> { - RestCancellableNodeClient cancellableClient = new RestCancellableNodeClient(client, request.getHttpChannel()); - cancellableClient.execute( - EsqlQueryAction.INSTANCE, - esqlRequest, - new EsqlResponseListener(channel, request, esqlRequest).wrapWithLogging() - ); - }; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java index 7f5adc310a53..ebe51cc2ab4e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java @@ -45,11 +45,12 @@ public class RestEsqlQueryAction extends BaseRestHandler { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - EsqlQueryRequest esqlRequest; try (XContentParser parser = request.contentOrSourceParamParser()) { - esqlRequest = RequestXContent.parseSync(parser); + return restChannelConsumer(RequestXContent.parseSync(parser), request, client); } + } + protected static RestChannelConsumer restChannelConsumer(EsqlQueryRequest esqlRequest, RestRequest request, NodeClient client) { LOGGER.debug("Beginning execution of ESQL query.\nQuery string: [{}]", esqlRequest.query()); return channel -> { From e03343940ef1f615bc8670c3589600987333d7cb Mon Sep 17 00:00:00 2001 From: Jan Kuipers <148754765+jan-elastic@users.noreply.github.com> Date: Thu, 16 Jan 2025 13:51:50 +0100 Subject: [PATCH 09/30] Datafeed jobs it wait for task cancel (#120177) * Fix task cancellation in DatafeedJobsIT. 
* fix * unmute DatafeedJobsIT --- muted-tests.yml | 2 -- .../xpack/ml/integration/DatafeedJobsIT.java | 9 ++++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 404e6de0a009..db8fa454c036 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -232,8 +232,6 @@ tests: - class: org.elasticsearch.xpack.ilm.actions.SearchableSnapshotActionIT method: testUpdatePolicyToAddPhasesYieldsInvalidActionsToBeSkipped issue: https://github.com/elastic/elasticsearch/issues/118406 -- class: org.elasticsearch.xpack.ml.integration.DatafeedJobsIT - issue: https://github.com/elastic/elasticsearch/issues/120088 - class: org.elasticsearch.xpack.security.QueryableReservedRolesIT method: testConfiguredReservedRolesAfterClosingAndOpeningIndex issue: https://github.com/elastic/elasticsearch/issues/120127 diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java index 367c1cee8b0e..caba356f82ee 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java @@ -83,9 +83,12 @@ public class DatafeedJobsIT extends MlNativeAutodetectIntegTestCase { updateClusterSettings(Settings.builder().putNull("logger.org.elasticsearch.xpack.ml.datafeed")); cleanUp(); // Race conditions between closing and killing tasks in these tests, - // sometimes result in lingering persistent tasks (such as "_close"), - // which cause subsequent tests to fail. - client().execute(TransportCancelTasksAction.TYPE, new CancelTasksRequest()); + // sometimes result in lingering persistent close tasks, which cause + // subsequent tests to fail. 
Therefore, they're explicitly cancelled. + CancelTasksRequest cancelTasksRequest = new CancelTasksRequest(); + cancelTasksRequest.setActions("*close*"); + cancelTasksRequest.setWaitForCompletion(true); + client().execute(TransportCancelTasksAction.TYPE, cancelTasksRequest).actionGet(); } public void testLookbackOnly() throws Exception { From bedeb0414a7bdcfdca82cf78471c561bc979e7b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Thu, 16 Jan 2025 13:53:57 +0100 Subject: [PATCH 10/30] Fix testInvalidToken by handling test Exception (#120196) --- muted-tests.yml | 3 --- .../xpack/security/authc/AuthenticationServiceTests.java | 3 +++ 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index db8fa454c036..d95095ac81df 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -193,9 +193,6 @@ tests: - class: org.elasticsearch.cluster.service.MasterServiceTests method: testThreadContext issue: https://github.com/elastic/elasticsearch/issues/118914 -- class: org.elasticsearch.xpack.security.authc.AuthenticationServiceTests - method: testInvalidToken - issue: https://github.com/elastic/elasticsearch/issues/119019 - class: org.elasticsearch.xpack.security.authc.ldap.ActiveDirectoryRunAsIT issue: https://github.com/elastic/elasticsearch/issues/115727 - class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index 5eb9fb9b41a2..4b51e301da25 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -2033,6 +2033,9 @@ public class AuthenticationServiceTests 
extends ESTestCase { } else if (e instanceof NegativeArraySizeException) { assertThat(e.getMessage(), containsString("array size must be positive but was: ")); latch.countDown(); + } else if (e instanceof ElasticsearchException) { + assertThat(e.getMessage(), containsString(getTestName())); + latch.countDown(); } else { logger.error("unexpected exception", e); latch.countDown(); From 219e8c2d0e2972db96aa638b436a5e879b23f92e Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 16 Jan 2025 09:36:39 -0500 Subject: [PATCH 11/30] Comment geo and aggs deprecated names (#119872) This adds comments to the deprecated names in geo and aggregations explaining that we're not removing them so we don't break any callers. Some of these have been deprecated for ten years but we're keeping them. --- .../elasticsearch/index/query/GeoPolygonQueryBuilder.java | 2 ++ .../main/java/org/elasticsearch/search/SearchModule.java | 4 ++++ .../bucket/range/GeoDistanceAggregationBuilder.java | 5 +++++ .../metrics/CardinalityAggregationBuilder.java | 6 ++++++ .../search/aggregations/metrics/PercentilesMethod.java | 8 ++++++++ .../search/aggregations/support/ValueType.java | 5 +++++ .../elasticsearch/search/sort/GeoDistanceSortBuilder.java | 5 +++++ 7 files changed, 35 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java index 575c15d5c063..4cddf8f91ab3 100644 --- a/server/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java @@ -36,6 +36,8 @@ import java.util.List; import java.util.Objects; /** + * Deprecated geo query. Deprecated in #64227, 7.12/8.0. We do not plan to remove this so we + * do not break any users using this. 
* @deprecated use {@link GeoShapeQueryBuilder} */ @Deprecated diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index d983dd1ff78d..6716c03a3a93 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -1157,6 +1157,10 @@ public class SearchModule { ); registerQuery( new QuerySpec<>( + /* + * Deprecated in #64227, 7.12/8.0. We do not plan to remove this so we + * do not break any users using this. + */ (new ParseField(GeoPolygonQueryBuilder.NAME).withAllDeprecated(GeoPolygonQueryBuilder.GEO_POLYGON_DEPRECATION_MSG)), GeoPolygonQueryBuilder::new, GeoPolygonQueryBuilder::fromXContent diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java index cfd8dd78595f..d20f768bedb4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java @@ -48,6 +48,11 @@ public class GeoDistanceAggregationBuilder extends ValuesSourceAggregationBuilde public static final String NAME = "geo_distance"; public static final ValuesSourceRegistry.RegistryKey REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey<>(NAME, GeoDistanceAggregatorSupplier.class); + /** + * The point from which to measure the distance. This has many other names that have been + * deprecated since 2014, but we have no plans to remove these names so we don't break anyone + * using them. 
+ */ static final ParseField ORIGIN_FIELD = new ParseField("origin", "center", "point", "por"); static final ParseField UNIT_FIELD = new ParseField("unit"); static final ParseField DISTANCE_TYPE_FIELD = new ParseField("distance_type"); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java index 2720ffdb5f7d..560eb61c7d7a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java @@ -37,6 +37,12 @@ public final class CardinalityAggregationBuilder extends ValuesSourceAggregation public static final ValuesSourceRegistry.RegistryKey REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey<>(NAME, CardinalityAggregatorSupplier.class); + /** + * Pre-2.0 rehashing was configurable, but it hasn't been for ~10 years. We always rehash because it's + * quite cheap. Attempting to enable or disable it is just a noop with a deprecation message. We have + * no plans to remove this parameter because it isn't worth breaking even the tiny fraction of users + * who are sending it. Deprecation was in #12931. 
+ */ private static final ParseField REHASH = new ParseField("rehash").withAllDeprecated("no replacement - values will always be rehashed"); public static final ParseField PRECISION_THRESHOLD_FIELD = new ParseField("precision_threshold"); public static final ParseField EXECUTION_HINT_FIELD_NAME = new ParseField("execution_hint"); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesMethod.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesMethod.java index 7c937acdd92d..f62e3394abd9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesMethod.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesMethod.java @@ -23,6 +23,10 @@ import java.io.IOException; public enum PercentilesMethod implements Writeable { /** * The TDigest method for calculating percentiles + *

+ * The {@code TDigest} and {@code TDIGEST} names have been deprecated since 8.0, + * but we don't have any plans to remove it so we don't break anyone using it. + *

*/ TDIGEST("tdigest", "TDigest", "TDIGEST") { @Override @@ -32,6 +36,10 @@ public enum PercentilesMethod implements Writeable { }, /** * The HDRHistogram method of calculating percentiles + *

+ * The {@code HDR} name has been deprecated since 8.0, but we don't have any plans + * to remove it so we don't break anyone using it. + *

*/ HDR("hdr", "HDR") { @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java index 7c4def5aa274..103a23bf8ef9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java @@ -52,6 +52,11 @@ public enum ValueType implements Writeable { private final byte id; private final String preferredName; + /** + * Name of the {@code value_type} field in the JSON. The name {@code valueType} has + * been deprecated since before #22160, but we have no plans to remove it so we don't + * break anyone that might be using it. + */ public static final ParseField VALUE_TYPE = new ParseField("value_type", "valueType"); ValueType(byte id, String description, String preferredName, ValuesSourceType valuesSourceType, DocValueFormat defaultFormat) { diff --git a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index 2aaade35fb8f..0bd8cc44edb3 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -74,6 +74,11 @@ public class GeoDistanceSortBuilder extends SortBuilder private static final ParseField UNIT_FIELD = new ParseField("unit"); private static final ParseField DISTANCE_TYPE_FIELD = new ParseField("distance_type"); private static final ParseField VALIDATION_METHOD_FIELD = new ParseField("validation_method"); + /** + * Name for the sort {@link SortMode} which is mostly about sorting on multivalued fields. + * The {@code sort_mode} name has been deprecated since 5.0, but we don't plan to remove + * this so we don't break anyone using this. 
+ */ private static final ParseField SORTMODE_FIELD = new ParseField("mode", "sort_mode"); private static final ParseField IGNORE_UNMAPPED = new ParseField("ignore_unmapped"); From 5b5f0990f1a2703db418aaf75c886dbc7a143f4a Mon Sep 17 00:00:00 2001 From: Tim Grein Date: Thu, 16 Jan 2025 15:40:25 +0100 Subject: [PATCH 12/30] [Inference API] Fix comment in ElasticInferenceServiceFeature to mention new way of enabling elastic inference service feature (#120274) --- .../services/elastic/ElasticInferenceServiceFeature.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceFeature.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceFeature.java index 324c20d0e48b..530efee4a3d4 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceFeature.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceFeature.java @@ -11,7 +11,7 @@ import org.elasticsearch.common.util.FeatureFlag; /** * Elastic Inference Service (EIS) feature flag. When the feature is complete, this flag will be removed. - * Enable feature via JVM option: `-Des.eis_feature_flag_enabled=true`. + * Enable feature via JVM option: `-Des.elastic_inference_service_feature_flag_enabled=true`. 
*/ public class ElasticInferenceServiceFeature { From 46a8e69dd6906357124574f687fa9266b0dd40ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Thu, 16 Jan 2025 15:50:41 +0100 Subject: [PATCH 13/30] [Entitlements] Introducing runtime version-specific checks in IT tests (#120265) --- libs/entitlement/qa/common/build.gradle | 7 ++++ .../common/RestEntitlementsCheckAction.java | 38 +++++++------------ .../common/VersionSpecificNetworkChecks.java | 14 +++++++ .../common/VersionSpecificNetworkChecks.java | 29 ++++++++++++++ 4 files changed, 64 insertions(+), 24 deletions(-) create mode 100644 libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/VersionSpecificNetworkChecks.java create mode 100644 libs/entitlement/qa/common/src/main18/java/org/elasticsearch/entitlement/qa/common/VersionSpecificNetworkChecks.java diff --git a/libs/entitlement/qa/common/build.gradle b/libs/entitlement/qa/common/build.gradle index df3bc66cba21..18bc5679d09c 100644 --- a/libs/entitlement/qa/common/build.gradle +++ b/libs/entitlement/qa/common/build.gradle @@ -7,9 +7,16 @@ * License v3.0 only", or the "Server Side Public License, v 1". 
*/ +import org.elasticsearch.gradle.internal.precommit.CheckForbiddenApisTask + apply plugin: 'elasticsearch.build' +apply plugin: 'elasticsearch.mrjar' dependencies { implementation project(':server') implementation project(':libs:logging') } + +tasks.withType(CheckForbiddenApisTask).configureEach { + replaceSignatureFiles 'jdk-signatures' +} diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java index a156d20e3686..9e7e6e33f3ed 100644 --- a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java +++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java @@ -49,14 +49,13 @@ import java.net.URL; import java.net.URLClassLoader; import java.net.URLConnection; import java.net.URLStreamHandler; -import java.net.spi.InetAddressResolver; -import java.net.spi.InetAddressResolverProvider; import java.net.spi.URLStreamHandlerProvider; import java.security.NoSuchAlgorithmException; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; +import java.util.stream.Stream; import javax.net.ssl.HttpsURLConnection; import javax.net.ssl.SSLContext; @@ -73,25 +72,25 @@ public class RestEntitlementsCheckAction extends BaseRestHandler { public static final Thread NO_OP_SHUTDOWN_HOOK = new Thread(() -> {}, "Shutdown hook for testing"); private final String prefix; - record CheckAction(CheckedRunnable action, boolean isAlwaysDeniedToPlugins) { + record CheckAction(CheckedRunnable action, boolean isAlwaysDeniedToPlugins, Integer fromJavaVersion) { /** * These cannot be granted to plugins, so our test plugins cannot test the "allowed" case. - * Used both for always-denied entitlements as well as those granted only to the server itself. 
+ * Used both for always-denied entitlements and those granted only to the server itself. */ static CheckAction deniedToPlugins(CheckedRunnable action) { - return new CheckAction(action, true); + return new CheckAction(action, true, null); } static CheckAction forPlugins(CheckedRunnable action) { - return new CheckAction(action, false); + return new CheckAction(action, false, null); } static CheckAction alwaysDenied(CheckedRunnable action) { - return new CheckAction(action, true); + return new CheckAction(action, true, null); } } - private static final Map checkActions = Map.ofEntries( + private static final Map checkActions = Stream.of( entry("runtime_exit", deniedToPlugins(RestEntitlementsCheckAction::runtimeExit)), entry("runtime_halt", deniedToPlugins(RestEntitlementsCheckAction::runtimeHalt)), entry("system_exit", deniedToPlugins(RestEntitlementsCheckAction::systemExit)), @@ -140,7 +139,10 @@ public class RestEntitlementsCheckAction extends BaseRestHandler { entry("proxySelector_setDefault", alwaysDenied(RestEntitlementsCheckAction::setDefaultProxySelector)), entry("responseCache_setDefault", alwaysDenied(RestEntitlementsCheckAction::setDefaultResponseCache)), - entry("createInetAddressResolverProvider", alwaysDenied(RestEntitlementsCheckAction::createInetAddressResolverProvider)), + entry( + "createInetAddressResolverProvider", + new CheckAction(VersionSpecificNetworkChecks::createInetAddressResolverProvider, true, 18) + ), entry("createURLStreamHandlerProvider", alwaysDenied(RestEntitlementsCheckAction::createURLStreamHandlerProvider)), entry("createURLWithURLStreamHandler", alwaysDenied(RestEntitlementsCheckAction::createURLWithURLStreamHandler)), entry("createURLWithURLStreamHandler2", alwaysDenied(RestEntitlementsCheckAction::createURLWithURLStreamHandler2)), @@ -156,7 +158,9 @@ public class RestEntitlementsCheckAction extends BaseRestHandler { entry("socket_connect", forPlugins(NetworkAccessCheckActions::socketConnect)), entry("server_socket_bind", 
forPlugins(NetworkAccessCheckActions::serverSocketBind)), entry("server_socket_accept", forPlugins(NetworkAccessCheckActions::serverSocketAccept)) - ); + ) + .filter(entry -> entry.getValue().fromJavaVersion() == null || Runtime.version().feature() >= entry.getValue().fromJavaVersion()) + .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue)); private static void createURLStreamHandlerProvider() { var x = new URLStreamHandlerProvider() { @@ -187,20 +191,6 @@ public class RestEntitlementsCheckAction extends BaseRestHandler { }); } - private static void createInetAddressResolverProvider() { - var x = new InetAddressResolverProvider() { - @Override - public InetAddressResolver get(Configuration configuration) { - return null; - } - - @Override - public String name() { - return "TEST"; - } - }; - } - private static void setDefaultResponseCache() { ResponseCache.setDefault(null); } diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/VersionSpecificNetworkChecks.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/VersionSpecificNetworkChecks.java new file mode 100644 index 000000000000..e1e0b9e52f51 --- /dev/null +++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/VersionSpecificNetworkChecks.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.entitlement.qa.common; + +class VersionSpecificNetworkChecks { + static void createInetAddressResolverProvider() {} +} diff --git a/libs/entitlement/qa/common/src/main18/java/org/elasticsearch/entitlement/qa/common/VersionSpecificNetworkChecks.java b/libs/entitlement/qa/common/src/main18/java/org/elasticsearch/entitlement/qa/common/VersionSpecificNetworkChecks.java new file mode 100644 index 000000000000..0ead32ec480e --- /dev/null +++ b/libs/entitlement/qa/common/src/main18/java/org/elasticsearch/entitlement/qa/common/VersionSpecificNetworkChecks.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.entitlement.qa.common; + +import java.net.spi.InetAddressResolver; +import java.net.spi.InetAddressResolverProvider; + +class VersionSpecificNetworkChecks { + static void createInetAddressResolverProvider() { + var x = new InetAddressResolverProvider() { + @Override + public InetAddressResolver get(Configuration configuration) { + return null; + } + + @Override + public String name() { + return "TEST"; + } + }; + } +} From be049bb698757c2a55dbcb4d09fb5f666e651bb8 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Thu, 16 Jan 2025 14:55:30 +0000 Subject: [PATCH 14/30] Remove most references to 7.9 and 7.10 transport versions (#118784) --- .../elasticsearch/ElasticsearchException.java | 4 ++-- .../org/elasticsearch/TransportVersions.java | 1 - .../admin/cluster/node/info/NodeInfo.java | 8 ++------ .../elasticsearch/cluster/ClusterInfo.java | 9 ++------- .../cluster/metadata/IndexMetadata.java | 20 ++++--------------- .../index/query/InnerHitBuilder.java | 15 ++------------ .../query/MatchPhrasePrefixQueryBuilder.java | 8 ++------ .../index/query/PrefixQueryBuilder.java | 8 ++------ .../index/query/RegexpQueryBuilder.java | 8 ++------ .../index/query/TermQueryBuilder.java | 8 ++------ .../index/query/WildcardQueryBuilder.java | 8 ++------ .../index/stats/IndexingPressureStats.java | 10 ++-------- .../elasticsearch/index/store/StoreStats.java | 11 ++-------- .../indices/recovery/RecoveryState.java | 15 ++------------ ...iableWidthHistogramAggregationBuilder.java | 2 +- .../DelayedShardAggregationBuilder.java | 2 +- ...PercentilesPipelineAggregationBuilder.java | 2 +- .../NormalizePipelineAggregationBuilder.java | 2 +- .../rate/RateAggregationBuilder.java | 2 +- .../xpack/core/ccr/AutoFollowMetadata.java | 11 ++-------- .../action/PutAutoFollowPatternAction.java | 8 ++------ .../core/ccr/action/PutFollowAction.java | 8 ++------ .../DataStreamFeatureSetUsage.java | 2 +- .../datatiers/DataTiersFeatureSetUsage.java | 2 +- 
.../xpack/core/eql/EqlFeatureSetUsage.java | 2 +- .../trainedmodel/EmptyConfigUpdate.java | 2 +- .../trainedmodel/ResultsFieldUpdate.java | 2 +- .../SearchableSnapshotFeatureSetUsage.java | 2 +- ...eClusterMinimumVersionValidationTests.java | 18 ++++++++--------- .../InferencePipelineAggregationBuilder.java | 2 +- 30 files changed, 55 insertions(+), 147 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index a430611559bb..56083902c3cc 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1824,7 +1824,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte org.elasticsearch.indices.recovery.PeerRecoveryNotFound.class, org.elasticsearch.indices.recovery.PeerRecoveryNotFound::new, 158, - TransportVersions.V_7_9_0 + UNKNOWN_VERSION_ADDED ), NODE_HEALTH_CHECK_FAILURE_EXCEPTION( org.elasticsearch.cluster.coordination.NodeHealthCheckFailureException.class, @@ -1836,7 +1836,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte org.elasticsearch.transport.NoSeedNodeLeftException.class, org.elasticsearch.transport.NoSeedNodeLeftException::new, 160, - TransportVersions.V_7_10_0 + UNKNOWN_VERSION_ADDED ), AUTHENTICATION_PROCESSING_ERROR( org.elasticsearch.ElasticsearchAuthenticationProcessingError.class, diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 957da69aa599..1ab8cdfc2af7 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -62,7 +62,6 @@ public class TransportVersions { public static final TransportVersion V_7_8_1 = def(7_08_01_99); public static final TransportVersion V_7_9_0 = def(7_09_00_99); public static 
final TransportVersion V_7_10_0 = def(7_10_00_99); - public static final TransportVersion V_7_10_1 = def(7_10_01_99); public static final TransportVersion V_7_11_0 = def(7_11_00_99); public static final TransportVersion V_7_12_0 = def(7_12_00_99); public static final TransportVersion V_7_13_0 = def(7_13_00_99); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java index a7d92682b763..08825706c09e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java @@ -111,9 +111,7 @@ public class NodeInfo extends BaseNodeResponse { addInfoIfNonNull(HttpInfo.class, in.readOptionalWriteable(HttpInfo::new)); addInfoIfNonNull(PluginsAndModules.class, in.readOptionalWriteable(PluginsAndModules::new)); addInfoIfNonNull(IngestInfo.class, in.readOptionalWriteable(IngestInfo::new)); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - addInfoIfNonNull(AggregationInfo.class, in.readOptionalWriteable(AggregationInfo::new)); - } + addInfoIfNonNull(AggregationInfo.class, in.readOptionalWriteable(AggregationInfo::new)); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { addInfoIfNonNull(RemoteClusterServerInfo.class, in.readOptionalWriteable(RemoteClusterServerInfo::new)); } @@ -285,9 +283,7 @@ public class NodeInfo extends BaseNodeResponse { out.writeOptionalWriteable(getInfo(HttpInfo.class)); out.writeOptionalWriteable(getInfo(PluginsAndModules.class)); out.writeOptionalWriteable(getInfo(IngestInfo.class)); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - out.writeOptionalWriteable(getInfo(AggregationInfo.class)); - } + out.writeOptionalWriteable(getInfo(AggregationInfo.class)); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { 
out.writeOptionalWriteable(getInfo(RemoteClusterServerInfo.class)); } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java b/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java index 66fbe35fa52b..230677a6c86c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java @@ -22,7 +22,6 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.store.StoreStats; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -101,9 +100,7 @@ public class ClusterInfo implements ChunkedToXContent, Writeable { this.dataPath = in.getTransportVersion().onOrAfter(DATA_PATH_NEW_KEY_VERSION) ? in.readImmutableMap(NodeAndShard::new, StreamInput::readString) : in.readImmutableMap(nested -> NodeAndShard.from(new ShardRouting(nested)), StreamInput::readString); - this.reservedSpace = in.getTransportVersion().onOrAfter(StoreStats.RESERVED_BYTES_VERSION) - ? 
in.readImmutableMap(NodeAndPath::new, ReservedSpace::new) - : Map.of(); + this.reservedSpace = in.readImmutableMap(NodeAndPath::new, ReservedSpace::new); } @Override @@ -119,9 +116,7 @@ public class ClusterInfo implements ChunkedToXContent, Writeable { } else { out.writeMap(this.dataPath, (o, k) -> createFakeShardRoutingFromNodeAndShard(k).writeTo(o), StreamOutput::writeString); } - if (out.getTransportVersion().onOrAfter(StoreStats.RESERVED_BYTES_VERSION)) { - out.writeMap(this.reservedSpace); - } + out.writeMap(this.reservedSpace); } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index b2e0233463bf..04f9448a936b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -563,8 +563,6 @@ public class IndexMetadata implements Diffable, ToXContentFragmen public static final String INDEX_STATE_FILE_PREFIX = "state-"; - static final TransportVersion SYSTEM_INDEX_FLAG_ADDED = TransportVersions.V_7_10_0; - static final TransportVersion STATS_AND_FORECAST_ADDED = TransportVersions.V_8_6_0; private final int routingNumShards; @@ -1644,11 +1642,7 @@ public class IndexMetadata implements Diffable, ToXContentFragmen } else { mappingsUpdatedVersion = IndexVersions.ZERO; } - if (in.getTransportVersion().onOrAfter(SYSTEM_INDEX_FLAG_ADDED)) { - isSystem = in.readBoolean(); - } else { - isSystem = false; - } + isSystem = in.readBoolean(); timestampRange = IndexLongFieldRange.readFrom(in); if (in.getTransportVersion().onOrAfter(STATS_AND_FORECAST_ADDED)) { stats = in.readOptionalWriteable(IndexMetadataStats::new); @@ -1694,9 +1688,7 @@ public class IndexMetadata implements Diffable, ToXContentFragmen if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) { IndexVersion.writeVersion(mappingsUpdatedVersion, out); } - if 
(out.getTransportVersion().onOrAfter(SYSTEM_INDEX_FLAG_ADDED)) { - out.writeBoolean(isSystem); - } + out.writeBoolean(isSystem); timestampRange.writeTo(out); if (out.getTransportVersion().onOrAfter(STATS_AND_FORECAST_ADDED)) { out.writeOptionalWriteable(stats); @@ -1798,9 +1790,7 @@ public class IndexMetadata implements Diffable, ToXContentFragmen if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) { builder.mappingsUpdatedVersion(IndexVersion.readVersion(in)); } - if (in.getTransportVersion().onOrAfter(SYSTEM_INDEX_FLAG_ADDED)) { - builder.system(in.readBoolean()); - } + builder.system(in.readBoolean()); builder.timestampRange(IndexLongFieldRange.readFrom(in)); if (in.getTransportVersion().onOrAfter(STATS_AND_FORECAST_ADDED)) { @@ -1850,9 +1840,7 @@ public class IndexMetadata implements Diffable, ToXContentFragmen if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) { IndexVersion.writeVersion(mappingsUpdatedVersion, out); } - if (out.getTransportVersion().onOrAfter(SYSTEM_INDEX_FLAG_ADDED)) { - out.writeBoolean(isSystem); - } + out.writeBoolean(isSystem); timestampRange.writeTo(out); if (out.getTransportVersion().onOrAfter(STATS_AND_FORECAST_ADDED)) { out.writeOptionalWriteable(stats); diff --git a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java index edf32caaee4a..cbdc700d1e18 100644 --- a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java @@ -8,7 +8,6 @@ */ package org.elasticsearch.index.query; -import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -189,11 +188,7 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject { highlightBuilder = 
in.readOptionalWriteable(HighlightBuilder::new); this.innerCollapseBuilder = in.readOptionalWriteable(CollapseBuilder::new); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - if (in.readBoolean()) { - fetchFields = in.readCollectionAsList(FieldAndFormat::new); - } - } + fetchFields = in.readOptionalCollectionAsList(FieldAndFormat::new); } @Override @@ -228,13 +223,7 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject { } out.writeOptionalWriteable(highlightBuilder); out.writeOptionalWriteable(innerCollapseBuilder); - - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - out.writeBoolean(fetchFields != null); - if (fetchFields != null) { - out.writeCollection(fetchFields); - } - } + out.writeOptionalCollection(fetchFields); } public String getName() { diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java index da1a760d1414..139d5c2c2a2e 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java @@ -66,9 +66,7 @@ public class MatchPhrasePrefixQueryBuilder extends AbstractQueryBuilder fieldName = in.readString(); value = in.readString(); rewrite = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - caseInsensitive = in.readBoolean(); - } + caseInsensitive = in.readBoolean(); } @Override @@ -86,9 +84,7 @@ public class PrefixQueryBuilder extends AbstractQueryBuilder out.writeString(fieldName); out.writeString(value); out.writeOptionalString(rewrite); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - out.writeBoolean(caseInsensitive); - } + out.writeBoolean(caseInsensitive); } @Override diff --git 
a/server/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java index 461dc6632243..ff3d63d4c254 100644 --- a/server/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java @@ -88,9 +88,7 @@ public class RegexpQueryBuilder extends AbstractQueryBuilder syntaxFlagsValue = in.readVInt(); maxDeterminizedStates = in.readVInt(); rewrite = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - caseInsensitive = in.readBoolean(); - } + caseInsensitive = in.readBoolean(); } @Override @@ -100,9 +98,7 @@ public class RegexpQueryBuilder extends AbstractQueryBuilder out.writeVInt(syntaxFlagsValue); out.writeVInt(maxDeterminizedStates); out.writeOptionalString(rewrite); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - out.writeBoolean(caseInsensitive); - } + out.writeBoolean(caseInsensitive); } /** Returns the field name used in this query. 
*/ diff --git a/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java index 113f66f3e58d..3b2a444e3fc7 100644 --- a/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java @@ -89,17 +89,13 @@ public class TermQueryBuilder extends BaseTermQueryBuilder { */ public TermQueryBuilder(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - caseInsensitive = in.readBoolean(); - } + caseInsensitive = in.readBoolean(); } @Override protected void doWriteTo(StreamOutput out) throws IOException { super.doWriteTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { - out.writeBoolean(caseInsensitive); - } + out.writeBoolean(caseInsensitive); } public static TermQueryBuilder fromXContent(XContentParser parser) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java index 419195e5e5ba..fed6c3df1558 100644 --- a/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/WildcardQueryBuilder.java @@ -86,9 +86,7 @@ public class WildcardQueryBuilder extends AbstractQueryBuilder i final String remoteCluster = in.readString(); final List leaderIndexPatterns = in.readStringCollectionAsList(); final String followIndexPattern = in.readOptionalString(); - final Settings settings; - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) { - settings = Settings.readSettingsFromStream(in); - } else { - settings = Settings.EMPTY; - } + final Settings settings = Settings.readSettingsFromStream(in); return new AutoFollowPattern(remoteCluster, leaderIndexPatterns, followIndexPattern, settings, in); } @@ 
-345,9 +340,7 @@ public class AutoFollowMetadata extends AbstractNamedDiffable i out.writeString(remoteCluster); out.writeStringCollection(leaderIndexPatterns); out.writeOptionalString(followIndexPattern); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) { - settings.writeTo(out); - } + settings.writeTo(out); super.writeTo(out); out.writeBoolean(active); if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_14_0)) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java index dcee7274632e..07b18f7dc4f9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java @@ -189,9 +189,7 @@ public class PutAutoFollowPatternAction extends ActionType remoteCluster = in.readString(); leaderIndexPatterns = in.readStringCollectionAsList(); followIndexNamePattern = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) { - settings = Settings.readSettingsFromStream(in); - } + settings = Settings.readSettingsFromStream(in); parameters = new FollowParameters(in); if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_14_0)) { leaderIndexExclusionPatterns = in.readStringCollectionAsList(); @@ -205,9 +203,7 @@ public class PutAutoFollowPatternAction extends ActionType out.writeString(remoteCluster); out.writeStringCollection(leaderIndexPatterns); out.writeOptionalString(followIndexNamePattern); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) { - settings.writeTo(out); - } + settings.writeTo(out); parameters.writeTo(out); if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_14_0)) { out.writeStringCollection(leaderIndexExclusionPatterns); diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java index 82941c440484..d902b54dfbdb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java @@ -194,9 +194,7 @@ public final class PutFollowAction extends ActionType this.remoteCluster = in.readString(); this.leaderIndex = in.readString(); this.followerIndex = in.readString(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) { - this.settings = Settings.readSettingsFromStream(in); - } + this.settings = Settings.readSettingsFromStream(in); this.parameters = new FollowParameters(in); waitForActiveShards(ActiveShardCount.readFrom(in)); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { @@ -210,9 +208,7 @@ public final class PutFollowAction extends ActionType out.writeString(remoteCluster); out.writeString(leaderIndex); out.writeString(followerIndex); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) { - settings.writeTo(out); - } + settings.writeTo(out); parameters.writeTo(out); waitForActiveShards.writeTo(out); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java index f8cb9b913b4a..379b07d9b9a7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java @@ -42,7 +42,7 @@ public class DataStreamFeatureSetUsage extends XPackFeatureUsage { @Override public TransportVersion 
getMinimalSupportedVersion() { - return TransportVersions.V_7_9_0; + return TransportVersions.ZERO; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsage.java index a33dd7dff346..48002e6ed41f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsage.java @@ -45,7 +45,7 @@ public class DataTiersFeatureSetUsage extends XPackFeatureUsage { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_10_0; + return TransportVersions.ZERO; } public Map getTierStats() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java index 0edbda79ed97..96742c1e5e57 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java @@ -54,7 +54,7 @@ public class EqlFeatureSetUsage extends XPackFeatureUsage { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_9_0; + return TransportVersions.ZERO; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/EmptyConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/EmptyConfigUpdate.java index feb3a2e3191f..c6d70543d89e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/EmptyConfigUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/EmptyConfigUpdate.java @@ -71,7 
+71,7 @@ public class EmptyConfigUpdate implements InferenceConfigUpdate { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_9_0; + return TransportVersions.ZERO; } public static class Builder implements InferenceConfigUpdate.Builder { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ResultsFieldUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ResultsFieldUpdate.java index 34d3b1c1e38f..fdd8735bb245 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ResultsFieldUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ResultsFieldUpdate.java @@ -55,7 +55,7 @@ public class ResultsFieldUpdate implements InferenceConfigUpdate { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_9_0; + return TransportVersions.ZERO; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/SearchableSnapshotFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/SearchableSnapshotFeatureSetUsage.java index d7d0320b602b..70a5e0c7a3ce 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/SearchableSnapshotFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/SearchableSnapshotFeatureSetUsage.java @@ -34,7 +34,7 @@ public class SearchableSnapshotFeatureSetUsage extends XPackFeatureUsage { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_7_9_0; + return TransportVersions.ZERO; } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/validation/RemoteClusterMinimumVersionValidationTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/validation/RemoteClusterMinimumVersionValidationTests.java index 299279ee13f1..2781aa9d18c6 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/validation/RemoteClusterMinimumVersionValidationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/validation/RemoteClusterMinimumVersionValidationTests.java @@ -33,7 +33,7 @@ import static org.mockito.Mockito.spy; public class RemoteClusterMinimumVersionValidationTests extends ESTestCase { - private static final TransportVersion MIN_EXPECTED_VERSION = TransportVersions.V_7_11_0; + private static final TransportVersion MIN_EXPECTED_VERSION = TransportVersions.V_8_11_X; private static final String REASON = "some reason"; private Context context; @@ -41,9 +41,9 @@ public class RemoteClusterMinimumVersionValidationTests extends ESTestCase { @Before public void setUpMocks() { context = spy(new Context(null, null, null, null, null, null, null, null, null, null)); - doReturn(TransportVersions.V_7_10_0).when(context).getRemoteClusterVersion("cluster-A"); - doReturn(TransportVersions.V_7_11_0).when(context).getRemoteClusterVersion("cluster-B"); - doReturn(TransportVersions.V_7_12_0).when(context).getRemoteClusterVersion("cluster-C"); + doReturn(TransportVersions.V_8_10_X).when(context).getRemoteClusterVersion("cluster-A"); + doReturn(TransportVersions.V_8_11_X).when(context).getRemoteClusterVersion("cluster-B"); + doReturn(TransportVersions.V_8_12_0).when(context).getRemoteClusterVersion("cluster-C"); } public void testGetters() { @@ -82,8 +82,8 @@ public class RemoteClusterMinimumVersionValidationTests extends ESTestCase { ctx -> assertThat( ctx.getValidationException().validationErrors(), contains( - "remote clusters are expected to run at least version [7.11.0] (reason: [some reason]), " - + "but the following clusters were too old: [cluster-A (7.10.0)]" + "remote clusters are expected to run at 
least version [8.11.0-8.11.4] (reason: [some reason]), " + + "but the following clusters were too old: [cluster-A (8.10.0-8.10.4)]" ) ) ) @@ -93,15 +93,15 @@ public class RemoteClusterMinimumVersionValidationTests extends ESTestCase { public void testValidate_TwoRemoteClusterVersionsTooLow() { doReturn(new HashSet<>(Arrays.asList("cluster-A", "cluster-B", "cluster-C"))).when(context).getRegisteredRemoteClusterNames(); doReturn(new TreeSet<>(Arrays.asList("cluster-A:dummy", "cluster-B:dummy", "cluster-C:dummy"))).when(context).resolveRemoteSource(); - SourceDestValidation validation = new RemoteClusterMinimumVersionValidation(TransportVersions.V_7_12_0, REASON); + SourceDestValidation validation = new RemoteClusterMinimumVersionValidation(TransportVersions.V_8_12_0, REASON); validation.validate( context, ActionTestUtils.assertNoFailureListener( ctx -> assertThat( ctx.getValidationException().validationErrors(), contains( - "remote clusters are expected to run at least version [7.12.0] (reason: [some reason]), " - + "but the following clusters were too old: [cluster-A (7.10.0), cluster-B (7.11.0)]" + "remote clusters are expected to run at least version [8.12.0] (reason: [some reason]), " + + "but the following clusters were too old: [cluster-A (8.10.0-8.10.4), cluster-B (8.11.0-8.11.4)]" ) ) ) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java index 16a0f85028b8..f2bb12b13e30 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java @@ -381,6 +381,6 @@ public class InferencePipelineAggregationBuilder extends AbstractPipelineAggrega @Override public TransportVersion 
getMinimalSupportedVersion() { - return TransportVersions.V_7_9_0; + return TransportVersions.ZERO; } } From 5f9fefccf2ea5dd263a4c77538eeac022d19c5ff Mon Sep 17 00:00:00 2001 From: Parker Timmins Date: Thu, 16 Jan 2025 09:41:41 -0600 Subject: [PATCH 15/30] Add sanity check to ReindexDatastreamIndexAction (#120231) Add an assert that checks that source and destination index have the same doc count. This requires a refresh of the dest index and a search request against both the source and dest index, so will only be run if asserts are enabled. --- docs/changelog/120231.yaml | 5 ++ ...ReindexDataStreamIndexTransportAction.java | 68 +++++++++++++++---- 2 files changed, 58 insertions(+), 15 deletions(-) create mode 100644 docs/changelog/120231.yaml diff --git a/docs/changelog/120231.yaml b/docs/changelog/120231.yaml new file mode 100644 index 000000000000..58fba0256c54 --- /dev/null +++ b/docs/changelog/120231.yaml @@ -0,0 +1,5 @@ +pr: 120231 +summary: Add sanity check to `ReindexDatastreamIndexAction` +area: Data streams +type: enhancement +issues: [] diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java index 7bb440bc52a1..bd0128e658c3 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java @@ -8,13 +8,17 @@ package org.elasticsearch.xpack.migrate.action; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.lucene.search.TotalHits; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import 
org.elasticsearch.action.admin.indices.readonly.AddIndexBlockRequest; import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockResponse; import org.elasticsearch.action.admin.indices.readonly.TransportAddIndexBlockAction; +import org.elasticsearch.action.admin.indices.refresh.RefreshAction; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.IndicesOptions; @@ -25,12 +29,14 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Assertions; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.ReindexAction; import org.elasticsearch.index.reindex.ReindexRequest; import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -108,6 +114,7 @@ public class ReindexDataStreamIndexTransportAction extends HandledTransportActio .andThen(l -> createIndex(sourceIndex, destIndexName, l, taskId)) .andThen(l -> reindex(sourceIndexName, destIndexName, l, taskId)) .andThen(l -> copyOldSourceSettingsToDest(settingsBefore, destIndexName, l, taskId)) + .andThen(l -> sanityCheck(sourceIndexName, destIndexName, l, taskId)) .andThenApply(ignored -> new ReindexDataStreamIndexAction.Response(destIndexName)) .addListener(listener); } @@ -188,21 +195,6 @@ public 
class ReindexDataStreamIndexTransportAction extends HandledTransportActio client.execute(ReindexAction.INSTANCE, reindexRequest, listener); } - private void addBlockIfFromSource( - IndexMetadata.APIBlock block, - Settings settingsBefore, - String destIndexName, - ActionListener listener, - TaskId parentTaskId - ) { - if (settingsBefore.getAsBoolean(block.settingName(), false)) { - var errorMessage = String.format(Locale.ROOT, "Add [%s] block to index [%s] was not acknowledged", block.name(), destIndexName); - addBlockToIndex(block, destIndexName, failIfNotAcknowledged(listener, errorMessage), parentTaskId); - } else { - listener.onResponse(null); - } - } - private void updateSettings( String index, Settings.Builder settings, @@ -270,4 +262,50 @@ public class ReindexDataStreamIndexTransportAction extends HandledTransportActio addIndexBlockRequest.setParentTask(parentTaskId); client.admin().indices().execute(TransportAddIndexBlockAction.TYPE, addIndexBlockRequest, listener); } + + private void getIndexDocCount(String index, TaskId parentTaskId, ActionListener listener) { + SearchRequest countRequest = new SearchRequest(index); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(0).trackTotalHits(true); + countRequest.allowPartialSearchResults(false); + countRequest.source(searchSourceBuilder); + countRequest.setParentTask(parentTaskId); + client.search(countRequest, listener.delegateFailure((delegate, response) -> { + var totalHits = response.getHits().getTotalHits(); + assert totalHits.relation() == TotalHits.Relation.EQUAL_TO; + delegate.onResponse(totalHits.value()); + })); + } + + private void sanityCheck( + String sourceIndexName, + String destIndexName, + ActionListener listener, + TaskId parentTaskId + ) { + if (Assertions.ENABLED) { + logger.debug("Comparing source [{}] and dest [{}] doc counts", sourceIndexName, destIndexName); + client.execute( + RefreshAction.INSTANCE, + new RefreshRequest(destIndexName), + 
listener.delegateFailureAndWrap((delegate, ignored) -> { + getIndexDocCount(sourceIndexName, parentTaskId, delegate.delegateFailureAndWrap((delegate1, sourceCount) -> { + getIndexDocCount(destIndexName, parentTaskId, delegate1.delegateFailureAndWrap((delegate2, destCount) -> { + assert sourceCount == destCount + : String.format( + Locale.ROOT, + "source index [%s] has %d docs and dest [%s] has %d docs", + sourceIndexName, + sourceCount, + destIndexName, + destCount + ); + delegate2.onResponse(null); + })); + })); + }) + ); + } else { + listener.onResponse(null); + } + } } From 806069b544dccd7455f1df8c0b9d8b29793eb40d Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 16 Jan 2025 07:50:39 -0800 Subject: [PATCH 16/30] Allow overriding of plugin metadata files in integration tests (#120245) --- .../local/AbstractLocalClusterFactory.java | 40 +++++++++-- .../local/AbstractLocalSpecBuilder.java | 15 +++- .../local/DefaultPluginInstallSpec.java | 31 ++++++++ .../test/cluster/local/LocalClusterSpec.java | 6 +- .../test/cluster/local/LocalSpecBuilder.java | 6 ++ .../test/cluster/local/PluginInstallSpec.java | 33 +++++++++ .../test/cluster/util/ArchivePatcher.java | 70 +++++++++++++++++++ 7 files changed, 190 insertions(+), 11 deletions(-) create mode 100644 test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultPluginInstallSpec.java create mode 100644 test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/PluginInstallSpec.java create mode 100644 test/test-clusters/src/main/java/org/elasticsearch/test/cluster/util/ArchivePatcher.java diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java index 6070ec140d25..8aa6ea4bc9e2 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java +++ 
b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java @@ -19,6 +19,7 @@ import org.elasticsearch.test.cluster.local.distribution.DistributionDescriptor; import org.elasticsearch.test.cluster.local.distribution.DistributionResolver; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.cluster.local.model.User; +import org.elasticsearch.test.cluster.util.ArchivePatcher; import org.elasticsearch.test.cluster.util.IOUtils; import org.elasticsearch.test.cluster.util.OS; import org.elasticsearch.test.cluster.util.Pair; @@ -651,27 +652,56 @@ public abstract class AbstractLocalClusterFactory toInstall = spec.getPlugins() + .entrySet() .stream() .map( - pluginName -> pluginPaths.stream() + plugin -> pluginPaths.stream() .map(path -> Pair.of(pattern.matcher(path.getFileName().toString()), path)) - .filter(pair -> pair.left.matches() && pair.left.group(1).equals(pluginName)) + .filter(pair -> pair.left.matches() && pair.left.group(1).equals(plugin.getKey())) .map(p -> p.right.getParent().resolve(p.left.group(0))) .findFirst() + .map(path -> { + DefaultPluginInstallSpec installSpec = plugin.getValue(); + // Patch the plugin archive with configured overrides if necessary + if (installSpec.entitlementsOverride != null || installSpec.propertiesOverride != null) { + Path target; + try { + target = Files.createTempFile("patched-", path.getFileName().toString()); + } catch (IOException e) { + throw new UncheckedIOException("Failed to create temporary file", e); + } + ArchivePatcher patcher = new ArchivePatcher(path, target); + if (installSpec.entitlementsOverride != null) { + patcher.override( + "entitlement-policy.yaml", + original -> installSpec.entitlementsOverride.apply(original).asStream() + ); + } + if (installSpec.propertiesOverride != null) { + patcher.override( + "plugin-descriptor.properties", + original -> installSpec.propertiesOverride.apply(original).asStream() + ); + } 
+ return patcher.patch(); + } else { + return path; + } + }) .orElseThrow(() -> { String taskPath = System.getProperty("tests.task"); String project = taskPath.substring(0, taskPath.lastIndexOf(':')); - throw new RuntimeException( + return new RuntimeException( "Unable to locate plugin '" - + pluginName + + plugin.getKey() + "'. Ensure you've added the following to the build script for project '" + project + "':\n\n" + "dependencies {\n" + " clusterPlugins " + "project(':plugins:" - + pluginName + + plugin.getKey() + "')" + "\n}" ); diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java index c3c4f3fe825e..1ef4bcbfb612 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java @@ -24,6 +24,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Consumer; import java.util.function.Predicate; import java.util.function.Supplier; @@ -34,7 +35,7 @@ public abstract class AbstractLocalSpecBuilder> im private final List environmentProviders = new ArrayList<>(); private final Map environment = new HashMap<>(); private final Set modules = new HashSet<>(); - private final Set plugins = new HashSet<>(); + private final Map plugins = new HashMap<>(); private final Set features = EnumSet.noneOf(FeatureFlag.class); private final List keystoreProviders = new ArrayList<>(); private final Map keystoreSettings = new HashMap<>(); @@ -132,11 +133,19 @@ public abstract class AbstractLocalSpecBuilder> im @Override public T plugin(String pluginName) { - this.plugins.add(pluginName); + this.plugins.put(pluginName, new DefaultPluginInstallSpec()); return cast(this); } - Set getPlugins() { + @Override + public 
T plugin(String pluginName, Consumer config) { + DefaultPluginInstallSpec spec = new DefaultPluginInstallSpec(); + config.accept(spec); + this.plugins.put(pluginName, spec); + return cast(this); + } + + Map getPlugins() { return inherit(() -> parent.getPlugins(), plugins); } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultPluginInstallSpec.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultPluginInstallSpec.java new file mode 100644 index 000000000000..364bac1586d6 --- /dev/null +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultPluginInstallSpec.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.test.cluster.local; + +import org.elasticsearch.test.cluster.util.resource.Resource; + +import java.util.function.Function; + +public class DefaultPluginInstallSpec implements PluginInstallSpec { + Function propertiesOverride; + Function entitlementsOverride; + + @Override + public PluginInstallSpec withPropertiesOverride(Function override) { + this.propertiesOverride = override; + return this; + } + + @Override + public PluginInstallSpec withEntitlementsOverride(Function override) { + this.entitlementsOverride = override; + return this; + } +} diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java index 02fdb45dffa3..b9e9520e77eb 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java @@ -92,7 +92,7 @@ public class LocalClusterSpec implements ClusterSpec { private final List environmentProviders; private final Map environment; private final Set modules; - private final Set plugins; + private final Map plugins; private final DistributionType distributionType; private final Set features; private final List keystoreProviders; @@ -114,7 +114,7 @@ public class LocalClusterSpec implements ClusterSpec { List environmentProviders, Map environment, Set modules, - Set plugins, + Map plugins, DistributionType distributionType, Set features, List keystoreProviders, @@ -179,7 +179,7 @@ public class LocalClusterSpec implements ClusterSpec { return modules; } - public Set getPlugins() { + public Map getPlugins() { return plugins; } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java index 1c9ac8a0af6c..2b44126fef4e 
100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java @@ -18,6 +18,7 @@ import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.cluster.util.Version; import org.elasticsearch.test.cluster.util.resource.Resource; +import java.util.function.Consumer; import java.util.function.Predicate; import java.util.function.Supplier; @@ -73,6 +74,11 @@ interface LocalSpecBuilder> { */ T plugin(String pluginName); + /** + * Ensure plugin is installed into the distribution. + */ + T plugin(String pluginName, Consumer config); + /** * Require feature to be enabled in the cluster. */ diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/PluginInstallSpec.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/PluginInstallSpec.java new file mode 100644 index 000000000000..6b0b13ddd2dd --- /dev/null +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/PluginInstallSpec.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.test.cluster.local; + +import org.elasticsearch.test.cluster.util.resource.Resource; + +import java.util.function.Function; + +public interface PluginInstallSpec { + + /** + * Override bundled plugin properties file with the given {@link Resource}. 
The provided override function receives the original + * file content as function argument. + * + * @param override function returning resource used to override bundled properties file + */ + PluginInstallSpec withPropertiesOverride(Function override); + + /** + * Override bundled entitlements policy file with the given {@link Resource}. The provided override function receives the original + * file content as function argument. + * + * @param override function returning resource used to override bundled entitlements policy file + */ + PluginInstallSpec withEntitlementsOverride(Function override); +} diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/util/ArchivePatcher.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/util/ArchivePatcher.java new file mode 100644 index 000000000000..269d1dd9f516 --- /dev/null +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/util/ArchivePatcher.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.test.cluster.util; + +import java.io.BufferedOutputStream; +import java.io.BufferedReader; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.UncheckedIOException; +import java.nio.file.Path; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.zip.ZipEntry; +import java.util.zip.ZipFile; +import java.util.zip.ZipOutputStream; + +public class ArchivePatcher { + private final Path original; + private final Path target; + private final Map> overrides = new HashMap<>(); + + public ArchivePatcher(Path original, Path target) { + this.original = original; + this.target = target; + } + + public void override(String filename, Function override) { + this.overrides.put(filename, override); + } + + public Path patch() { + try ( + ZipFile input = new ZipFile(original.toFile()); + ZipOutputStream output = new ZipOutputStream(new BufferedOutputStream(new FileOutputStream(target.toFile()))) + ) { + Enumeration entries = input.entries(); + while (entries.hasMoreElements()) { + ZipEntry entry = entries.nextElement(); + output.putNextEntry(entry); + if (overrides.containsKey(entry.getName())) { + try (BufferedReader reader = new BufferedReader(new InputStreamReader(input.getInputStream(entry)))) { + String content = reader.lines().collect(Collectors.joining(System.lineSeparator())); + overrides.get(entry.getName()).apply(content).transferTo(output); + } + } else { + input.getInputStream(entry).transferTo(output); + } + output.closeEntry(); + } + output.flush(); + output.finish(); + } catch (IOException e) { + throw new UncheckedIOException("Failed to patch archive", e); + } + + return target; + } +} From b1d4967a6c0891d7866c41709cc03a445803502a Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Thu, 16 Jan 2025 16:55:19 +0100 Subject: [PATCH 
17/30] Support regular indices in N-2 version (#119753) This change allows indices created in version 7.x to be opened as read-only indices in a 9.0 cluster. It requires the corresponding change in 8.18 (#120105). To be opened as read-only, indices must be regular, searchable snapshot or legacy indices created in a N-2 version. They must also have the index.blocks.write setting and regular indices require the additional index.verified_read_only. Relates #120105 Relates ES-10320 --- .../AbstractIndexCompatibilityTestCase.java | 58 +++-- ...sterRestartLuceneIndexCompatibilityIT.java | 230 +++++++++++++++--- ...earchableSnapshotIndexCompatibilityIT.java | 8 +- ...lingUpgradeIndexCompatibilityTestCase.java | 6 + ...gradeLuceneIndexCompatibilityTestCase.java | 192 +++++++++++++++ .../metadata/IndexMetadataVerifier.java | 72 ++++-- .../metadata/MetadataIndexStateService.java | 10 + .../common/settings/IndexScopedSettings.java | 1 + .../index/engine/ReadOnlyEngine.java | 2 +- .../elasticsearch/index/shard/IndexShard.java | 2 +- .../index/shard/StoreRecovery.java | 5 +- .../org/elasticsearch/index/store/Store.java | 15 ++ .../index/translog/TranslogConfig.java | 8 +- .../elasticsearch/indices/IndicesService.java | 23 ++ .../indices/recovery/RecoveryTarget.java | 5 +- .../coordination/NodeJoinExecutorTests.java | 55 ++++- .../metadata/IndexMetadataVerifierTests.java | 114 +++++++-- .../action/TransportResumeFollowAction.java | 4 +- 18 files changed, 718 insertions(+), 92 deletions(-) create mode 100644 qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeLuceneIndexCompatibilityTestCase.java diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractIndexCompatibilityTestCase.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractIndexCompatibilityTestCase.java index 13c647983fad..392f2037139a 100644 --- 
a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractIndexCompatibilityTestCase.java +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractIndexCompatibilityTestCase.java @@ -13,8 +13,8 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.InputStreamEntity; import org.elasticsearch.client.Request; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.Strings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.test.XContentTestUtils; @@ -42,7 +42,9 @@ import static org.elasticsearch.test.cluster.util.Version.fromString; import static org.elasticsearch.test.rest.ObjectPath.createFromResponse; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; public abstract class AbstractIndexCompatibilityTestCase extends ESRestTestCase { @@ -156,8 +158,16 @@ public abstract class AbstractIndexCompatibilityTestCase extends ESRestTestCase return new Version((byte) ((id / 1000000) % 100), (byte) ((id / 10000) % 100), (byte) ((id / 100) % 100)); } + protected static int getNumberOfReplicas(String indexName) throws Exception { + var indexSettings = (Map) ((Map) getIndexSettings(indexName).get(indexName)).get("settings"); + var numberOfReplicas = Integer.parseInt((String) indexSettings.get(IndexMetadata.SETTING_NUMBER_OF_REPLICAS)); + assertThat(numberOfReplicas, allOf(greaterThanOrEqualTo(0), lessThanOrEqualTo(NODES - 1))); + return numberOfReplicas; + } + protected static void indexDocs(String indexName, int numDocs) throws Exception { var request = new 
Request("POST", "/_bulk"); + request.addParameter("refresh", "true"); var docs = new StringBuilder(); IntStream.range(0, numDocs).forEach(n -> docs.append(Strings.format(""" {"index":{"_index":"%s"}} @@ -185,19 +195,30 @@ public abstract class AbstractIndexCompatibilityTestCase extends ESRestTestCase } protected static void restoreIndex(String repository, String snapshot, String indexName, String renamedIndexName) throws Exception { + restoreIndex(repository, snapshot, indexName, renamedIndexName, Settings.EMPTY); + } + + protected static void restoreIndex( + String repository, + String snapshot, + String indexName, + String renamedIndexName, + Settings indexSettings + ) throws Exception { var request = new Request("POST", "/_snapshot/" + repository + "/" + snapshot + "/_restore"); request.addParameter("wait_for_completion", "true"); - request.setJsonEntity(org.elasticsearch.common.Strings.format(""" + request.setJsonEntity(Strings.format(""" { "indices": "%s", "include_global_state": false, "rename_pattern": "(.+)", "rename_replacement": "%s", - "include_aliases": false - }""", indexName, renamedIndexName)); + "include_aliases": false, + "index_settings": %s + }""", indexName, renamedIndexName, Strings.toString(indexSettings))); var responseBody = createFromResponse(client().performRequest(request)); - assertThat(responseBody.evaluate("snapshot.shards.total"), equalTo((int) responseBody.evaluate("snapshot.shards.failed"))); - assertThat(responseBody.evaluate("snapshot.shards.successful"), equalTo(0)); + assertThat(responseBody.evaluate("snapshot.shards.total"), equalTo((int) responseBody.evaluate("snapshot.shards.successful"))); + assertThat(responseBody.evaluate("snapshot.shards.failed"), equalTo(0)); } protected static void updateRandomIndexSettings(String indexName) throws IOException { @@ -215,20 +236,19 @@ public abstract class AbstractIndexCompatibilityTestCase extends ESRestTestCase updateIndexSettings(indexName, settings); } - protected static void 
updateRandomMappings(String indexName) throws IOException { + protected static void updateRandomMappings(String indexName) throws Exception { final var runtime = new HashMap<>(); runtime.put("field_" + randomInt(2), Map.of("type", "keyword")); final var properties = new HashMap<>(); properties.put(randomIdentifier(), Map.of("type", "long")); - var body = XContentTestUtils.convertToXContent(Map.of("runtime", runtime, "properties", properties), XContentType.JSON); + updateMappings(indexName, Map.of("runtime", runtime, "properties", properties)); + } + + protected static void updateMappings(String indexName, Map mappings) throws Exception { + var body = XContentTestUtils.convertToXContent(mappings, XContentType.JSON); var request = new Request("PUT", indexName + "/_mappings"); request.setEntity( - new InputStreamEntity( - body.streamInput(), - body.length(), - - ContentType.create(XContentType.JSON.mediaTypeWithoutParameters()) - ) + new InputStreamEntity(body.streamInput(), body.length(), ContentType.create(XContentType.JSON.mediaTypeWithoutParameters())) ); assertOK(client().performRequest(request)); } @@ -238,4 +258,14 @@ public abstract class AbstractIndexCompatibilityTestCase extends ESRestTestCase var state = responseBody.evaluate("metadata.indices." 
+ indexName + ".state"); return IndexMetadata.State.fromString((String) state) == IndexMetadata.State.CLOSE; } + + protected static void addIndexWriteBlock(String indexName) throws Exception { + assertAcknowledged(client().performRequest(new Request("PUT", Strings.format("/%s/_block/write", indexName)))); + } + + protected static void forceMerge(String indexName, int maxNumSegments) throws Exception { + var request = new Request("POST", '/' + indexName + "/_forcemerge"); + request.addParameter("max_num_segments", String.valueOf(maxNumSegments)); + assertOK(client().performRequest(request)); + } } diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartLuceneIndexCompatibilityIT.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartLuceneIndexCompatibilityIT.java index 15d41cc981ce..89fefde08b9a 100644 --- a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartLuceneIndexCompatibilityIT.java +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartLuceneIndexCompatibilityIT.java @@ -9,18 +9,13 @@ package org.elasticsearch.lucene; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.ResponseException; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.repositories.fs.FsRepository; -import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.cluster.util.Version; -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.Matchers.allOf; +import static org.elasticsearch.cluster.metadata.MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING; import static org.hamcrest.Matchers.equalTo; public class FullClusterRestartLuceneIndexCompatibilityIT extends 
FullClusterRestartIndexCompatibilityTestCase { @@ -34,7 +29,90 @@ public class FullClusterRestartLuceneIndexCompatibilityIT extends FullClusterRes } /** - * Creates an index and a snapshot on N-2, then restores the snapshot on N. + * Creates an index on N-2, upgrades to N -1 and marks as read-only, then upgrades to N. + */ + public void testIndexUpgrade() throws Exception { + final String index = suffix("index"); + final int numDocs = 2431; + + if (isFullyUpgradedTo(VERSION_MINUS_2)) { + logger.debug("--> creating index [{}]", index); + createIndex( + client(), + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, randomInt(2)) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build() + ); + + logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); + indexDocs(index, numDocs); + return; + } + + if (isFullyUpgradedTo(VERSION_MINUS_1)) { + ensureGreen(index); + + assertThat(indexVersion(index), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), index, numDocs); + + logger.debug("--> flushing [{}]", index); + flush(index, true); + + logger.debug("--> applying write block on [{}]", index); + addIndexWriteBlock(index); + + logger.debug("--> applying verified read-only setting on [{}]", index); + updateIndexSettings(index, Settings.builder().put(VERIFIED_READ_ONLY_SETTING.getKey(), true)); + return; + } + + if (isFullyUpgradedTo(VERSION_CURRENT)) { + ensureGreen(index); + + assertThat(indexVersion(index), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), index, numDocs); + + var indexSettings = getIndexSettingsAsMap(index); + assertThat(indexSettings.get(IndexMetadata.APIBlock.WRITE.settingName()), equalTo(Boolean.TRUE.toString())); + assertThat(indexSettings.get(VERIFIED_READ_ONLY_SETTING.getKey()), equalTo(Boolean.TRUE.toString())); + + var numberOfReplicas = getNumberOfReplicas(index); + if (0 < numberOfReplicas) { + logger.debug("--> resetting number of replicas 
[{}] to [0]", numberOfReplicas); + updateIndexSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)); + } + + updateRandomIndexSettings(index); + updateRandomMappings(index); + + logger.debug("--> adding replica to test peer-recovery"); + updateIndexSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)); + ensureGreen(index); + + logger.debug("--> closing restored index [{}]", index); + closeIndex(index); + ensureGreen(index); + + logger.debug("--> adding replica to test peer-recovery for closed shards"); + updateIndexSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2)); + ensureGreen(index); + + logger.debug("--> re-opening restored index [{}]", index); + openIndex(index); + ensureGreen(index); + + assertDocCount(client(), index, numDocs); + + logger.debug("--> deleting index [{}]", index); + deleteIndex(index); + } + } + + /** + * Creates an index on N-2, marks as read-only on N-1 and creates a snapshot, then restores the snapshot on N. 
*/ public void testRestoreIndex() throws Exception { final String repository = suffix("repository"); @@ -59,9 +137,6 @@ public class FullClusterRestartLuceneIndexCompatibilityIT extends FullClusterRes logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); indexDocs(index, numDocs); - - logger.debug("--> creating snapshot [{}]", snapshot); - createSnapshot(client(), repository, snapshot, true); return; } @@ -71,6 +146,18 @@ public class FullClusterRestartLuceneIndexCompatibilityIT extends FullClusterRes assertThat(indexVersion(index), equalTo(VERSION_MINUS_2)); assertDocCount(client(), index, numDocs); + logger.debug("--> flushing [{}]", index); + flush(index, true); + + logger.debug("--> applying write block on [{}]", index); + addIndexWriteBlock(index); + + logger.debug("--> applying verified read-only setting on [{}]", index); + updateIndexSettings(index, Settings.builder().put(VERIFIED_READ_ONLY_SETTING.getKey(), true)); + + logger.debug("--> creating snapshot [{}]", snapshot); + createSnapshot(client(), repository, snapshot, true); + logger.debug("--> deleting index [{}]", index); deleteIndex(index); return; @@ -79,32 +166,109 @@ public class FullClusterRestartLuceneIndexCompatibilityIT extends FullClusterRes if (isFullyUpgradedTo(VERSION_CURRENT)) { var restoredIndex = suffix("index-restored"); logger.debug("--> restoring index [{}] as [{}]", index, restoredIndex); + restoreIndex(repository, snapshot, index, restoredIndex); + ensureGreen(restoredIndex); - // Restoring the index will fail as Elasticsearch does not support reading N-2 yet - var request = new Request("POST", "/_snapshot/" + repository + "/" + snapshot + "/_restore"); - request.addParameter("wait_for_completion", "true"); - request.setJsonEntity(Strings.format(""" - { - "indices": "%s", - "include_global_state": false, - "rename_pattern": "(.+)", - "rename_replacement": "%s", - "include_aliases": false - }""", index, restoredIndex)); + assertThat(indexVersion(restoredIndex), 
equalTo(VERSION_MINUS_2)); + assertDocCount(client(), restoredIndex, numDocs); - var responseException = expectThrows(ResponseException.class, () -> client().performRequest(request)); - assertEquals(RestStatus.INTERNAL_SERVER_ERROR.getStatus(), responseException.getResponse().getStatusLine().getStatusCode()); - assertThat( - responseException.getMessage(), - allOf( - containsString("cannot restore index [[" + index), - containsString("because it cannot be upgraded"), - containsString("has current compatibility version [" + VERSION_MINUS_2 + '-' + VERSION_MINUS_1.getMajor() + ".0.0]"), - containsString("but the minimum compatible version is [" + VERSION_MINUS_1.getMajor() + ".0.0]."), - containsString("It should be re-indexed in Elasticsearch " + VERSION_MINUS_1.getMajor() + ".x"), - containsString("before upgrading to " + VERSION_CURRENT) - ) + updateRandomIndexSettings(restoredIndex); + updateRandomMappings(restoredIndex); + + logger.debug("--> adding replica to test peer-recovery"); + updateIndexSettings(restoredIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)); + ensureGreen(restoredIndex); + + logger.debug("--> closing restored index [{}]", restoredIndex); + closeIndex(restoredIndex); + ensureGreen(restoredIndex); + + logger.debug("--> adding replica to test peer-recovery for closed shards"); + updateIndexSettings(restoredIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2)); + ensureGreen(restoredIndex); + + logger.debug("--> re-opening restored index [{}]", restoredIndex); + openIndex(restoredIndex); + ensureGreen(restoredIndex); + + assertDocCount(client(), restoredIndex, numDocs); + + logger.debug("--> deleting restored index [{}]", restoredIndex); + deleteIndex(restoredIndex); + } + } + + /** + * Creates an index on N-2, marks as read-only on N-1 and creates a snapshot and then closes the index, then restores the snapshot on N. 
+ */ + public void testRestoreIndexOverClosedIndex() throws Exception { + final String repository = suffix("repository"); + final String snapshot = suffix("snapshot"); + final String index = suffix("index"); + final int numDocs = 2134; + + if (isFullyUpgradedTo(VERSION_MINUS_2)) { + logger.debug("--> registering repository [{}]", repository); + registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings()); + + logger.debug("--> creating index [{}]", index); + createIndex( + client(), + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build() ); + + logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); + indexDocs(index, numDocs); + return; + } + + if (isFullyUpgradedTo(VERSION_MINUS_1)) { + ensureGreen(index); + + assertThat(indexVersion(index), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), index, numDocs); + + logger.debug("--> flushing [{}]", index); + flush(index, true); + + logger.debug("--> applying write block on [{}]", index); + addIndexWriteBlock(index); + + logger.debug("--> applying verified read-only setting on [{}]", index); + updateIndexSettings(index, Settings.builder().put(VERIFIED_READ_ONLY_SETTING.getKey(), true)); + + logger.debug("--> creating snapshot [{}]", snapshot); + createSnapshot(client(), repository, snapshot, true); + + logger.debug("--> force-merge index [{}] to 1 segment", index); + forceMerge(index, 1); + + logger.debug("--> closing index [{}]", index); + closeIndex(index); + ensureGreen(index); + return; + } + + if (isFullyUpgradedTo(VERSION_CURRENT)) { + var indexSettings = getIndexSettingsAsMap(index); + assertThat(indexSettings.get(IndexMetadata.APIBlock.WRITE.settingName()), equalTo(Boolean.TRUE.toString())); + assertThat(indexSettings.get(VERIFIED_READ_ONLY_SETTING.getKey()), equalTo(Boolean.TRUE.toString())); + 
assertThat(isIndexClosed(index), equalTo(true)); + + logger.debug("--> restoring index [{}] over existing closed index", index); + restoreIndex(repository, snapshot, index, index); + ensureGreen(index); + + assertThat(indexVersion(index), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), index, numDocs); + + logger.debug("--> deleting index [{}]", index); + deleteIndex(index); } } } diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartSearchableSnapshotIndexCompatibilityIT.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartSearchableSnapshotIndexCompatibilityIT.java index a7dc5e41fd32..477f2099477c 100644 --- a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartSearchableSnapshotIndexCompatibilityIT.java +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartSearchableSnapshotIndexCompatibilityIT.java @@ -155,9 +155,11 @@ public class FullClusterRestartSearchableSnapshotIndexCompatibilityIT extends Fu assertThat(indexVersion(mountedIndex), equalTo(VERSION_MINUS_2)); assertDocCount(client(), mountedIndex, numDocs); - logger.debug("--> adding replica to test replica upgrade"); - updateIndexSettings(mountedIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)); - ensureGreen(mountedIndex); + if (randomBoolean()) { + logger.debug("--> adding replica to test upgrade with replica"); + updateIndexSettings(mountedIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)); + ensureGreen(mountedIndex); + } if (randomBoolean()) { logger.debug("--> random closing of index [{}] before upgrade", mountedIndex); diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeIndexCompatibilityTestCase.java 
b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeIndexCompatibilityTestCase.java index 03b6a9292e35..85fc4abc5e06 100644 --- a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeIndexCompatibilityTestCase.java +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeIndexCompatibilityTestCase.java @@ -73,6 +73,12 @@ public abstract class RollingUpgradeIndexCompatibilityTestCase extends AbstractI closeClients(); cluster().upgradeNodeToVersion(i, expectedNodeVersion); initClient(); + + ensureHealth((request -> { + request.addParameter("timeout", "70s"); + request.addParameter("wait_for_nodes", String.valueOf(NODES)); + request.addParameter("wait_for_status", "yellow"); + })); } currentNodeVersion = nodesVersions().get(nodeName); diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeLuceneIndexCompatibilityTestCase.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeLuceneIndexCompatibilityTestCase.java new file mode 100644 index 000000000000..c183ccc39cde --- /dev/null +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeLuceneIndexCompatibilityTestCase.java @@ -0,0 +1,192 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.lucene; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.test.cluster.util.Version; + +import java.util.List; + +import static org.elasticsearch.cluster.metadata.MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING; +import static org.hamcrest.Matchers.equalTo; + +public class RollingUpgradeLuceneIndexCompatibilityTestCase extends RollingUpgradeIndexCompatibilityTestCase { + + static { + clusterConfig = config -> config.setting("xpack.license.self_generated.type", "trial"); + } + + public RollingUpgradeLuceneIndexCompatibilityTestCase(List nodesVersions) { + super(nodesVersions); + } + + /** + * Creates an index on N-2, upgrades to N-1 and marks it as read-only, then remains searchable during rolling upgrades. 
+ */ + public void testIndexUpgrade() throws Exception { + final String index = suffix("index-rolling-upgraded"); + final int numDocs = 2543; + + if (isFullyUpgradedTo(VERSION_MINUS_2)) { + logger.debug("--> creating index [{}]", index); + createIndex( + client(), + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build() + ); + + logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); + indexDocs(index, numDocs); + return; + } + + ensureGreen(index); + + if (isFullyUpgradedTo(VERSION_MINUS_1)) { + assertThat(indexVersion(index), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), index, numDocs); + + logger.debug("--> flushing [{}]", index); + flush(index, true); + + logger.debug("--> applying write block on [{}]", index); + addIndexWriteBlock(index); + + logger.debug("--> applying verified read-only setting on [{}]", index); + updateIndexSettings(index, Settings.builder().put(VERIFIED_READ_ONLY_SETTING.getKey(), true)); + return; + } + + if (nodesVersions().values().stream().anyMatch(v -> v.onOrAfter(VERSION_CURRENT))) { + var indexSettings = getIndexSettingsAsMap(index); + assertThat(indexSettings.get(IndexMetadata.APIBlock.WRITE.settingName()), equalTo(Boolean.TRUE.toString())); + assertThat(indexSettings.get(VERIFIED_READ_ONLY_SETTING.getKey()), equalTo(Boolean.TRUE.toString())); + + if (isIndexClosed(index)) { + logger.debug("--> re-opening index [{}] after upgrade", index); + openIndex(index); + ensureGreen(index); + } + + assertThat(indexVersion(index), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), index, numDocs); + + updateRandomIndexSettings(index); + updateRandomMappings(index); + + if (randomBoolean()) { + logger.debug("--> random closing of index [{}] before upgrade", index); + closeIndex(index); + ensureGreen(index); + } + } + } + + /** + * Creates an index on N-2, marks as read-only on 
N-1 and creates a snapshot, then restores the snapshot during rolling upgrades to N. + */ + public void testRestoreIndex() throws Exception { + final String repository = suffix("repository"); + final String snapshot = suffix("snapshot"); + final String index = suffix("index"); + final int numDocs = 1234; + + if (isFullyUpgradedTo(VERSION_MINUS_2)) { + logger.debug("--> registering repository [{}]", repository); + registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings()); + + logger.debug("--> creating index [{}]", index); + createIndex( + client(), + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build() + ); + + logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); + indexDocs(index, numDocs); + return; + } + + if (isFullyUpgradedTo(VERSION_MINUS_1)) { + ensureGreen(index); + + assertThat(indexVersion(index), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), index, numDocs); + + logger.debug("--> flushing [{}]", index); + flush(index, true); + + logger.debug("--> applying write block on [{}]", index); + addIndexWriteBlock(index); + + logger.debug("--> applying verified read-only setting on [{}]", index); + updateIndexSettings(index, Settings.builder().put(VERIFIED_READ_ONLY_SETTING.getKey(), true)); + + logger.debug("--> creating snapshot [{}]", snapshot); + createSnapshot(client(), repository, snapshot, true); + + logger.debug("--> deleting index [{}]", index); + deleteIndex(index); + return; + } + if (nodesVersions().values().stream().anyMatch(v -> v.onOrAfter(VERSION_CURRENT))) { + var restoredIndex = suffix("index-restored-rolling"); + boolean success = false; + try { + + logger.debug("--> restoring index [{}] as [{}]", index, restoredIndex); + restoreIndex(repository, snapshot, index, restoredIndex); + ensureGreen(restoredIndex); + + 
assertThat(indexVersion(restoredIndex), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), restoredIndex, numDocs); + + updateRandomIndexSettings(restoredIndex); + updateRandomMappings(restoredIndex); + + logger.debug("--> closing restored index [{}]", restoredIndex); + closeIndex(restoredIndex); + ensureGreen(restoredIndex); + + logger.debug("--> re-opening restored index [{}]", restoredIndex); + openIndex(restoredIndex); + ensureGreen(restoredIndex); + + assertDocCount(client(), restoredIndex, numDocs); + + logger.debug("--> deleting restored index [{}]", restoredIndex); + deleteIndex(restoredIndex); + + success = true; + } finally { + if (success == false) { + try { + client().performRequest(new Request("DELETE", "/" + restoredIndex)); + } catch (ResponseException e) { + logger.warn("Failed to delete restored index [" + restoredIndex + ']', e); + } + } + } + } + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java index be2563c4732b..1cae6f96d8fa 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.MapperMetrics; @@ -139,39 +140,37 @@ public class IndexMetadataVerifier { ); } - private static boolean isFullySupportedVersion(IndexMetadata indexMetadata, IndexVersion minimumIndexCompatibilityVersion) { + public static boolean isFullySupportedVersion(IndexMetadata indexMetadata, 
IndexVersion minimumIndexCompatibilityVersion) { return indexMetadata.getCompatibilityVersion().onOrAfter(minimumIndexCompatibilityVersion); } /** - * Returns {@code true} if the index version is compatible in read-only mode. As of today, only searchable snapshots and archive indices - * in version N-2 with a write block are read-only compatible. This method throws an {@link IllegalStateException} if the index is - * either a searchable snapshot or an archive index with a read-only compatible version but is missing the write block. + * Returns {@code true} if the index version is compatible with read-only mode. A regular index is read-only compatible if it was + * created in version N-2 and if it was marked as read-only on version N-1, a process which involves adding a write block and a special + * index setting indicating that the shard was "verified". Searchable snapshots and archive indices created in version N-2 are also + * read-only compatible by nature as long as they have a write block. Other types of indices, like CCR, are not read-only compatible. * - * @param indexMetadata the index metadata - * @param minimumIndexCompatibilityVersion the min. index compatible version for reading and writing indices (used in assertion) - * @param minReadOnlyIndexCompatibilityVersion the min. index compatible version for only reading indices + * @param indexMetadata the index metadata + * @param minimumCompatible the min. index compatible version for reading and writing indices (used in assertion) + * @param minimumReadOnlyCompatible the min. index compatible version for only reading indices * * @return {@code true} if the index version is compatible in read-only mode, {@code false} otherwise. - * @throws IllegalStateException if the index is read-only compatible but has no write block in place. + * @throws IllegalStateException if the index is read-only compatible but has no write block or no verification index setting in place. 
*/ public static boolean isReadOnlySupportedVersion( IndexMetadata indexMetadata, - IndexVersion minimumIndexCompatibilityVersion, - IndexVersion minReadOnlyIndexCompatibilityVersion + IndexVersion minimumCompatible, + IndexVersion minimumReadOnlyCompatible ) { - boolean isReadOnlySupportedVersion = indexMetadata.getCompatibilityVersion().onOrAfter(minReadOnlyIndexCompatibilityVersion); - assert isFullySupportedVersion(indexMetadata, minimumIndexCompatibilityVersion) == false; - - if (isReadOnlySupportedVersion - && (indexMetadata.isSearchableSnapshot() || indexMetadata.getCreationVersion().isLegacyIndexVersion())) { - boolean isReadOnly = IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.get(indexMetadata.getSettings()); + if (isReadOnlyCompatible(indexMetadata, minimumCompatible, minimumReadOnlyCompatible)) { + assert isFullySupportedVersion(indexMetadata, minimumCompatible) == false : indexMetadata; + final boolean isReadOnly = hasIndexWritesBlock(indexMetadata); if (isReadOnly == false) { throw new IllegalStateException( "The index " + indexMetadata.getIndex() + " created in version [" - + indexMetadata.getCreationVersion() + + indexMetadata.getCreationVersion().toReleaseVersion() + "] with current compatibility version [" + indexMetadata.getCompatibilityVersion().toReleaseVersion() + "] must be marked as read-only using the setting [" @@ -186,6 +185,45 @@ public class IndexMetadataVerifier { return false; } + private static boolean isReadOnlyCompatible( + IndexMetadata indexMetadata, + IndexVersion minimumCompatible, + IndexVersion minimumReadOnlyCompatible + ) { + var compatibilityVersion = indexMetadata.getCompatibilityVersion(); + if (compatibilityVersion.onOrAfter(minimumReadOnlyCompatible)) { + // searchable snapshots are read-only compatible + if (indexMetadata.isSearchableSnapshot()) { + return true; + } + // archives are read-only compatible + if (indexMetadata.getCreationVersion().isLegacyIndexVersion()) { + return true; + } + // indices (other than CCR and 
old-style frozen indices) are read-only compatible + return compatibilityVersion.before(minimumCompatible) + && indexMetadata.getSettings().getAsBoolean("index.frozen", false) == false + && indexMetadata.getSettings().getAsBoolean("index.xpack.ccr.following_index", false) == false; + } + return false; + } + + private static boolean hasIndexWritesBlock(IndexMetadata indexMetadata) { + if (IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.get(indexMetadata.getSettings())) { + return indexMetadata.isSearchableSnapshot() + || indexMetadata.getCreationVersion().isLegacyIndexVersion() + || MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING.get(indexMetadata.getSettings()); + } + return false; + } + + public static boolean isReadOnlyVerified(IndexMetadata indexMetadata) { + if (isReadOnlyCompatible(indexMetadata, IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.MINIMUM_READONLY_COMPATIBLE)) { + return hasIndexWritesBlock(indexMetadata); + } + return false; + } + /** * Check that we can parse the mappings. 
* diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java index f4318eb017eb..21b07571a435 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java @@ -11,6 +11,7 @@ package org.elasticsearch.cluster.metadata; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.Build; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; @@ -114,6 +115,15 @@ public class MetadataIndexStateService { Setting.Property.PrivateIndex ); + public static final Setting VERIFIED_READ_ONLY_SETTING = Setting.boolSetting( + "index.verified_read_only", + false, + Setting.Property.IndexScope, + Setting.Property.NotCopyableOnResize, + // Allow the setting to be updated in snapshot builds + Build.current().isSnapshot() ? 
Setting.Property.OperatorDynamic : Setting.Property.PrivateIndex + ); + private final ClusterService clusterService; private final AllocationService allocationService; private final IndexMetadataVerifier indexMetadataVerifier; diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 3c1f53ca4a2c..b5a513777756 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -174,6 +174,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.DEFAULT_PIPELINE, IndexSettings.FINAL_PIPELINE, MetadataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING, + MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING, ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING, DiskThresholdDecider.SETTING_IGNORE_DISK_WATERMARKS, ShardLimitValidator.INDEX_SETTING_SHARD_LIMIT_GROUP, diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index 010fc1bd9e41..63a4696ddb08 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -97,7 +97,7 @@ public class ReadOnlyEngine extends Engine { @SuppressWarnings("this-escape") public ReadOnlyEngine( EngineConfig config, - SeqNoStats seqNoStats, + @Nullable SeqNoStats seqNoStats, @Nullable TranslogStats translogStats, boolean obtainLock, Function readerWrapperFunction, diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index ec260a40452b..5f7d1e1106a1 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ 
b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1489,7 +1489,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } /** - * @return true the shard has a translog. + * @return true if the shard has a translog. When there is no translog, the shard is not writeable. */ public boolean hasTranslog() { return translogConfig.hasTranslog(); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 06f9b3e6c894..89d9a780728f 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -59,6 +59,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; import java.util.stream.Collectors; +import static org.elasticsearch.cluster.metadata.IndexMetadataVerifier.isReadOnlyVerified; import static org.elasticsearch.common.lucene.Lucene.indexWriterConfigWithNoMerging; import static org.elasticsearch.core.TimeValue.timeValueMillis; @@ -625,7 +626,9 @@ public final class StoreRecovery { try { final var translogLocation = indexShard.shardPath().resolveTranslog(); if (indexShard.hasTranslog() == false) { - Translog.deleteAll(translogLocation); + if (isReadOnlyVerified(indexShard.indexSettings().getIndexMetadata())) { + Translog.deleteAll(translogLocation); + } return; } store.bootstrapNewHistory(); diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 64bbd15198b4..af28bc3bb32d 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -38,6 +38,7 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Version; import org.elasticsearch.ExceptionsHelper; 
+import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; @@ -1572,6 +1573,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref } private IndexWriterConfig newTemporaryIndexWriterConfig() { + assert assertIndexWriter(indexSettings); // this config is only used for temporary IndexWriter instances, used to initialize the index or update the commit data, // so we don't want any merges to happen var iwc = indexWriterConfigWithNoMerging(null).setSoftDeletesField(Lucene.SOFT_DELETES_FIELD).setCommitOnClose(false); @@ -1581,4 +1583,17 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref } return iwc; } + + private static boolean assertIndexWriter(IndexSettings indexSettings) { + final var version = IndexMetadata.SETTING_INDEX_VERSION_COMPATIBILITY.get(indexSettings.getSettings()); + assert version.onOrAfter(IndexVersions.MINIMUM_COMPATIBLE) + : "index created on version [" + + indexSettings.getIndexVersionCreated() + + "] with compatibility version [" + + version + + "] cannot be written by current version [" + + IndexVersion.current() + + ']'; + return true; + } } diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java index 280e319335b1..8e26443044ec 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.shard.ShardId; import java.nio.file.Path; @@ -149,7 +150,10 @@ public 
final class TranslogConfig { * translog, the shard is not writeable. */ public boolean hasTranslog() { - // Expect no translog files to exist for searchable snapshots - return false == indexSettings.getIndexMetadata().isSearchableSnapshot(); + var compatibilityVersion = indexSettings.getIndexMetadata().getCompatibilityVersion(); + if (compatibilityVersion.before(IndexVersions.MINIMUM_COMPATIBLE) || indexSettings.getIndexMetadata().isSearchableSnapshot()) { + return false; + } + return true; } } diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index f22a99cb27fa..0a3baf2c52f5 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -45,6 +45,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractLifecycleComponent; @@ -96,6 +97,7 @@ import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.InternalEngineFactory; import org.elasticsearch.index.engine.NoOpEngine; +import org.elasticsearch.index.engine.ReadOnlyEngine; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.get.GetStats; @@ -125,6 +127,7 @@ import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.index.shard.ShardId; +import 
org.elasticsearch.index.translog.TranslogStats; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; @@ -179,11 +182,15 @@ import java.util.function.LongSupplier; import java.util.stream.Collectors; import static java.util.Collections.emptyList; +import static org.elasticsearch.cluster.metadata.IndexMetadataVerifier.isFullySupportedVersion; +import static org.elasticsearch.cluster.metadata.IndexMetadataVerifier.isReadOnlySupportedVersion; import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList; import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.index.IndexService.IndexCreationContext.CREATE_INDEX; import static org.elasticsearch.index.IndexService.IndexCreationContext.METADATA_VERIFICATION; +import static org.elasticsearch.index.IndexVersions.MINIMUM_COMPATIBLE; +import static org.elasticsearch.index.IndexVersions.MINIMUM_READONLY_COMPATIBLE; import static org.elasticsearch.index.query.AbstractQueryBuilder.parseTopLevelQuery; import static org.elasticsearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES; @@ -801,6 +808,22 @@ public class IndicesService extends AbstractLifecycleComponent .filter(maybe -> Objects.requireNonNull(maybe).isPresent()) .toList(); if (engineFactories.isEmpty()) { + if (indexMetadata == null || isFullySupportedVersion(indexMetadata, MINIMUM_COMPATIBLE)) { + return new InternalEngineFactory(); + } else if (isReadOnlySupportedVersion(indexMetadata, MINIMUM_COMPATIBLE, MINIMUM_READONLY_COMPATIBLE)) { + return config -> { + return new ReadOnlyEngine( + config, + null, + config.getTranslogConfig().hasTranslog() ? 
null : new TranslogStats(0, 0, 0, 0, 0), + true, + Function.identity(), + true, + true + ); + }; + } + assert false : "unsupported: " + Strings.toString(indexMetadata); return new InternalEngineFactory(); } else if (engineFactories.size() == 1) { assert engineFactories.get(0).isPresent(); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 362a62c838e3..e297ddbf0399 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -51,6 +51,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import static org.elasticsearch.cluster.metadata.IndexMetadataVerifier.isReadOnlyVerified; import static org.elasticsearch.core.Strings.format; /** @@ -642,7 +643,9 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget assert localCheckpoint == globalCheckpoint : localCheckpoint + " != " + globalCheckpoint; } } - Translog.deleteAll(translogLocation); + if (isReadOnlyVerified(indexShard.indexSettings().getIndexMetadata())) { + Translog.deleteAll(translogLocation); + } return; } final String translogUUID = Translog.createEmptyTranslog( diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java index 270315f23a53..cc381d047fdf 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.metadata.DesiredNodes; import org.elasticsearch.cluster.metadata.DesiredNodesTestCase; import 
org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; @@ -64,6 +65,7 @@ import static org.elasticsearch.test.VersionUtils.maxCompatibleVersion; import static org.elasticsearch.test.VersionUtils.randomCompatibleVersion; import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.elasticsearch.test.VersionUtils.randomVersionBetween; +import static org.elasticsearch.test.index.IndexVersionUtils.getPreviousVersion; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -228,17 +230,66 @@ public class NodeJoinExecutorTests extends ESTestCase { ) ); } + var indexCreated = IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + getPreviousVersion(IndexVersions.MINIMUM_COMPATIBLE) + ); { - var indexMetadata = IndexMetadata.builder("read-only-compatible-but-unsupported") + var indexMetadata = IndexMetadata.builder("regular") .settings( Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersions.MINIMUM_READONLY_COMPATIBLE) .put(IndexMetadata.SETTING_BLOCKS_WRITE, true) + .put(MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING.getKey(), true) + .put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated) + .build() ) .numberOfShards(1) .numberOfReplicas(1) .build(); + NodeJoinExecutor.ensureIndexCompatibility( + IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + IndexVersion.current(), + Metadata.builder().put(indexMetadata, false).build() + ); + } + { + var settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated); + if (randomBoolean()) { + 
settings.put(MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING.getKey(), randomBoolean()); + } + if (randomBoolean()) { + settings.put(IndexMetadata.SETTING_BLOCKS_WRITE, false); + } + var indexMetadata = IndexMetadata.builder("regular").settings(settings).numberOfShards(1).numberOfReplicas(1).build(); + + expectThrows( + IllegalStateException.class, + () -> NodeJoinExecutor.ensureIndexCompatibility( + IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + IndexVersion.current(), + Metadata.builder().put(indexMetadata, false).build() + ) + ); + } + { + var settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated); + if (randomBoolean()) { + settings.put(MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING.getKey(), false); + } + if (randomBoolean()) { + settings.put(IndexMetadata.SETTING_BLOCKS_WRITE, randomBoolean()); + } + + var indexMetadata = IndexMetadata.builder("regular-not-read-only-verified") + .settings(settings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + expectThrows( IllegalStateException.class, () -> NodeJoinExecutor.ensureIndexCompatibility( diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java index 417ae89da0a6..44ce491b1e51 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java @@ -25,6 +25,8 @@ import org.elasticsearch.test.index.IndexVersionUtils; import java.util.Collections; import static org.elasticsearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; +import static org.elasticsearch.test.index.IndexVersionUtils.getPreviousVersion; +import static org.elasticsearch.test.index.IndexVersionUtils.randomVersionBetween; import static org.hamcrest.Matchers.equalTo; public class IndexMetadataVerifierTests 
extends ESTestCase { @@ -105,7 +107,7 @@ public class IndexMetadataVerifierTests extends ESTestCase { public void testIncompatibleVersion() { IndexMetadataVerifier service = getIndexMetadataVerifier(); - IndexVersion minCompat = IndexVersions.MINIMUM_COMPATIBLE; + IndexVersion minCompat = IndexVersions.MINIMUM_READONLY_COMPATIBLE; IndexVersion indexCreated = IndexVersion.fromId(randomIntBetween(1000099, minCompat.id() - 1)); final IndexMetadata metadata = newIndexMeta( "foo", @@ -124,7 +126,7 @@ public class IndexMetadataVerifierTests extends ESTestCase { + indexCreated.toReleaseVersion() + "] " + "but the minimum compatible version is [" - + minCompat.toReleaseVersion() + + IndexVersions.MINIMUM_COMPATIBLE.toReleaseVersion() + "]. It should be re-indexed in Elasticsearch " + (Version.CURRENT.major - 1) + ".x before upgrading to " @@ -133,20 +135,20 @@ public class IndexMetadataVerifierTests extends ESTestCase { ) ); - indexCreated = IndexVersionUtils.randomVersionBetween(random(), minCompat, IndexVersion.current()); + indexCreated = randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()); IndexMetadata goodMeta = newIndexMeta("foo", Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated).build()); service.verifyIndexMetadata(goodMeta, IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.MINIMUM_READONLY_COMPATIBLE); } public void testReadOnlyVersionCompatibility() { var service = getIndexMetadataVerifier(); - var indexCreated = IndexVersions.MINIMUM_READONLY_COMPATIBLE; { var idxMetadata = newIndexMeta( - "not-searchable-snapshot", + "legacy", Settings.builder() .put(IndexMetadata.SETTING_BLOCKS_WRITE, randomBoolean()) - .put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated) + .put(MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING.getKey(), randomBoolean()) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.fromId(6080099)) .build() ); String message = expectThrows( @@ -156,12 +158,9 @@ public class 
IndexMetadataVerifierTests extends ESTestCase { assertThat( message, equalTo( - "The index [not-searchable-snapshot/" + "The index [legacy/" + idxMetadata.getIndexUUID() - + "] has current compatibility version [" - + indexCreated.toReleaseVersion() - + "] " - + "but the minimum compatible version is [" + + "] has current compatibility version [6.8.0] but the minimum compatible version is [" + IndexVersions.MINIMUM_COMPATIBLE.toReleaseVersion() + "]. It should be re-indexed in Elasticsearch " + (Version.CURRENT.major - 1) @@ -171,14 +170,32 @@ public class IndexMetadataVerifierTests extends ESTestCase { ) ); } + var indexCreated = randomVersionBetween( + random(), + IndexVersions.MINIMUM_READONLY_COMPATIBLE, + getPreviousVersion(IndexVersions.MINIMUM_COMPATIBLE) + ); { var idxMetadata = newIndexMeta( - "not-read-only", + "regular", Settings.builder() + .put(IndexMetadata.SETTING_BLOCKS_WRITE, true) + .put(MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING.getKey(), true) .put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated) - .put(INDEX_STORE_TYPE_SETTING.getKey(), SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOT_STORE_TYPE) .build() ); + service.verifyIndexMetadata(idxMetadata, IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.MINIMUM_READONLY_COMPATIBLE); + } + { + var settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated); + if (randomBoolean()) { + settings.put(MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING.getKey(), randomBoolean()); + } + if (randomBoolean()) { + settings.put(IndexMetadata.SETTING_BLOCKS_WRITE, false); + } + + var idxMetadata = newIndexMeta("regular-no-write-block", settings.build()); String message = expectThrows( IllegalStateException.class, () -> service.verifyIndexMetadata(idxMetadata, IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.MINIMUM_READONLY_COMPATIBLE) @@ -186,10 +203,39 @@ public class IndexMetadataVerifierTests extends ESTestCase { assertThat( message, equalTo( - "The index 
[not-read-only/" + "The index [regular-no-write-block/" + idxMetadata.getIndexUUID() + "] created in version [" - + indexCreated + + indexCreated.toReleaseVersion() + + "] with current compatibility version [" + + indexCreated.toReleaseVersion() + + "] must be marked as read-only using the setting [index.blocks.write] set to [true] before upgrading to " + + Build.current().version() + + "." + ) + ); + } + { + var settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated); + if (randomBoolean()) { + settings.put(MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING.getKey(), false); + } + if (randomBoolean()) { + settings.put(IndexMetadata.SETTING_BLOCKS_WRITE, randomBoolean()); + } + + var idxMetadata = newIndexMeta("regular-not-read-only-verified", settings.build()); + String message = expectThrows( + IllegalStateException.class, + () -> service.verifyIndexMetadata(idxMetadata, IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.MINIMUM_READONLY_COMPATIBLE) + ).getMessage(); + assertThat( + message, + equalTo( + "The index [regular-not-read-only-verified/" + + idxMetadata.getIndexUUID() + + "] created in version [" + + indexCreated.toReleaseVersion() + "] with current compatibility version [" + indexCreated.toReleaseVersion() + "] must be marked as read-only using the setting [index.blocks.write] set to [true] before upgrading to " @@ -200,7 +246,7 @@ public class IndexMetadataVerifierTests extends ESTestCase { } { var idxMetadata = newIndexMeta( - "good", + "searchable-snapshot", Settings.builder() .put(IndexMetadata.SETTING_BLOCKS_WRITE, true) .put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated) @@ -209,6 +255,42 @@ public class IndexMetadataVerifierTests extends ESTestCase { ); service.verifyIndexMetadata(idxMetadata, IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.MINIMUM_READONLY_COMPATIBLE); } + { + var idxMetadata = newIndexMeta( + "archive", + Settings.builder() + .put(IndexMetadata.SETTING_BLOCKS_WRITE, true) + 
.put(IndexMetadata.SETTING_VERSION_CREATED, Version.fromId(6080099)) + .put(IndexMetadata.SETTING_VERSION_COMPATIBILITY, indexCreated) + .build() + ); + service.verifyIndexMetadata(idxMetadata, IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.MINIMUM_READONLY_COMPATIBLE); + } + { + var idxMetadata = newIndexMeta( + "archive-no-write-block", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.fromId(6080099)) + .put(IndexMetadata.SETTING_VERSION_COMPATIBILITY, indexCreated) + .build() + ); + String message = expectThrows( + IllegalStateException.class, + () -> service.verifyIndexMetadata(idxMetadata, IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.MINIMUM_READONLY_COMPATIBLE) + ).getMessage(); + assertThat( + message, + equalTo( + "The index [archive-no-write-block/" + + idxMetadata.getIndexUUID() + + "] created in version [6.8.0] with current compatibility version [" + + indexCreated.toReleaseVersion() + + "] must be marked as read-only using the setting [index.blocks.write] set to [true] before upgrading to " + + Build.current().version() + + "." 
+ ) + ); + } } private IndexMetadataVerifier getIndexMetadataVerifier() { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java index 01a4076c58bd..5749bf762e2e 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.DataTier; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; @@ -531,7 +532,8 @@ public class TransportResumeFollowAction extends AcknowledgedTransportMasterNode MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING, EngineConfig.INDEX_CODEC_SETTING, DataTier.TIER_PREFERENCE_SETTING, - IndexSettings.BLOOM_FILTER_ID_FIELD_ENABLED_SETTING + IndexSettings.BLOOM_FILTER_ID_FIELD_ENABLED_SETTING, + MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING ); public static Settings filter(Settings originalSettings) { From 7ac250077300e4b206e740da166fd3eec9dac389 Mon Sep 17 00:00:00 2001 From: Luke Whiting Date: Thu, 16 Jan 2025 16:01:08 +0000 Subject: [PATCH 18/30] Make requests_per_second configurable to throttle reindexing (#120207) * Make requests_per_second configurable to throttle reindexing * Update docs/changelog/120207.yaml * Add restrictions to prevent zero or negative rate limit Also allow -1 as infinite * PR Changes - Switch to cluster settings for rate limit retrieval --- 
docs/changelog/120207.yaml | 5 + ...indexDatastreamIndexTransportActionIT.java | 27 --- .../xpack/migrate/MigratePlugin.java | 2 + ...ReindexDataStreamIndexTransportAction.java | 34 ++- ...exDataStreamIndexTransportActionTests.java | 206 ++++++++++++++++++ 5 files changed, 246 insertions(+), 28 deletions(-) create mode 100644 docs/changelog/120207.yaml create mode 100644 x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportActionTests.java diff --git a/docs/changelog/120207.yaml b/docs/changelog/120207.yaml new file mode 100644 index 000000000000..c01dfc6aecf7 --- /dev/null +++ b/docs/changelog/120207.yaml @@ -0,0 +1,5 @@ +pr: 120207 +summary: Make `requests_per_second` configurable to throttle reindexing +area: Data streams +type: enhancement +issues: [] diff --git a/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexTransportActionIT.java b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexTransportActionIT.java index b5eb1e584673..cfd4f0901336 100644 --- a/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexTransportActionIT.java +++ b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDatastreamIndexTransportActionIT.java @@ -498,31 +498,4 @@ public class ReindexDatastreamIndexTransportActionIT extends ESIntegTestCase { .get(IndexMetadata.SETTING_INDEX_UUID); } - public void testGenerateDestIndexName_noDotPrefix() { - String sourceIndex = "sourceindex"; - String expectedDestIndex = "migrated-sourceindex"; - String actualDestIndex = ReindexDataStreamIndexTransportAction.generateDestIndexName(sourceIndex); - assertEquals(expectedDestIndex, actualDestIndex); - } - - public void testGenerateDestIndexName_withDotPrefix() { - String sourceIndex = ".sourceindex"; - String expectedDestIndex = 
".migrated-sourceindex"; - String actualDestIndex = ReindexDataStreamIndexTransportAction.generateDestIndexName(sourceIndex); - assertEquals(expectedDestIndex, actualDestIndex); - } - - public void testGenerateDestIndexName_withHyphen() { - String sourceIndex = "source-index"; - String expectedDestIndex = "migrated-source-index"; - String actualDestIndex = ReindexDataStreamIndexTransportAction.generateDestIndexName(sourceIndex); - assertEquals(expectedDestIndex, actualDestIndex); - } - - public void testGenerateDestIndexName_withUnderscore() { - String sourceIndex = "source_index"; - String expectedDestIndex = "migrated-source_index"; - String actualDestIndex = ReindexDataStreamIndexTransportAction.generateDestIndexName(sourceIndex); - assertEquals(expectedDestIndex, actualDestIndex); - } } diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java index 55ec4065be8c..93b90e551e72 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java @@ -59,6 +59,7 @@ import java.util.function.Predicate; import java.util.function.Supplier; import static org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.REINDEX_DATA_STREAM_FEATURE_FLAG; +import static org.elasticsearch.xpack.migrate.action.ReindexDataStreamIndexTransportAction.REINDEX_MAX_REQUESTS_PER_SECOND_SETTING; import static org.elasticsearch.xpack.migrate.task.ReindexDataStreamPersistentTaskExecutor.MAX_CONCURRENT_INDICES_REINDEXED_PER_DATA_STREAM_SETTING; public class MigratePlugin extends Plugin implements ActionPlugin, PersistentTaskPlugin { @@ -160,6 +161,7 @@ public class MigratePlugin extends Plugin implements ActionPlugin, PersistentTas public List> getSettings() { List> pluginSettings = new ArrayList<>(); 
pluginSettings.add(MAX_CONCURRENT_INDICES_REINDEXED_PER_DATA_STREAM_SETTING); + pluginSettings.add(REINDEX_MAX_REQUESTS_PER_SECOND_SETTING); return pluginSettings; } } diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java index bd0128e658c3..f1810d85ffd1 100644 --- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Assertions; import org.elasticsearch.core.TimeValue; @@ -52,8 +53,37 @@ public class ReindexDataStreamIndexTransportAction extends HandledTransportActio ReindexDataStreamIndexAction.Request, ReindexDataStreamIndexAction.Response> { + public static final String REINDEX_MAX_REQUESTS_PER_SECOND_KEY = "migrate.data_stream_reindex_max_request_per_second"; + + public static final Setting REINDEX_MAX_REQUESTS_PER_SECOND_SETTING = new Setting<>( + REINDEX_MAX_REQUESTS_PER_SECOND_KEY, + Float.toString(10f), + s -> { + if (s.equals("-1")) { + return Float.POSITIVE_INFINITY; + } else { + return Float.parseFloat(s); + } + }, + value -> { + if (value <= 0f) { + throw new IllegalArgumentException( + "Failed to parse value [" + + value + + "] for setting [" + + REINDEX_MAX_REQUESTS_PER_SECOND_KEY + + "] " + + "must be greater than 0 or -1 for infinite" + ); + } + }, + Setting.Property.Dynamic, + 
Setting.Property.NodeScope + ); + private static final Logger logger = LogManager.getLogger(ReindexDataStreamIndexTransportAction.class); private static final IndicesOptions IGNORE_MISSING_OPTIONS = IndicesOptions.fromOptions(true, true, false, false); + private final ClusterService clusterService; private final Client client; @@ -183,7 +213,8 @@ public class ReindexDataStreamIndexTransportAction extends HandledTransportActio client.execute(CreateIndexFromSourceAction.INSTANCE, request, failIfNotAcknowledged(listener, errorMessage)); } - private void reindex(String sourceIndexName, String destIndexName, ActionListener listener, TaskId parentTaskId) { + // Visible for testing + void reindex(String sourceIndexName, String destIndexName, ActionListener listener, TaskId parentTaskId) { logger.debug("Reindex to destination index [{}] from source index [{}]", destIndexName, sourceIndexName); var reindexRequest = new ReindexRequest(); reindexRequest.setSourceIndices(sourceIndexName); @@ -191,6 +222,7 @@ public class ReindexDataStreamIndexTransportAction extends HandledTransportActio reindexRequest.getSearchRequest().source().fetchSource(true); reindexRequest.setDestIndex(destIndexName); reindexRequest.setParentTask(parentTaskId); + reindexRequest.setRequestsPerSecond(clusterService.getClusterSettings().get(REINDEX_MAX_REQUESTS_PER_SECOND_SETTING)); reindexRequest.setSlices(0); // equivalent to slices=auto in rest api client.execute(ReindexAction.INSTANCE, reindexRequest, listener); } diff --git a/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportActionTests.java b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportActionTests.java new file mode 100644 index 000000000000..99e1031dec3a --- /dev/null +++ b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamIndexTransportActionTests.java @@ -0,0 +1,206 @@ +/* + * Copyright 
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.migrate.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.ReindexAction; +import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.TransportService; +import org.junit.After; +import org.junit.Before; +import org.mockito.Answers; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import java.util.Collections; + +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.when; + +public class ReindexDataStreamIndexTransportActionTests extends ESTestCase { + + @Mock(answer = Answers.RETURNS_DEEP_STUBS) + private TransportService transportService; + @Mock + private ClusterService clusterService; + @Mock + private ActionFilters actionFilters; + @Mock + private Client client; + + @InjectMocks + private ReindexDataStreamIndexTransportAction action; + + @Captor + private ArgumentCaptor request; + + private AutoCloseable mocks; + + @Before + public void setUp() throws Exception { + super.setUp(); + mocks = MockitoAnnotations.openMocks(this); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + 
mocks.close(); + } + + public void testGenerateDestIndexName_noDotPrefix() { + String sourceIndex = "sourceindex"; + String expectedDestIndex = "migrated-sourceindex"; + String actualDestIndex = ReindexDataStreamIndexTransportAction.generateDestIndexName(sourceIndex); + assertEquals(expectedDestIndex, actualDestIndex); + } + + public void testGenerateDestIndexName_withDotPrefix() { + String sourceIndex = ".sourceindex"; + String expectedDestIndex = ".migrated-sourceindex"; + String actualDestIndex = ReindexDataStreamIndexTransportAction.generateDestIndexName(sourceIndex); + assertEquals(expectedDestIndex, actualDestIndex); + } + + public void testGenerateDestIndexName_withHyphen() { + String sourceIndex = "source-index"; + String expectedDestIndex = "migrated-source-index"; + String actualDestIndex = ReindexDataStreamIndexTransportAction.generateDestIndexName(sourceIndex); + assertEquals(expectedDestIndex, actualDestIndex); + } + + public void testGenerateDestIndexName_withUnderscore() { + String sourceIndex = "source_index"; + String expectedDestIndex = "migrated-source_index"; + String actualDestIndex = ReindexDataStreamIndexTransportAction.generateDestIndexName(sourceIndex); + assertEquals(expectedDestIndex, actualDestIndex); + } + + public void testReindexIncludesRateLimit() { + var targetRateLimit = randomFloatBetween(1, 100, true); + Settings settings = Settings.builder() + .put(ReindexDataStreamIndexTransportAction.REINDEX_MAX_REQUESTS_PER_SECOND_SETTING.getKey(), targetRateLimit) + .build(); + + String sourceIndex = randomAlphanumericOfLength(10); + String destIndex = randomAlphanumericOfLength(10); + ActionListener listener = ActionListener.noop(); + TaskId taskId = TaskId.EMPTY_TASK_ID; + + when(clusterService.getClusterSettings()).thenReturn( + new ClusterSettings( + settings, + Collections.singleton(ReindexDataStreamIndexTransportAction.REINDEX_MAX_REQUESTS_PER_SECOND_SETTING) + ) + ); + + doNothing().when(client).execute(eq(ReindexAction.INSTANCE), 
request.capture(), eq(listener)); + + action.reindex(sourceIndex, destIndex, listener, taskId); + + ReindexRequest requestValue = request.getValue(); + + assertEquals(targetRateLimit, requestValue.getRequestsPerSecond(), 0.0); + } + + public void testReindexIncludesInfiniteRateLimit() { + Settings settings = Settings.builder() + .put(ReindexDataStreamIndexTransportAction.REINDEX_MAX_REQUESTS_PER_SECOND_SETTING.getKey(), "-1") + .build(); + + String sourceIndex = randomAlphanumericOfLength(10); + String destIndex = randomAlphanumericOfLength(10); + ActionListener listener = ActionListener.noop(); + TaskId taskId = TaskId.EMPTY_TASK_ID; + + when(clusterService.getClusterSettings()).thenReturn( + new ClusterSettings( + settings, + Collections.singleton(ReindexDataStreamIndexTransportAction.REINDEX_MAX_REQUESTS_PER_SECOND_SETTING) + ) + ); + doNothing().when(client).execute(eq(ReindexAction.INSTANCE), request.capture(), eq(listener)); + + action.reindex(sourceIndex, destIndex, listener, taskId); + + ReindexRequest requestValue = request.getValue(); + + assertEquals(Float.POSITIVE_INFINITY, requestValue.getRequestsPerSecond(), 0.0); + } + + public void testReindexZeroRateLimitThrowsError() { + Settings settings = Settings.builder() + .put(ReindexDataStreamIndexTransportAction.REINDEX_MAX_REQUESTS_PER_SECOND_SETTING.getKey(), "0") + .build(); + + String sourceIndex = randomAlphanumericOfLength(10); + String destIndex = randomAlphanumericOfLength(10); + ActionListener listener = ActionListener.noop(); + TaskId taskId = TaskId.EMPTY_TASK_ID; + + when(clusterService.getClusterSettings()).thenReturn( + new ClusterSettings( + settings, + Collections.singleton(ReindexDataStreamIndexTransportAction.REINDEX_MAX_REQUESTS_PER_SECOND_SETTING) + ) + ); + + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> action.reindex(sourceIndex, destIndex, listener, taskId) + ); + assertEquals( + "Failed to parse value [0.0] for setting 
[migrate.data_stream_reindex_max_request_per_second]" + + " must be greater than 0 or -1 for infinite", + e.getMessage() + ); + } + + public void testReindexNegativeRateLimitThrowsError() { + float targetRateLimit = randomFloatBetween(-100, -1, true); + Settings settings = Settings.builder() + .put(ReindexDataStreamIndexTransportAction.REINDEX_MAX_REQUESTS_PER_SECOND_SETTING.getKey(), targetRateLimit) + .build(); + + String sourceIndex = randomAlphanumericOfLength(10); + String destIndex = randomAlphanumericOfLength(10); + ActionListener listener = ActionListener.noop(); + TaskId taskId = TaskId.EMPTY_TASK_ID; + + when(clusterService.getClusterSettings()).thenReturn( + new ClusterSettings( + settings, + Collections.singleton(ReindexDataStreamIndexTransportAction.REINDEX_MAX_REQUESTS_PER_SECOND_SETTING) + ) + ); + + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> action.reindex(sourceIndex, destIndex, listener, taskId) + ); + assertEquals( + "Failed to parse value [" + + targetRateLimit + + "] for setting [migrate.data_stream_reindex_max_request_per_second]" + + " must be greater than 0 or -1 for infinite", + e.getMessage() + ); + } +} From b1fb31e6572a4abf314c072a6aafebf725e650af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mariusz=20J=C3=B3zala?= <377355+jozala@users.noreply.github.com> Date: Thu, 16 Jan 2025 17:40:22 +0100 Subject: [PATCH 19/30] Revert "[TEST] Use Docker Compose v2 for TestFixturePlugin (#120214)" (#120294) Reverts elastic/elasticsearch#120214 Reverting this change because this is causing failing tests on AmazonLinux 2023. The problem with wrong version of `docker-compose` has been fixed in the VM images so builds should no longer fail. 
--- .../gradle/internal/testfixtures/TestFixturesPlugin.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java index fac7d86701d5..ab28a66d9306 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java @@ -122,7 +122,7 @@ public class TestFixturesPlugin implements Plugin { composeExtension.getRemoveContainers().set(true); composeExtension.getCaptureContainersOutput() .set(EnumSet.of(LogLevel.INFO, LogLevel.DEBUG).contains(project.getGradle().getStartParameter().getLogLevel())); - composeExtension.getUseDockerComposeV2().set(true); + composeExtension.getUseDockerComposeV2().set(false); composeExtension.getExecutable().set(this.providerFactory.provider(() -> { String composePath = dockerSupport.get().getDockerAvailability().dockerComposePath(); LOGGER.debug("Docker Compose path: {}", composePath); From 28dde9176248ec39be0f5f8dd9a5aeb32c64b7d2 Mon Sep 17 00:00:00 2001 From: Alexey Ivanov Date: Thu, 16 Jan 2025 16:54:25 +0000 Subject: [PATCH 20/30] Add support for specifying reindexing script for system index migration (#119001) Adds support for setting a reindexing script for system index migration during /_migration/system_features task. Script is set using SystemIndexDescriptor.Builder.setMigrationScript method. 
--- docs/changelog/119001.yaml | 5 +++ .../indices/TestSystemIndexDescriptor.java | 2 ++ ...ransportGetFeatureUpgradeStatusAction.java | 2 +- .../indices/SystemIndexDescriptor.java | 33 +++++++++++++++++++ .../upgrades/SystemIndexMigrationInfo.java | 8 +++++ .../upgrades/SystemIndexMigrator.java | 5 +++ 6 files changed, 54 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/119001.yaml diff --git a/docs/changelog/119001.yaml b/docs/changelog/119001.yaml new file mode 100644 index 000000000000..d54c7a58c83e --- /dev/null +++ b/docs/changelog/119001.yaml @@ -0,0 +1,5 @@ +pr: 119001 +summary: Add support for specifying reindexing script for system index migration +area: Infra/Core +type: enhancement +issues: [] diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/TestSystemIndexDescriptor.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/TestSystemIndexDescriptor.java index 487bbf7c9a4b..03e7660d6d46 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/TestSystemIndexDescriptor.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/TestSystemIndexDescriptor.java @@ -51,6 +51,7 @@ public class TestSystemIndexDescriptor extends SystemIndexDescriptor { INDEX_NAME, 0, "stack", + null, Type.INTERNAL_MANAGED, List.of(), List.of(), @@ -70,6 +71,7 @@ public class TestSystemIndexDescriptor extends SystemIndexDescriptor { name, 0, "stack", + null, Type.INTERNAL_MANAGED, List.of(), List.of(), diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java index afe615add28d..6f3e27981adf 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java @@ -53,7 +53,7 @@ public class TransportGetFeatureUpgradeStatusAction extends TransportMasterNodeA GetFeatureUpgradeStatusResponse> { /** - * Once all feature migrations for 8.x -> 9.x have been tested, we can bump this to Version.V_8_0_0 + * Once all feature migrations for 9.x -> 10.x have been tested, we can bump this to Version.V_9_0_0 */ public static final Version NO_UPGRADE_REQUIRED_VERSION = Version.V_8_0_0; public static final IndexVersion NO_UPGRADE_REQUIRED_INDEX_VERSION = IndexVersions.V_8_0_0; diff --git a/server/src/main/java/org/elasticsearch/indices/SystemIndexDescriptor.java b/server/src/main/java/org/elasticsearch/indices/SystemIndexDescriptor.java index 9b58c37fff27..48d492e2cc76 100644 --- a/server/src/main/java/org/elasticsearch/indices/SystemIndexDescriptor.java +++ b/server/src/main/java/org/elasticsearch/indices/SystemIndexDescriptor.java @@ -13,6 +13,7 @@ import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.CharacterRunAutomaton; import org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.automaton.RegExp; +import org.elasticsearch.action.admin.cluster.migration.TransportGetFeatureUpgradeStatusAction; import org.elasticsearch.action.admin.indices.create.AutoCreateAction; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -145,6 +146,24 @@ public class SystemIndexDescriptor implements IndexPatternMatcher, Comparable + * Note: the script usually should only exist in the versions supporting migration to the next major release - + * specifically, the last (two) minors of the current major. + * It should be created once the last minor branch has diverged from the next major branch (main). 
+ * This ensures the script is available only in the versions where it is needed + * and avoids removing and maintaining it in the next major branch. + * For example: In order to migrate an index created in v7 when upgrading to v9, + * the script should be in the v8 minors supporting upgrade to v9 - 8.18 and 8.19. + *
+ * See: Reindex scripts + */ + private final String migrationScript; + /** Mapping version from the descriptor */ private final MappingsVersion mappingsVersion; @@ -197,6 +216,7 @@ public class SystemIndexDescriptor implements IndexPatternMatcher, Comparable allowedElasticProductOrigins, List priorSystemIndexDescriptors, @@ -346,6 +367,7 @@ public class SystemIndexDescriptor implements IndexPatternMatcher, Comparable allowedElasticProductOrigins = List.of(); private List priorSystemIndexDescriptors = List.of(); @@ -721,6 +748,11 @@ public class SystemIndexDescriptor implements IndexPatternMatcher, Comparable { private final Settings settings; private final String mapping; private final String origin; + private final String migrationScript; private final SystemIndices.Feature owningFeature; private final boolean allowsTemplates; @@ -57,6 +58,7 @@ class SystemIndexMigrationInfo implements Comparable { Settings settings, String mapping, String origin, + String migrationScript, SystemIndices.Feature owningFeature, boolean allowsTemplates ) { @@ -65,6 +67,7 @@ class SystemIndexMigrationInfo implements Comparable { this.settings = settings; this.mapping = mapping; this.origin = origin; + this.migrationScript = migrationScript; this.owningFeature = owningFeature; this.allowsTemplates = allowsTemplates; } @@ -118,6 +121,10 @@ class SystemIndexMigrationInfo implements Comparable { return origin; } + String getMigrationScript() { + return migrationScript; + } + /** * By default, system indices should not be affected by user defined templates, so this * method should return false in almost all cases. 
At the moment certain Kibana indices use @@ -217,6 +224,7 @@ class SystemIndexMigrationInfo implements Comparable { settings, mapping, descriptor.getOrigin(), + descriptor.getMigrationScript(), feature, descriptor.allowsTemplates() ); diff --git a/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java index e037b0d99438..186618f3662f 100644 --- a/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java +++ b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java @@ -45,6 +45,7 @@ import org.elasticsearch.index.reindex.ReindexAction; import org.elasticsearch.index.reindex.ReindexRequest; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.persistent.AllocatedPersistentTask; +import org.elasticsearch.script.Script; import org.elasticsearch.tasks.TaskId; import java.util.LinkedList; @@ -563,6 +564,10 @@ public class SystemIndexMigrator extends AllocatedPersistentTask { reindexRequest.setSourceIndices(migrationInfo.getCurrentIndexName()); reindexRequest.setDestIndex(migrationInfo.getNextIndexName()); reindexRequest.setRefresh(true); + String migrationScript = migrationInfo.getMigrationScript(); + if (Strings.isNullOrEmpty(migrationScript) == false) { + reindexRequest.setScript(Script.parse(migrationScript)); + } migrationInfo.createClient(baseClient).execute(ReindexAction.INSTANCE, reindexRequest, listener); } From 874bf7ba1e29997a8dd1475aa23624fcbcfd9f55 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 16 Jan 2025 17:55:09 +0100 Subject: [PATCH 21/30] Replace LegacyGeoShapeWithDocValuesIT with unit tests (#120266) This change removes LegacyGeoShapeWithDocValuesIT which contains two specific test for legacy "geo_shape" mappings that can still be used in version 7 indices. 
The functionality checked by "testMappingUpdate" should be covered by GeoShapeWithDocValuesFieldMapperTests#testGeoShapeLegacyMerge already, the test with a Circle shape is moved to GeoShapeWithDocValuesFieldMapperTests. --- .../search/LegacyGeoShapeWithDocValuesIT.java | 109 ------------------ ...GeoShapeWithDocValuesFieldMapperTests.java | 40 +++++++ 2 files changed, 40 insertions(+), 109 deletions(-) delete mode 100644 x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/LegacyGeoShapeWithDocValuesIT.java diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/LegacyGeoShapeWithDocValuesIT.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/LegacyGeoShapeWithDocValuesIT.java deleted file mode 100644 index 66954cbf4306..000000000000 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/LegacyGeoShapeWithDocValuesIT.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.spatial.search; - -import org.apache.lucene.tests.util.LuceneTestCase; -import org.elasticsearch.core.UpdateForV9; -import org.elasticsearch.geometry.Circle; -import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.IndexVersions; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.search.geo.GeoShapeIntegTestCase; -import org.elasticsearch.test.index.IndexVersionUtils; -import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.spatial.LocalStateSpatialPlugin; - -import java.io.IOException; -import java.util.Collection; -import java.util.Collections; - -import static org.elasticsearch.index.query.QueryBuilders.geoShapeQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.hamcrest.Matchers.containsString; - -@UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) -@LuceneTestCase.AwaitsFix(bugUrl = "this is testing legacy functionality so can likely be removed in 9.0") -public class LegacyGeoShapeWithDocValuesIT extends GeoShapeIntegTestCase { - - @Override - protected Collection> nodePlugins() { - return Collections.singleton(LocalStateSpatialPlugin.class); - } - - @Override - protected void getGeoShapeMapping(XContentBuilder b) throws IOException { - b.field("type", "geo_shape"); - b.field("strategy", "recursive"); - } - - @Override - protected IndexVersion randomSupportedVersion() { - // legacy shapes can only be created in version lower than 8.x - return IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.V_8_0_0); - } - - @Override - protected boolean allowExpensiveQueries() { - return false; - } - - public void testMappingUpdate() { - // create index - assertAcked( - indicesAdmin().prepareCreate("test") 
- .setSettings(settings(randomSupportedVersion()).build()) - .setMapping("shape", "type=geo_shape,strategy=recursive") - ); - ensureGreen(); - - String update = """ - { - "properties": { - "shape": { - "type": "geo_shape" - } - } - }"""; - - IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> indicesAdmin().preparePutMapping("test").setSource(update, XContentType.JSON).get() - ); - assertThat(e.getMessage(), containsString("mapper [shape] of type [geo_shape] cannot change strategy from [recursive] to [BKD]")); - } - - /** - * Test that the circle is still supported for the legacy shapes - */ - public void testLegacyCircle() throws Exception { - // create index - assertAcked( - prepareCreate("test").setSettings(settings(randomSupportedVersion()).build()) - .setMapping("shape", "type=geo_shape,strategy=recursive,tree=geohash") - ); - ensureGreen(); - - indexRandom(true, prepareIndex("test").setId("0").setSource("shape", (ToXContent) (builder, params) -> { - builder.startObject() - .field("type", "circle") - .startArray("coordinates") - .value(30) - .value(50) - .endArray() - .field("radius", "77km") - .endObject(); - return builder; - })); - - // test self crossing of circles - assertHitCount(client().prepareSearch("test").setQuery(geoShapeQuery("shape", new Circle(30, 50, 77000))), 1L); - } -} diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java index 394edc5df5ea..712113b1960e 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java @@ -6,7 +6,12 @@ */ package org.elasticsearch.xpack.spatial.index.mapper; +import 
org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHits; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.GeoJson; @@ -14,6 +19,7 @@ import org.elasticsearch.common.geo.Orientation; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.geo.SpatialStrategy; import org.elasticsearch.geo.GeometryTestUtils; +import org.elasticsearch.geometry.Circle; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.MultiPoint; import org.elasticsearch.geometry.Point; @@ -27,12 +33,14 @@ import org.elasticsearch.index.mapper.AbstractShapeGeometryFieldMapper; import org.elasticsearch.index.mapper.AbstractShapeGeometryFieldMapper.AbstractShapeGeometryFieldType; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentParsingException; +import org.elasticsearch.index.mapper.LuceneDocument; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.index.query.GeoShapeQueryBuilder; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.legacygeo.mapper.LegacyGeoShapeFieldMapper; import org.elasticsearch.legacygeo.mapper.LegacyGeoShapeFieldMapper.GeoShapeFieldType; @@ -390,6 +398,38 @@ public class GeoShapeWithDocValuesFieldMapperTests extends GeoFieldMapperTests { assertFieldWarnings("strategy"); } + public void testGeoShapeLegacyCircle() throws Exception { + IndexVersion version = 
IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.V_8_0_0); + MapperService mapperService = createMapperService(version, fieldMapping(b -> { + b.field("type", getFieldName()); + b.field("strategy", "recursive"); + b.field("tree", "geohash"); + })); + assertCriticalWarnings( + "Parameter [strategy] is deprecated and will be removed in a future version", + "Parameter [tree] is deprecated and will be removed in a future version" + ); + + try (Directory directory = newDirectory()) { + RandomIndexWriter iw = new RandomIndexWriter(random(), directory); + Circle circle = new Circle(30, 50, 77000); + + LuceneDocument doc = mapperService.documentMapper().parse(source(b -> { + b.field("field"); + GeoJson.toXContent(circle, b, null); + })).rootDoc(); + iw.addDocument(doc); + iw.close(); + try (DirectoryReader reader = DirectoryReader.open(directory)) { + SearchExecutionContext context = createSearchExecutionContext(mapperService, newSearcher(reader)); + GeoShapeQueryBuilder queryBuilder = new GeoShapeQueryBuilder("field", new Circle(30, 50, 77000)); + TopDocs docs = context.searcher().search(queryBuilder.toQuery(context), 1); + assertThat(docs.totalHits.value(), equalTo(1L)); + assertThat(docs.totalHits.relation(), equalTo(TotalHits.Relation.EQUAL_TO)); + } + } + } + private void assertFieldWarnings(String... 
fieldNames) { String[] warnings = new String[fieldNames.length]; for (int i = 0; i < fieldNames.length; ++i) { From 8babdcc744abb279eb22735fa5d0360e64316bc9 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Thu, 16 Jan 2025 17:06:42 +0000 Subject: [PATCH 22/30] Mark Mapper BWC feature as assumed (#120286) The first step in removing the (now unnecessary) feature --- .../java/org/elasticsearch/index/mapper/MapperFeatures.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java index 8e669a91fd9e..1097c1f0ea16 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java @@ -21,7 +21,7 @@ public class MapperFeatures implements FeatureSpecification { // Used to avoid noise in mixed cluster and rest compatibility tests. Must not be backported to 8.x branch. // This label gets added to tests with such failures before merging with main, then removed when backported to 8.x. 
- public static final NodeFeature BWC_WORKAROUND_9_0 = new NodeFeature("mapper.bwc_workaround_9_0"); + public static final NodeFeature BWC_WORKAROUND_9_0 = new NodeFeature("mapper.bwc_workaround_9_0", true); @Override public Set getFeatures() { From d19f3d37899a8f95757fd0c1fcdc8d342adff999 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 16 Jan 2025 18:08:48 +0100 Subject: [PATCH 23/30] Add support for relocating shards with externally acquired primary permits (#120098) For hollow shards, the HollowIndexEngine holds primary permits and we can relocate an index shard with the permits provided by HollowIndexEngine --- .../elasticsearch/index/shard/IndexShard.java | 29 +++++++++++++++++-- 1 file changed, 26 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 5f7d1e1106a1..f52ea41d811c 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -779,10 +779,28 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl final String targetAllocationId, final BiConsumer> consumer, final ActionListener listener + ) throws IllegalIndexShardStateException, IllegalStateException { + relocated(targetNodeId, targetAllocationId, consumer, listener, null); + } + + /** + * Provides a variant of {@link IndexShard#relocated(String, String, BiConsumer, ActionListener, Releasable)} with an option + * to relocate the shard under externally acquired primary permits. 
+ * + * @param acquiredPrimaryPermits if null, waits until all the primary permits are acquired, otherwise it calls the consumer immediately + */ + public void relocated( + final String targetNodeId, + final String targetAllocationId, + final BiConsumer> consumer, + final ActionListener listener, + @Nullable final Releasable acquiredPrimaryPermits ) throws IllegalIndexShardStateException, IllegalStateException { assert shardRouting.primary() : "only primaries can be marked as relocated: " + shardRouting; + assert acquiredPrimaryPermits == null || indexShardOperationPermits.getActiveOperationsCount() == OPERATIONS_BLOCKED + : "external primary permits are provided but not held by the shard"; try (Releasable forceRefreshes = refreshListeners.forceRefreshes()) { - indexShardOperationPermits.blockOperations(new ActionListener<>() { + ActionListener onAcquired = new ActionListener<>() { @Override public void onResponse(Releasable releasable) { boolean success = false; @@ -860,8 +878,13 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl listener.onFailure(e); } } - }, 30L, TimeUnit.MINUTES, EsExecutors.DIRECT_EXECUTOR_SERVICE); // Wait on current thread because this execution is wrapped by - // CancellableThreads and we want to be able to interrupt it + }; + if (acquiredPrimaryPermits == null) { + // Wait on current thread because this execution is wrapped by CancellableThreads and we want to be able to interrupt it + indexShardOperationPermits.blockOperations(onAcquired, 30L, TimeUnit.MINUTES, EsExecutors.DIRECT_EXECUTOR_SERVICE); + } else { + ActionListener.completeWith(onAcquired, () -> acquiredPrimaryPermits); + } } } From a356e9586ecfed321dd79847d1896174abaf2cac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 16 Jan 2025 18:19:41 +0100 Subject: [PATCH 24/30] Remove GeoBoundingBoxQueryLegacyGeoShapeWithDocValuesIT (#120279) This change removes GeoBoundingBoxQueryLegacyGeoShapeWithDocValuesIT because it 
tests the deprecated LegacyGeoShapeFieldMapper that is only used in read-only version 7 indices now. An equivalent test already exists in modules/legacy-geo (GeoBoundingBoxQueryLegacyGeoShapeIT). --- ...BoxQueryLegacyGeoShapeWithDocValuesIT.java | 53 ------------------- 1 file changed, 53 deletions(-) delete mode 100644 x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoBoundingBoxQueryLegacyGeoShapeWithDocValuesIT.java diff --git a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoBoundingBoxQueryLegacyGeoShapeWithDocValuesIT.java b/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoBoundingBoxQueryLegacyGeoShapeWithDocValuesIT.java deleted file mode 100644 index 3f496e1d320c..000000000000 --- a/x-pack/plugin/spatial/src/internalClusterTest/java/org/elasticsearch/xpack/spatial/search/GeoBoundingBoxQueryLegacyGeoShapeWithDocValuesIT.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.spatial.search; - -import org.apache.lucene.tests.util.LuceneTestCase; -import org.elasticsearch.core.UpdateForV9; -import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.IndexVersions; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.search.geo.GeoBoundingBoxQueryIntegTestCase; -import org.elasticsearch.test.index.IndexVersionUtils; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xpack.spatial.LocalStateSpatialPlugin; - -import java.io.IOException; -import java.util.Collection; -import java.util.Collections; - -@UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) -@LuceneTestCase.AwaitsFix(bugUrl = "this is testing legacy functionality so can likely be removed in 9.0") -public class GeoBoundingBoxQueryLegacyGeoShapeWithDocValuesIT extends GeoBoundingBoxQueryIntegTestCase { - - @Override - protected Collection> nodePlugins() { - return Collections.singleton(LocalStateSpatialPlugin.class); - } - - @Override - public XContentBuilder getMapping() throws IOException { - return XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .startObject("properties") - .startObject("location") - .field("type", "geo_shape") - .field("strategy", "recursive") - .endObject() - .endObject() - .endObject() - .endObject(); - } - - @Override - public IndexVersion randomSupportedVersion() { - return IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.V_8_0_0); - } -} From 3129851b8f2208f39b66c20a85ee9a4855468dc9 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 16 Jan 2025 09:30:20 -0800 Subject: [PATCH 25/30] [DOCS] Move settings out of reindex API (#120260) --- docs/reference/docs/reindex.asciidoc | 89 +-------------- .../modules/indices/index_management.asciidoc | 105 ++++++++++++++++-- 2 files changed, 99 insertions(+), 95 deletions(-) diff --git 
a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index 2f6ddd344eaa..455410ad943a 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -1132,91 +1132,4 @@ Reindex from remote supports configurable SSL settings. These must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore. It is not possible to configure SSL in the body of the `_reindex` request. - -The following settings are supported: - -`reindex.ssl.certificate_authorities`:: -List of paths to PEM encoded certificate files that should be trusted. -You cannot specify both `reindex.ssl.certificate_authorities` and -`reindex.ssl.truststore.path`. - -`reindex.ssl.truststore.path`:: -The path to the Java Keystore file that contains the certificates to trust. -This keystore can be in "JKS" or "PKCS#12" format. -You cannot specify both `reindex.ssl.certificate_authorities` and -`reindex.ssl.truststore.path`. - -`reindex.ssl.truststore.password`:: -The password to the truststore (`reindex.ssl.truststore.path`). -deprecated:[7.17.0] Prefer `reindex.ssl.truststore.secure_password` instead. -This setting cannot be used with `reindex.ssl.truststore.secure_password`. - -`reindex.ssl.truststore.secure_password` (<>):: -The password to the truststore (`reindex.ssl.truststore.path`). -This setting cannot be used with `reindex.ssl.truststore.password`. - -`reindex.ssl.truststore.type`:: -The type of the truststore (`reindex.ssl.truststore.path`). -Must be either `jks` or `PKCS12`. If the truststore path ends in ".p12", ".pfx" -or "pkcs12", this setting defaults to `PKCS12`. Otherwise, it defaults to `jks`. - -`reindex.ssl.verification_mode`:: -Indicates the type of verification to protect against man in the middle attacks -and certificate forgery. 
-One of `full` (verify the hostname and the certificate path), `certificate` -(verify the certificate path, but not the hostname) or `none` (perform no -verification - this is strongly discouraged in production environments). -Defaults to `full`. - -`reindex.ssl.certificate`:: -Specifies the path to the PEM encoded certificate (or certificate chain) to be -used for HTTP client authentication (if required by the remote cluster) -This setting requires that `reindex.ssl.key` also be set. -You cannot specify both `reindex.ssl.certificate` and `reindex.ssl.keystore.path`. - -`reindex.ssl.key`:: -Specifies the path to the PEM encoded private key associated with the -certificate used for client authentication (`reindex.ssl.certificate`). -You cannot specify both `reindex.ssl.key` and `reindex.ssl.keystore.path`. - -`reindex.ssl.key_passphrase`:: -Specifies the passphrase to decrypt the PEM encoded private key -(`reindex.ssl.key`) if it is encrypted. -deprecated:[7.17.0] Prefer `reindex.ssl.secure_key_passphrase` instead. -Cannot be used with `reindex.ssl.secure_key_passphrase`. - -`reindex.ssl.secure_key_passphrase` (<>):: -Specifies the passphrase to decrypt the PEM encoded private key -(`reindex.ssl.key`) if it is encrypted. -Cannot be used with `reindex.ssl.key_passphrase`. - -`reindex.ssl.keystore.path`:: -Specifies the path to the keystore that contains a private key and certificate -to be used for HTTP client authentication (if required by the remote cluster). -This keystore can be in "JKS" or "PKCS#12" format. -You cannot specify both `reindex.ssl.key` and `reindex.ssl.keystore.path`. - -`reindex.ssl.keystore.type`:: -The type of the keystore (`reindex.ssl.keystore.path`). Must be either `jks` or `PKCS12`. -If the keystore path ends in ".p12", ".pfx" or "pkcs12", this setting defaults -to `PKCS12`. Otherwise, it defaults to `jks`. - -`reindex.ssl.keystore.password`:: -The password to the keystore (`reindex.ssl.keystore.path`). 
-deprecated:[7.17.0] Prefer `reindex.ssl.keystore.secure_password` instead. -This setting cannot be used with `reindex.ssl.keystore.secure_password`. - -`reindex.ssl.keystore.secure_password` (<>):: -The password to the keystore (`reindex.ssl.keystore.path`). -This setting cannot be used with `reindex.ssl.keystore.password`. - -`reindex.ssl.keystore.key_password`:: -The password for the key in the keystore (`reindex.ssl.keystore.path`). -Defaults to the keystore password. -deprecated:[7.17.0] Prefer `reindex.ssl.keystore.secure_key_password` instead. -This setting cannot be used with `reindex.ssl.keystore.secure_key_password`. - -`reindex.ssl.keystore.secure_key_password` (<>):: -The password for the key in the keystore (`reindex.ssl.keystore.path`). -Defaults to the keystore password. This setting cannot be used with -`reindex.ssl.keystore.key_password`. +Refer to <>. diff --git a/docs/reference/modules/indices/index_management.asciidoc b/docs/reference/modules/indices/index_management.asciidoc index 5f7274b2271d..7aea86bd5ac7 100644 --- a/docs/reference/modules/indices/index_management.asciidoc +++ b/docs/reference/modules/indices/index_management.asciidoc @@ -27,13 +27,6 @@ cannot close open indices. Defaults to `true`. + NOTE: Closed indices still consume a significant amount of disk space. -[[reindex-remote-whitelist]] -// tag::reindex-remote-whitelist[] -`reindex.remote.whitelist` {ess-icon}:: -(<>) -Specifies the hosts that can be <>. Expects a YAML array of `host:port` strings. Consists of a comma-delimited list of `host:port` entries. Defaults to `["\*.io:*", "\*.com:*"]`. 
-// end::reindex-remote-whitelist[] - [[stack-templates-enabled]] `stack.templates.enabled`:: + @@ -52,3 +45,101 @@ This setting also affects the following built-in component templates: include::{es-ref-dir}/indices/put-component-template.asciidoc[tag=built-in-component-templates] -- + + +[discrete] +[[reindex-settings]] +==== Reindex settings + +[[reindex-remote-whitelist]] +// tag::reindex-remote-whitelist[] +`reindex.remote.whitelist` {ess-icon}:: +(<>) +Specifies the hosts that can be <>. Expects a YAML array of `host:port` strings. Consists of a comma-delimited list of `host:port` entries. Defaults to `["\*.io:*", "\*.com:*"]`. +// end::reindex-remote-whitelist[] + +`reindex.ssl.certificate`:: +Specifies the path to the PEM encoded certificate (or certificate chain) to be +used for HTTP client authentication (if required by the remote cluster) +This setting requires that `reindex.ssl.key` also be set. +You cannot specify both `reindex.ssl.certificate` and `reindex.ssl.keystore.path`. + +`reindex.ssl.certificate_authorities`:: +List of paths to PEM encoded certificate files that should be trusted. +You cannot specify both `reindex.ssl.certificate_authorities` and +`reindex.ssl.truststore.path`. + +`reindex.ssl.key`:: +Specifies the path to the PEM encoded private key associated with the +certificate used for client authentication (`reindex.ssl.certificate`). +You cannot specify both `reindex.ssl.key` and `reindex.ssl.keystore.path`. + +`reindex.ssl.key_passphrase`:: +Specifies the passphrase to decrypt the PEM encoded private key +(`reindex.ssl.key`) if it is encrypted. +deprecated:[7.17.0] Prefer `reindex.ssl.secure_key_passphrase` instead. +Cannot be used with `reindex.ssl.secure_key_passphrase`. + +`reindex.ssl.keystore.key_password`:: +The password for the key in the keystore (`reindex.ssl.keystore.path`). +Defaults to the keystore password. +deprecated:[7.17.0] Prefer `reindex.ssl.keystore.secure_key_password` instead. 
+This setting cannot be used with `reindex.ssl.keystore.secure_key_password`. + +`reindex.ssl.keystore.password`:: +The password to the keystore (`reindex.ssl.keystore.path`). +deprecated:[7.17.0] Prefer `reindex.ssl.keystore.secure_password` instead. +This setting cannot be used with `reindex.ssl.keystore.secure_password`. + +`reindex.ssl.keystore.path`:: +Specifies the path to the keystore that contains a private key and certificate +to be used for HTTP client authentication (if required by the remote cluster). +This keystore can be in "JKS" or "PKCS#12" format. +You cannot specify both `reindex.ssl.key` and `reindex.ssl.keystore.path`. + +`reindex.ssl.keystore.type`:: +The type of the keystore (`reindex.ssl.keystore.path`). Must be either `jks` or `PKCS12`. +If the keystore path ends in ".p12", ".pfx" or "pkcs12", this setting defaults +to `PKCS12`. Otherwise, it defaults to `jks`. + +`reindex.ssl.secure_key_passphrase` (<>):: +Specifies the passphrase to decrypt the PEM encoded private key +(`reindex.ssl.key`) if it is encrypted. +Cannot be used with `reindex.ssl.key_passphrase`. + +`reindex.ssl.keystore.secure_key_password` (<>):: +The password for the key in the keystore (`reindex.ssl.keystore.path`). +Defaults to the keystore password. This setting cannot be used with +`reindex.ssl.keystore.key_password`. + +`reindex.ssl.keystore.secure_password` (<>):: +The password to the keystore (`reindex.ssl.keystore.path`). +This setting cannot be used with `reindex.ssl.keystore.password`. + +`reindex.ssl.truststore.password`:: +The password to the truststore (`reindex.ssl.truststore.path`). +deprecated:[7.17.0] Prefer `reindex.ssl.truststore.secure_password` instead. +This setting cannot be used with `reindex.ssl.truststore.secure_password`. + +`reindex.ssl.truststore.path`:: +The path to the Java Keystore file that contains the certificates to trust. +This keystore can be in "JKS" or "PKCS#12" format. 
+You cannot specify both `reindex.ssl.certificate_authorities` and +`reindex.ssl.truststore.path`. + +`reindex.ssl.truststore.secure_password` (<>):: +The password to the truststore (`reindex.ssl.truststore.path`). +This setting cannot be used with `reindex.ssl.truststore.password`. + +`reindex.ssl.truststore.type`:: +The type of the truststore (`reindex.ssl.truststore.path`). +Must be either `jks` or `PKCS12`. If the truststore path ends in ".p12", ".pfx" +or "pkcs12", this setting defaults to `PKCS12`. Otherwise, it defaults to `jks`. + +`reindex.ssl.verification_mode`:: +Indicates the type of verification to protect against man in the middle attacks +and certificate forgery. +One of `full` (verify the hostname and the certificate path), `certificate` +(verify the certificate path, but not the hostname) or `none` (perform no +verification - this is strongly discouraged in production environments). +Defaults to `full`. \ No newline at end of file From 3d41d3bcd5ab70c6f9b843871b42945f28145758 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 17 Jan 2025 05:14:10 +1100 Subject: [PATCH 26/30] Mute org.elasticsearch.xpack.migrate.action.ReindexDatastreamIndexTransportActionIT testTsdbStartEndSet #120314 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index d95095ac81df..d26a8350b387 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -238,6 +238,9 @@ tests: - class: org.elasticsearch.oldrepos.OldRepositoryAccessIT method: testOldSourceOnlyRepoAccess issue: https://github.com/elastic/elasticsearch/issues/120080 +- class: org.elasticsearch.xpack.migrate.action.ReindexDatastreamIndexTransportActionIT + method: testTsdbStartEndSet + issue: https://github.com/elastic/elasticsearch/issues/120314 # Examples: # From a1e6f5f8415dcedbaa44c25381a93ee6995ceda7 Mon Sep 17 00:00:00 2001 From: Bogdan Pintea Date: Thu, 16 Jan 2025 19:34:25 +0100 Subject: 
[PATCH 27/30] Shorten method name in PostOptimizationVerificationAware (#120307) This simply renames a method in the interface: s/postLogicalOptimizationVerification/postOptimizationVerification It also drops a javadoc reference. --- .../capabilities/PostOptimizationVerificationAware.java | 7 +++---- .../xpack/esql/expression/function/fulltext/Match.java | 2 +- .../xpack/esql/expression/function/fulltext/Term.java | 2 +- .../xpack/esql/expression/function/grouping/Bucket.java | 2 +- .../function/scalar/convert/FoldablesConvertFunction.java | 2 +- .../esql/expression/function/scalar/multivalue/MvSort.java | 2 +- .../xpack/esql/optimizer/LogicalVerifier.java | 2 +- 7 files changed, 9 insertions(+), 10 deletions(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/capabilities/PostOptimizationVerificationAware.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/capabilities/PostOptimizationVerificationAware.java index e3628d2554a7..6be3a3d48285 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/capabilities/PostOptimizationVerificationAware.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/capabilities/PostOptimizationVerificationAware.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.esql.capabilities; import org.elasticsearch.xpack.esql.common.Failures; -import org.elasticsearch.xpack.esql.expression.function.grouping.Bucket; /** * Interface implemented by expressions that require validation post logical optimization, @@ -21,13 +20,13 @@ public interface PostOptimizationVerificationAware { * {@link Failures} class. * *

- * Example: the {@link Bucket} function, which produces buckets over a numerical or date field, based on a number of literal + * Example: the {@code Bucket} function, which produces buckets over a numerical or date field, based on a number of literal * arguments needs to check if its arguments are all indeed literals. This is how this verification is performed: *

      *     {@code
      *
      *      @Override
-     *      public void postLogicalOptimizationVerification(Failures failures) {
+     *      public void postOptimizationVerification(Failures failures) {
      *          String operation = sourceText();
      *
      *          failures.add(isFoldable(buckets, operation, SECOND))
@@ -38,5 +37,5 @@ public interface PostOptimizationVerificationAware {
      *     
* */ - void postLogicalOptimizationVerification(Failures failures); + void postOptimizationVerification(Failures failures); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Match.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Match.java index 93f23d2f7ad0..f74c779899a1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Match.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Match.java @@ -203,7 +203,7 @@ public class Match extends FullTextFunction implements PostOptimizationVerificat } @Override - public void postLogicalOptimizationVerification(Failures failures) { + public void postOptimizationVerification(Failures failures) { Expression fieldExpression = field(); // Field may be converted to other data type (field_name :: data_type), so we need to check the original field if (fieldExpression instanceof AbstractConvertFunction convertFunction) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Term.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Term.java index e77f95073050..d3bb08586f49 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Term.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/fulltext/Term.java @@ -100,7 +100,7 @@ public class Term extends FullTextFunction implements PostOptimizationVerificati } @Override - public void postLogicalOptimizationVerification(Failures failures) { + public void postOptimizationVerification(Failures failures) { if (field instanceof FieldAttribute == false) { failures.add( Failure.fail( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java index 7a3e080f5c83..90b35e469ddc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java @@ -409,7 +409,7 @@ public class Bucket extends GroupingFunction implements PostOptimizationVerifica } @Override - public void postLogicalOptimizationVerification(Failures failures) { + public void postOptimizationVerification(Failures failures) { String operation = sourceText(); failures.add(isFoldable(buckets, operation, SECOND)) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java index 57f362f86ff4..6d6da794e070 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java @@ -71,7 +71,7 @@ public abstract class FoldablesConvertFunction extends AbstractConvertFunction i } @Override - public final void postLogicalOptimizationVerification(Failures failures) { + public final void postOptimizationVerification(Failures failures) { failures.add(isFoldable(field(), sourceText(), null)); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java index b68718acfcd0..8c272ae70d8d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java @@ -231,7 +231,7 @@ public class MvSort extends EsqlScalarFunction implements OptionalArgument, Post } @Override - public void postLogicalOptimizationVerification(Failures failures) { + public void postOptimizationVerification(Failures failures) { if (order == null) { return; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java index 7e6eddd3ef04..94248ce2ecd0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalVerifier.java @@ -29,7 +29,7 @@ public final class LogicalVerifier { if (failures.hasFailures() == false) { p.forEachExpression(ex -> { if (ex instanceof PostOptimizationVerificationAware va) { - va.postLogicalOptimizationVerification(failures); + va.postOptimizationVerification(failures); } }); } From 40c34cd896e9a5e60ca71b3e90bfa68c3822fc2f Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Thu, 16 Jan 2025 19:43:51 +0100 Subject: [PATCH 28/30] Optimize ST_EXTENT_AGG for geo_shape and cartesian_shape (#119889) Support for `ST_EXTENT_AGG` was added in https://github.com/elastic/elasticsearch/pull/118829, and then partially optimized in https://github.com/elastic/elasticsearch/pull/118829. This optimization worked only for cartesian_shape fields, and worked by extracting the Extent from the doc-values and re-encoding it as a WKB `BBOX` geometry. This does not work for geo_shape, where we need to retain all 6 integers stored in the doc-values, in order to perform the datelline choice only at reduce time during the final phase of the aggregation. 
Since both geo_shape and cartesian_shape perform the aggregations using integers, and the original Extent values in the doc-values are integers, this PR expands the previous optimization by: * Saving all Extent values into a multi-valued field in an IntBlock for both cartesian_shape and geo_shape * Simplifying the logic around merging intermediate states for all cases (geo/cartesian and grouped and non-grouped aggs) * Widening test cases for testing more combinations of aggregations and types, and fixing a few bugs found * Enhancing cartesian extent to convert from 6 ints to 4 ints at block loading time (for efficiency) * Fixing bugs in both cartesian and geo extents for generating intermediate state with missing groups (flaky tests in serverless) * Moved the int order to always match Rectangle for 4-int and Extent for 6-int cases (improved internal consistency) Since the PR already changed the meaning of the invalid/infinite values of the intermediate state integers, it was already not compatible with the previous cluster versions. We disabled mixed-cluster testing to prevent errors as a result of that. This leaves us the opportunity to make further changes that are mixed-cluster incompatible, hence the decision to perform this consistency update now. 
--- docs/changelog/119889.yaml | 5 + .../utils/SpatialEnvelopeVisitor.java | 130 +++++--- .../mapper/LegacyGeoShapeFieldMapper.java | 14 +- .../mapper/AbstractGeometryFieldMapper.java | 6 - .../AbstractShapeGeometryFieldMapper.java | 58 ++-- ...AbstractShapeGeometryFieldMapperTests.java | 114 ------- .../mapper/ShapeGeometryFieldMapperTests.java | 201 ++++++++++++ .../compute/gen/AggregatorImplementer.java | 120 ++++--- .../gen/GroupingAggregatorImplementer.java | 167 +++++----- ...esianShapeDocValuesAggregatorFunction.java | 182 +++++++++++ ...peDocValuesAggregatorFunctionSupplier.java | 41 +++ ...peDocValuesGroupingAggregatorFunction.java | 219 +++++++++++++ ...nShapeSourceValuesAggregatorFunction.java} | 24 +- ...ourceValuesAggregatorFunctionSupplier.java | 41 +++ ...urceValuesGroupingAggregatorFunction.java} | 28 +- ...ntGeoPointDocValuesAggregatorFunction.java | 62 ++-- ...ntDocValuesGroupingAggregatorFunction.java | 52 ++-- ...eoPointSourceValuesAggregatorFunction.java | 62 ++-- ...ourceValuesGroupingAggregatorFunction.java | 52 ++-- ...ntGeoShapeDocValuesAggregatorFunction.java | 196 ++++++++++++ ...eDocValuesAggregatorFunctionSupplier.java} | 16 +- ...peDocValuesGroupingAggregatorFunction.java | 231 ++++++++++++++ ...oShapeSourceValuesAggregatorFunction.java} | 84 ++--- ...urceValuesAggregatorFunctionSupplier.java} | 17 +- ...urceValuesGroupingAggregatorFunction.java} | 78 ++--- .../spatial/GeoPointEnvelopeVisitor.java | 63 ---- .../spatial/SpatialAggregationUtils.java | 30 +- ...tentCartesianPointDocValuesAggregator.java | 3 + ...tCartesianPointSourceValuesAggregator.java | 5 +- ...tentCartesianShapeDocValuesAggregator.java | 45 +++ ...CartesianShapeSourceValuesAggregator.java} | 9 +- ...tialExtentGeoPointDocValuesAggregator.java | 16 +- ...lExtentGeoPointSourceValuesAggregator.java | 18 +- ...tialExtentGeoShapeDocValuesAggregator.java | 48 +++ ...ExtentGeoShapeSourceValuesAggregator.java} | 23 +- .../spatial/SpatialExtentGroupingState.java | 48 ++- 
...entGroupingStateWrappedLongitudeState.java | 183 ++++++----- ...tialExtentLongitudeWrappingAggregator.java | 28 +- .../spatial/SpatialExtentState.java | 31 ++ ...atialExtentStateWrappedLongitudeState.java | 102 ++++-- .../src/main/resources/spatial.csv-spec | 294 +++++++++++++++++- .../xpack/esql/action/EsqlCapabilities.java | 5 +- .../aggregate/SpatialAggregateFunction.java | 2 +- .../function/aggregate/SpatialCentroid.java | 4 +- .../function/aggregate/SpatialExtent.java | 28 +- .../local/SpatialDocValuesExtraction.java | 5 +- .../local/SpatialShapeBoundsExtraction.java | 39 ++- .../xpack/esql/planner/AggregateMapper.java | 6 +- .../xpack/esql/planner/PlannerUtils.java | 10 +- .../optimizer/PhysicalPlanOptimizerTests.java | 190 ++++++----- .../TestPhysicalOperationProviders.java | 158 ++++++++-- .../GeoShapeWithDocValuesFieldMapper.java | 16 +- .../index/mapper/ShapeFieldMapper.java | 25 +- 53 files changed, 2669 insertions(+), 965 deletions(-) create mode 100644 docs/changelog/119889.yaml delete mode 100644 server/src/test/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapperTests.java create mode 100644 server/src/test/java/org/elasticsearch/index/mapper/ShapeGeometryFieldMapperTests.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeDocValuesAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeDocValuesAggregatorFunctionSupplier.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeDocValuesGroupingAggregatorFunction.java rename x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/{SpatialExtentCartesianShapeAggregatorFunction.java => SpatialExtentCartesianShapeSourceValuesAggregatorFunction.java} (81%) create mode 
100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeSourceValuesAggregatorFunctionSupplier.java rename x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/{SpatialExtentCartesianShapeGroupingAggregatorFunction.java => SpatialExtentCartesianShapeSourceValuesGroupingAggregatorFunction.java} (82%) create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeDocValuesAggregatorFunction.java rename x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/{SpatialExtentGeoShapeAggregatorFunctionSupplier.java => SpatialExtentGeoShapeDocValuesAggregatorFunctionSupplier.java} (55%) create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeDocValuesGroupingAggregatorFunction.java rename x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/{SpatialExtentGeoShapeAggregatorFunction.java => SpatialExtentGeoShapeSourceValuesAggregatorFunction.java} (62%) rename x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/{SpatialExtentCartesianShapeAggregatorFunctionSupplier.java => SpatialExtentGeoShapeSourceValuesAggregatorFunctionSupplier.java} (54%) rename x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/{SpatialExtentGeoShapeGroupingAggregatorFunction.java => SpatialExtentGeoShapeSourceValuesGroupingAggregatorFunction.java} (67%) delete mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/GeoPointEnvelopeVisitor.java create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeDocValuesAggregator.java rename 
x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/{SpatialExtentCartesianShapeAggregator.java => SpatialExtentCartesianShapeSourceValuesAggregator.java} (67%) create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeDocValuesAggregator.java rename x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/{SpatialExtentGeoShapeAggregator.java => SpatialExtentGeoShapeSourceValuesAggregator.java} (52%) diff --git a/docs/changelog/119889.yaml b/docs/changelog/119889.yaml new file mode 100644 index 000000000000..e07d8643e379 --- /dev/null +++ b/docs/changelog/119889.yaml @@ -0,0 +1,5 @@ +pr: 119889 +summary: Optimize ST_EXTENT_AGG for `geo_shape` and `cartesian_shape` +area: "ES|QL" +type: enhancement +issues: [] diff --git a/libs/geo/src/main/java/org/elasticsearch/geometry/utils/SpatialEnvelopeVisitor.java b/libs/geo/src/main/java/org/elasticsearch/geometry/utils/SpatialEnvelopeVisitor.java index 696be2808ed1..f00db4f1e660 100644 --- a/libs/geo/src/main/java/org/elasticsearch/geometry/utils/SpatialEnvelopeVisitor.java +++ b/libs/geo/src/main/java/org/elasticsearch/geometry/utils/SpatialEnvelopeVisitor.java @@ -116,6 +116,9 @@ public class SpatialEnvelopeVisitor implements GeometryVisitor */ public static class GeoPointVisitor implements PointVisitor { - protected double minY = Double.POSITIVE_INFINITY; - protected double maxY = Double.NEGATIVE_INFINITY; - protected double minNegX = Double.POSITIVE_INFINITY; - protected double maxNegX = Double.NEGATIVE_INFINITY; - protected double minPosX = Double.POSITIVE_INFINITY; - protected double maxPosX = Double.NEGATIVE_INFINITY; + protected double top = Double.NEGATIVE_INFINITY; + protected double bottom = Double.POSITIVE_INFINITY; + protected double negLeft = Double.POSITIVE_INFINITY; + protected double negRight = Double.NEGATIVE_INFINITY; + protected double posLeft = Double.POSITIVE_INFINITY; 
+ protected double posRight = Double.NEGATIVE_INFINITY; private final WrapLongitude wrapLongitude; @@ -199,69 +210,104 @@ public class SpatialEnvelopeVisitor implements GeometryVisitor= 0) { - minPosX = Math.min(minPosX, x); - maxPosX = Math.max(maxPosX, x); + posLeft = Math.min(posLeft, x); + posRight = Math.max(posRight, x); } else { - minNegX = Math.min(minNegX, x); - maxNegX = Math.max(maxNegX, x); + negLeft = Math.min(negLeft, x); + negRight = Math.max(negRight, x); } } @Override public boolean isValid() { - return minY != Double.POSITIVE_INFINITY; + return bottom != Double.POSITIVE_INFINITY; } @Override public Rectangle getResult() { - return getResult(minNegX, minPosX, maxNegX, maxPosX, maxY, minY, wrapLongitude); + return getResult(top, bottom, negLeft, negRight, posLeft, posRight, wrapLongitude); } - protected static Rectangle getResult( - double minNegX, - double minPosX, - double maxNegX, - double maxPosX, - double maxY, - double minY, + @Override + public void reset() { + bottom = Double.POSITIVE_INFINITY; + top = Double.NEGATIVE_INFINITY; + negLeft = Double.POSITIVE_INFINITY; + negRight = Double.NEGATIVE_INFINITY; + posLeft = Double.POSITIVE_INFINITY; + posRight = Double.NEGATIVE_INFINITY; + } + + public static Rectangle getResult( + double top, + double bottom, + double negLeft, + double negRight, + double posLeft, + double posRight, WrapLongitude wrapLongitude ) { - assert Double.isFinite(maxY); - if (Double.isInfinite(minPosX)) { - return new Rectangle(minNegX, maxNegX, maxY, minY); - } else if (Double.isInfinite(minNegX)) { - return new Rectangle(minPosX, maxPosX, maxY, minY); + assert Double.isFinite(top); + if (posRight == Double.NEGATIVE_INFINITY) { + return new Rectangle(negLeft, negRight, top, bottom); + } else if (negLeft == Double.POSITIVE_INFINITY) { + return new Rectangle(posLeft, posRight, top, bottom); } else { return switch (wrapLongitude) { - case NO_WRAP -> new Rectangle(minNegX, maxPosX, maxY, minY); - case WRAP -> maybeWrap(minNegX, 
minPosX, maxNegX, maxPosX, maxY, minY); + case NO_WRAP -> new Rectangle(negLeft, posRight, top, bottom); + case WRAP -> maybeWrap(top, bottom, negLeft, negRight, posLeft, posRight); }; } } - private static Rectangle maybeWrap(double minNegX, double minPosX, double maxNegX, double maxPosX, double maxY, double minY) { - double unwrappedWidth = maxPosX - minNegX; - double wrappedWidth = 360 + maxNegX - minPosX; + private static Rectangle maybeWrap(double top, double bottom, double negLeft, double negRight, double posLeft, double posRight) { + double unwrappedWidth = posRight - negLeft; + double wrappedWidth = 360 + negRight - posLeft; return unwrappedWidth <= wrappedWidth - ? new Rectangle(minNegX, maxPosX, maxY, minY) - : new Rectangle(minPosX, maxNegX, maxY, minY); + ? new Rectangle(negLeft, posRight, top, bottom) + : new Rectangle(posLeft, negRight, top, bottom); } } diff --git a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java index 6127b4beb71f..d2bda8c4cc81 100644 --- a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java +++ b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java @@ -33,6 +33,7 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.AbstractShapeGeometryFieldMapper; +import org.elasticsearch.index.mapper.BlockLoader; import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.DocumentParsingException; import org.elasticsearch.index.mapper.FieldMapper; @@ -46,7 +47,6 @@ import org.elasticsearch.legacygeo.XShapeCollection; import org.elasticsearch.legacygeo.builders.ShapeBuilder; import org.elasticsearch.legacygeo.parsers.ShapeParser; import 
org.elasticsearch.legacygeo.query.LegacyGeoShapeQueryProcessor; -import org.elasticsearch.lucene.spatial.CoordinateEncoder; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.locationtech.spatial4j.shape.Point; @@ -84,6 +84,7 @@ import java.util.stream.Collectors; * "field" : "POLYGON ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0)) * * @deprecated use the field mapper in the spatial module + * TODO: Remove this class once we no longer need to supported reading 7.x indices that might have this field type */ @Deprecated public class LegacyGeoShapeFieldMapper extends AbstractShapeGeometryFieldMapper> { @@ -533,14 +534,9 @@ public class LegacyGeoShapeFieldMapper extends AbstractShapeGeometryFieldMapper< } @Override - protected boolean isBoundsExtractionSupported() { - // Extracting bounds for geo shapes is not implemented yet. - return false; - } - - @Override - protected CoordinateEncoder coordinateEncoder() { - return CoordinateEncoder.GEO; + public BlockLoader blockLoader(BlockLoaderContext blContext) { + // Legacy geo-shapes do not support doc-values, we can only lead from source in ES|QL + return blockLoaderFromSource(blContext); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java index c38b5beeb55a..6e00cc765bd8 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java @@ -180,12 +180,6 @@ public abstract class AbstractGeometryFieldMapper extends FieldMapper { }; } - @Override - public BlockLoader blockLoader(BlockLoaderContext blContext) { - // Currently we can only load from source in ESQL - return blockLoaderFromSource(blContext); - } - protected BlockLoader blockLoaderFromSource(BlockLoaderContext blContext) { ValueFetcher 
fetcher = valueFetcher(blContext.sourcePaths(name()), nullValue, GeometryFormatterFactory.WKB); // TODO consider optimization using BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java index 318e877c7ebb..22b198b10a7a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java @@ -10,16 +10,12 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.geo.Orientation; -import org.elasticsearch.geometry.Rectangle; -import org.elasticsearch.geometry.utils.WellKnownBinary; -import org.elasticsearch.lucene.spatial.CoordinateEncoder; +import org.elasticsearch.lucene.spatial.Extent; import org.elasticsearch.lucene.spatial.GeometryDocValueReader; import java.io.IOException; -import java.nio.ByteOrder; import java.util.Map; import java.util.function.Function; @@ -75,29 +71,27 @@ public abstract class AbstractShapeGeometryFieldMapper extends AbstractGeomet @Override protected Object nullValueAsSource(T nullValue) { - // we don't support null value fors shapes + // we don't support null value for shapes return nullValue; } - @Override - public BlockLoader blockLoader(BlockLoaderContext blContext) { - return blContext.fieldExtractPreference() == FieldExtractPreference.EXTRACT_SPATIAL_BOUNDS && isBoundsExtractionSupported() - ? 
new BoundsBlockLoader(name(), coordinateEncoder()) - : blockLoaderFromSource(blContext); - } - - protected abstract boolean isBoundsExtractionSupported(); - - protected abstract CoordinateEncoder coordinateEncoder(); - - // Visible for testing - static class BoundsBlockLoader extends BlockDocValuesReader.DocValuesBlockLoader { + protected static class BoundsBlockLoader extends BlockDocValuesReader.DocValuesBlockLoader { private final String fieldName; - private final CoordinateEncoder encoder; - BoundsBlockLoader(String fieldName, CoordinateEncoder encoder) { + protected BoundsBlockLoader(String fieldName) { this.fieldName = fieldName; - this.encoder = encoder; + } + + protected void writeExtent(BlockLoader.IntBuilder builder, Extent extent) { + // We store the 6 values as a single multi-valued field, in the same order as the fields in the Extent class + builder.beginPositionEntry(); + builder.appendInt(extent.top); + builder.appendInt(extent.bottom); + builder.appendInt(extent.negLeft); + builder.appendInt(extent.negRight); + builder.appendInt(extent.posLeft); + builder.appendInt(extent.posRight); + builder.endPositionEntry(); } @Override @@ -107,7 +101,7 @@ public abstract class AbstractShapeGeometryFieldMapper extends AbstractGeomet public BlockLoader.Block read(BlockLoader.BlockFactory factory, BlockLoader.Docs docs) throws IOException { var binaryDocValues = context.reader().getBinaryDocValues(fieldName); var reader = new GeometryDocValueReader(); - try (var builder = factory.bytesRefs(docs.count())) { + try (var builder = factory.ints(docs.count())) { for (int i = 0; i < docs.count(); i++) { read(binaryDocValues, docs.get(i), reader, builder); } @@ -119,27 +113,17 @@ public abstract class AbstractShapeGeometryFieldMapper extends AbstractGeomet public void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws IOException { var binaryDocValues = context.reader().getBinaryDocValues(fieldName); var reader = new 
GeometryDocValueReader(); - read(binaryDocValues, docId, reader, (BytesRefBuilder) builder); + read(binaryDocValues, docId, reader, (IntBuilder) builder); } - private void read(BinaryDocValues binaryDocValues, int doc, GeometryDocValueReader reader, BytesRefBuilder builder) + private void read(BinaryDocValues binaryDocValues, int doc, GeometryDocValueReader reader, IntBuilder builder) throws IOException { if (binaryDocValues.advanceExact(doc) == false) { builder.appendNull(); return; } reader.reset(binaryDocValues.binaryValue()); - var extent = reader.getExtent(); - // This is rather silly: an extent is already encoded as ints, but we convert it to Rectangle to - // preserve its properties as a WKB shape, only to convert it back to ints when we compute the - // aggregation. An obvious optimization would be to avoid this back-and-forth conversion. - var rectangle = new Rectangle( - encoder.decodeX(extent.minX()), - encoder.decodeX(extent.maxX()), - encoder.decodeY(extent.maxY()), - encoder.decodeY(extent.minY()) - ); - builder.appendBytesRef(new BytesRef(WellKnownBinary.toWKB(rectangle, ByteOrder.LITTLE_ENDIAN))); + writeExtent(builder, reader.getExtent()); } @Override @@ -151,7 +135,7 @@ public abstract class AbstractShapeGeometryFieldMapper extends AbstractGeomet @Override public BlockLoader.Builder builder(BlockLoader.BlockFactory factory, int expectedCount) { - return factory.bytesRefs(expectedCount); + return factory.ints(expectedCount); } } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapperTests.java deleted file mode 100644 index 130c10130c4f..000000000000 --- a/server/src/test/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapperTests.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.index.mapper; - -import org.apache.lucene.document.Document; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.LeafReader; -import org.apache.lucene.store.Directory; -import org.apache.lucene.tests.index.RandomIndexWriter; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.geo.Orientation; -import org.elasticsearch.core.Strings; -import org.elasticsearch.geo.GeometryTestUtils; -import org.elasticsearch.geo.ShapeTestUtils; -import org.elasticsearch.geometry.Geometry; -import org.elasticsearch.geometry.Rectangle; -import org.elasticsearch.geometry.utils.SpatialEnvelopeVisitor; -import org.elasticsearch.lucene.spatial.BinaryShapeDocValuesField; -import org.elasticsearch.lucene.spatial.CartesianShapeIndexer; -import org.elasticsearch.lucene.spatial.CoordinateEncoder; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.hamcrest.RectangleMatcher; -import org.elasticsearch.test.hamcrest.WellKnownBinaryBytesRefMatcher; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Optional; -import java.util.function.Function; -import java.util.function.Supplier; -import java.util.stream.IntStream; - -public class AbstractShapeGeometryFieldMapperTests extends ESTestCase { - public void testCartesianBoundsBlockLoader() throws IOException { - testBoundsBlockLoaderAux( - CoordinateEncoder.CARTESIAN, - () -> ShapeTestUtils.randomGeometryWithoutCircle(0, false), - CartesianShapeIndexer::new, - SpatialEnvelopeVisitor::visitCartesian - ); - } - - // TODO when we turn this optimization on for geo, this test should pass. 
- public void ignoreTestGeoBoundsBlockLoader() throws IOException { - testBoundsBlockLoaderAux( - CoordinateEncoder.GEO, - () -> GeometryTestUtils.randomGeometryWithoutCircle(0, false), - field -> new GeoShapeIndexer(Orientation.RIGHT, field), - g -> SpatialEnvelopeVisitor.visitGeo(g, SpatialEnvelopeVisitor.WrapLongitude.WRAP) - ); - } - - private static void testBoundsBlockLoaderAux( - CoordinateEncoder encoder, - Supplier generator, - Function indexerFactory, - Function> visitor - ) throws IOException { - var geometries = IntStream.range(0, 50).mapToObj(i -> generator.get()).toList(); - var loader = new AbstractShapeGeometryFieldMapper.AbstractShapeGeometryFieldType.BoundsBlockLoader("field", encoder); - try (Directory directory = newDirectory()) { - try (var iw = new RandomIndexWriter(random(), directory)) { - for (Geometry geometry : geometries) { - var shape = new BinaryShapeDocValuesField("field", encoder); - shape.add(indexerFactory.apply("field").indexShape(geometry), geometry); - var doc = new Document(); - doc.add(shape); - iw.addDocument(doc); - } - } - - var expected = new ArrayList(); - var byteRefResults = new ArrayList(); - int currentIndex = 0; - try (DirectoryReader reader = DirectoryReader.open(directory)) { - for (var leaf : reader.leaves()) { - LeafReader leafReader = leaf.reader(); - int numDocs = leafReader.numDocs(); - // We specifically check just the even indices, to verify the loader can skip documents correctly. 
- int[] array = evenArray(numDocs); - for (int i = 0; i < array.length; i += 1) { - expected.add(visitor.apply(geometries.get(array[i] + currentIndex)).get()); - } - try (var block = (TestBlock) loader.reader(leaf).read(TestBlock.factory(leafReader.numDocs()), TestBlock.docs(array))) { - for (int i = 0; i < block.size(); i++) { - byteRefResults.add((BytesRef) block.get(i)); - } - } - currentIndex += numDocs; - } - } - - for (int i = 0; i < expected.size(); i++) { - Rectangle rectangle = expected.get(i); - var geoString = rectangle.toString(); - assertThat( - Strings.format("geometry '%s' wasn't extracted correctly", geoString), - byteRefResults.get(i), - WellKnownBinaryBytesRefMatcher.encodes(RectangleMatcher.closeToFloat(rectangle, 1e-3, encoder)) - ); - } - } - } - - private static int[] evenArray(int maxIndex) { - return IntStream.range(0, maxIndex / 2).map(x -> x * 2).toArray(); - } -} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ShapeGeometryFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ShapeGeometryFieldMapperTests.java new file mode 100644 index 000000000000..0322286277b2 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/mapper/ShapeGeometryFieldMapperTests.java @@ -0,0 +1,201 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.document.Document; +import org.apache.lucene.geo.GeoEncodingUtils; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.elasticsearch.common.geo.GeometryNormalizer; +import org.elasticsearch.core.Strings; +import org.elasticsearch.geo.GeometryTestUtils; +import org.elasticsearch.geo.ShapeTestUtils; +import org.elasticsearch.geometry.Geometry; +import org.elasticsearch.geometry.Rectangle; +import org.elasticsearch.geometry.utils.SpatialEnvelopeVisitor; +import org.elasticsearch.lucene.spatial.BinaryShapeDocValuesField; +import org.elasticsearch.lucene.spatial.CartesianShapeIndexer; +import org.elasticsearch.lucene.spatial.CoordinateEncoder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.hamcrest.RectangleMatcher; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Optional; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.IntStream; + +import static org.apache.lucene.geo.GeoEncodingUtils.decodeLongitude; +import static org.elasticsearch.common.geo.Orientation.RIGHT; + +public class ShapeGeometryFieldMapperTests extends ESTestCase { + public void testCartesianBoundsBlockLoader() throws IOException { + testBoundsBlockLoader( + CoordinateEncoder.CARTESIAN, + () -> ShapeTestUtils.randomGeometryWithoutCircle(0, false), + CartesianShapeIndexer::new, + SpatialEnvelopeVisitor::visitCartesian, + ShapeGeometryFieldMapperTests::makeCartesianRectangle + ); + } + + // TODO: Re-enable this test after fixing the bug in the ShapeEnvelopeVisitor regarding Rectangle crossing the dateline + // Currently it is flaky if the geometries include a Rectangle like one defined in the test below + public void 
ignoreTestGeoBoundsBlockLoader() throws IOException { + testBoundsBlockLoader( + CoordinateEncoder.GEO, + () -> normalize(GeometryTestUtils.randomGeometryWithoutCircle(0, false)), + field -> new GeoShapeIndexer(RIGHT, field), + g -> SpatialEnvelopeVisitor.visitGeo(g, SpatialEnvelopeVisitor.WrapLongitude.WRAP), + ShapeGeometryFieldMapperTests::makeGeoRectangle + ); + } + + // TODO: Re-enable this test after fixing the bug in the SpatialEnvelopeVisitor regarding Rectangle crossing the dateline + // See the difference between GeoShapeIndexer.visitRectangle() and SpatialEnvelopeVisitor.GeoPointVisitor.visitRectangle() + public void ignoreTestRectangleCrossingDateline() throws IOException { + var geometries = new ArrayList(); + geometries.add(new Rectangle(180, 51.62247094594227, -18.5, -24.902304006345503)); + testBoundsBlockLoaderAux( + CoordinateEncoder.GEO, + geometries, + field -> new GeoShapeIndexer(RIGHT, field), + g -> SpatialEnvelopeVisitor.visitGeo(g, SpatialEnvelopeVisitor.WrapLongitude.WRAP), + ShapeGeometryFieldMapperTests::makeGeoRectangle + ); + } + + private Geometry normalize(Geometry geometry) { + return GeometryNormalizer.needsNormalize(RIGHT, geometry) ? 
GeometryNormalizer.apply(RIGHT, geometry) : geometry; + } + + private static void testBoundsBlockLoader( + CoordinateEncoder encoder, + Supplier generator, + Function indexerFactory, + Function> visitor, + BiFunction rectangleMaker + ) throws IOException { + var geometries = IntStream.range(0, 50).mapToObj(i -> generator.get()).toList(); + testBoundsBlockLoaderAux(encoder, geometries, indexerFactory, visitor, rectangleMaker); + } + + private static void testBoundsBlockLoaderAux( + CoordinateEncoder encoder, + java.util.List geometries, + Function indexerFactory, + Function> visitor, + BiFunction rectangleMaker + ) throws IOException { + var loader = new AbstractShapeGeometryFieldMapper.AbstractShapeGeometryFieldType.BoundsBlockLoader("field"); + try (Directory directory = newDirectory()) { + try (var iw = new RandomIndexWriter(random(), directory)) { + for (Geometry geometry : geometries) { + var shape = new BinaryShapeDocValuesField("field", encoder); + shape.add(indexerFactory.apply("field").indexShape(geometry), geometry); + var doc = new Document(); + doc.add(shape); + iw.addDocument(doc); + } + } + + var expected = new ArrayList(); + ArrayList intArrayResults = new ArrayList<>(); + int currentIndex = 0; + try (DirectoryReader reader = DirectoryReader.open(directory)) { + for (var leaf : reader.leaves()) { + LeafReader leafReader = leaf.reader(); + int numDocs = leafReader.numDocs(); + // We specifically check just the even indices, to verify the loader can skip documents correctly. 
+ int[] array = evenArray(numDocs); + for (int j : array) { + expected.add(visitor.apply(geometries.get(j + currentIndex)).get()); + } + try (var block = (TestBlock) loader.reader(leaf).read(TestBlock.factory(leafReader.numDocs()), TestBlock.docs(array))) { + for (int i = 0; i < block.size(); i++) { + intArrayResults.add(block.get(i)); + } + } + currentIndex += numDocs; + } + } + + for (int i = 0; i < expected.size(); i++) { + Rectangle rectangle = expected.get(i); + var geoString = rectangle.toString(); + Rectangle result = rectangleMaker.apply(encoder, intArrayResults.get(i)); + assertThat( + Strings.format("geometry[%d] '%s' wasn't extracted correctly", i, geoString), + result, + RectangleMatcher.closeToFloat(rectangle, 1e-3, encoder) + ); + } + } + } + + private static Rectangle makeCartesianRectangle(CoordinateEncoder encoder, Object integers) { + if (integers instanceof ArrayList list) { + int[] ints = list.stream().mapToInt(x -> (int) x).toArray(); + if (list.size() == 6) { + // Data in order defined by Extent class + double top = encoder.decodeY(ints[0]); + double bottom = encoder.decodeY(ints[1]); + double negLeft = encoder.decodeX(ints[2]); + double negRight = encoder.decodeX(ints[3]); + double posLeft = encoder.decodeX(ints[4]); + double posRight = encoder.decodeX(ints[5]); + return new Rectangle(Math.min(negLeft, posLeft), Math.max(negRight, posRight), top, bottom); + } else if (list.size() == 4) { + // Data in order defined by Rectangle class + return new Rectangle( + encoder.decodeX(ints[0]), + encoder.decodeX(ints[1]), + encoder.decodeY(ints[2]), + encoder.decodeY(ints[3]) + ); + } else { + throw new IllegalArgumentException("Expected 4 or 6 integers"); + } + } + throw new IllegalArgumentException("Expected an array of integers"); + } + + private static Rectangle makeGeoRectangle(CoordinateEncoder encoder, Object integers) { + if (integers instanceof ArrayList list) { + int[] ints = list.stream().mapToInt(x -> (int) x).toArray(); + if (list.size() != 
6) { + throw new IllegalArgumentException("Expected 6 integers"); + } + // Data in order defined by Extent class + return asGeoRectangle(ints[0], ints[1], ints[2], ints[3], ints[4], ints[5]); + } + throw new IllegalArgumentException("Expected an array of integers"); + } + + private static Rectangle asGeoRectangle(int top, int bottom, int negLeft, int negRight, int posLeft, int posRight) { + return SpatialEnvelopeVisitor.GeoPointVisitor.getResult( + GeoEncodingUtils.decodeLatitude(top), + GeoEncodingUtils.decodeLatitude(bottom), + negLeft <= 0 ? decodeLongitude(negLeft) : Double.POSITIVE_INFINITY, + negRight <= 0 ? decodeLongitude(negRight) : Double.NEGATIVE_INFINITY, + posLeft >= 0 ? decodeLongitude(posLeft) : Double.POSITIVE_INFINITY, + posRight >= 0 ? decodeLongitude(posRight) : Double.NEGATIVE_INFINITY, + SpatialEnvelopeVisitor.WrapLongitude.WRAP + ); + } + + private static int[] evenArray(int maxIndex) { + return IntStream.range(0, maxIndex / 2).map(x -> x * 2).toArray(); + } +} diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java index 4aee9ea517d8..4589ab13a4e3 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java @@ -29,6 +29,7 @@ import java.util.stream.Stream; import javax.lang.model.element.ExecutableElement; import javax.lang.model.element.Modifier; import javax.lang.model.element.TypeElement; +import javax.lang.model.type.TypeKind; import javax.lang.model.type.TypeMirror; import javax.lang.model.util.Elements; @@ -85,6 +86,7 @@ public class AggregatorImplementer { private final boolean stateTypeHasSeen; private final boolean stateTypeHasFailed; private final boolean valuesIsBytesRef; + private final boolean valuesIsArray; 
private final List intermediateState; private final List createParameters; @@ -126,7 +128,8 @@ public class AggregatorImplementer { elements.getPackageOf(declarationType).toString(), (declarationType.getSimpleName() + "AggregatorFunction").replace("AggregatorAggregator", "Aggregator") ); - this.valuesIsBytesRef = BYTES_REF.equals(TypeName.get(combine.getParameters().get(combine.getParameters().size() - 1).asType())); + this.valuesIsBytesRef = BYTES_REF.equals(valueTypeName()); + this.valuesIsArray = TypeKind.ARRAY.equals(valueTypeKind()); intermediateState = Arrays.stream(interStateAnno).map(IntermediateStateDesc::newIntermediateStateDesc).toList(); } @@ -143,10 +146,11 @@ public class AggregatorImplementer { if (false == initReturn.isPrimitive()) { return initReturn; } + String simpleName = firstUpper(initReturn.toString()); if (warnExceptions.isEmpty()) { - return ClassName.get("org.elasticsearch.compute.aggregation", firstUpper(initReturn.toString()) + "State"); + return ClassName.get("org.elasticsearch.compute.aggregation", simpleName + "State"); } - return ClassName.get("org.elasticsearch.compute.aggregation", firstUpper(initReturn.toString()) + "FallibleState"); + return ClassName.get("org.elasticsearch.compute.aggregation", simpleName + "FallibleState"); } static String valueType(ExecutableElement init, ExecutableElement combine) { @@ -177,7 +181,7 @@ public class AggregatorImplementer { case "double" -> DOUBLE_BLOCK; case "float" -> FLOAT_BLOCK; case "long" -> LONG_BLOCK; - case "int" -> INT_BLOCK; + case "int", "int[]" -> INT_BLOCK; case "org.apache.lucene.util.BytesRef" -> BYTES_REF_BLOCK; default -> throw new IllegalArgumentException("unknown block type for " + valueType(init, combine)); }; @@ -189,7 +193,7 @@ public class AggregatorImplementer { case "double" -> DOUBLE_VECTOR; case "float" -> FLOAT_VECTOR; case "long" -> LONG_VECTOR; - case "int" -> INT_VECTOR; + case "int", "int[]" -> INT_VECTOR; case "org.apache.lucene.util.BytesRef" -> 
BYTES_REF_VECTOR; default -> throw new IllegalArgumentException("unknown vector type for " + valueType(init, combine)); }; @@ -390,6 +394,10 @@ public class AggregatorImplementer { if (masked) { builder.addParameter(BOOLEAN_VECTOR, "mask"); } + if (valuesIsArray) { + builder.addComment("This type does not support vectors because all values are multi-valued"); + return builder.build(); + } if (stateTypeHasSeen) { builder.addStatement("state.seen(true)"); @@ -437,9 +445,18 @@ public class AggregatorImplementer { } builder.addStatement("int start = block.getFirstValueIndex(p)"); builder.addStatement("int end = start + block.getValueCount(p)"); - builder.beginControlFlow("for (int i = start; i < end; i++)"); - combineRawInput(builder, "block"); - builder.endControlFlow(); + if (valuesIsArray) { + String arrayType = valueTypeString(); + builder.addStatement("$L[] valuesArray = new $L[end - start]", arrayType, arrayType); + builder.beginControlFlow("for (int i = start; i < end; i++)"); + builder.addStatement("valuesArray[i-start] = $L.get$L(i)", "block", firstUpper(arrayType)); + builder.endControlFlow(); + combineRawInputForArray(builder, "valuesArray"); + } else { + builder.beginControlFlow("for (int i = start; i < end; i++)"); + combineRawInput(builder, "block"); + builder.endControlFlow(); + } } builder.endControlFlow(); if (combineValueCount != null) { @@ -450,26 +467,17 @@ public class AggregatorImplementer { private void combineRawInput(MethodSpec.Builder builder, String blockVariable) { TypeName returnType = TypeName.get(combine.getReturnType()); - if (warnExceptions.isEmpty() == false) { - builder.beginControlFlow("try"); - } - if (valuesIsBytesRef) { - combineRawInputForBytesRef(builder, blockVariable); - } else if (returnType.isPrimitive()) { - combineRawInputForPrimitive(returnType, builder, blockVariable); - } else if (returnType == TypeName.VOID) { - combineRawInputForVoid(builder, blockVariable); - } else { - throw new IllegalArgumentException("combine 
must return void or a primitive"); - } - if (warnExceptions.isEmpty() == false) { - String catchPattern = "catch (" + warnExceptions.stream().map(m -> "$T").collect(Collectors.joining(" | ")) + " e)"; - builder.nextControlFlow(catchPattern, warnExceptions.stream().map(TypeName::get).toArray()); - builder.addStatement("warnings.registerException(e)"); - builder.addStatement("state.failed(true)"); - builder.addStatement("return"); - builder.endControlFlow(); - } + warningsBlock(builder, () -> { + if (valuesIsBytesRef) { + combineRawInputForBytesRef(builder, blockVariable); + } else if (returnType.isPrimitive()) { + combineRawInputForPrimitive(returnType, builder, blockVariable); + } else if (returnType == TypeName.VOID) { + combineRawInputForVoid(builder, blockVariable); + } else { + throw new IllegalArgumentException("combine must return void or a primitive"); + } + }); } private void combineRawInputForPrimitive(TypeName returnType, MethodSpec.Builder builder, String blockVariable) { @@ -483,6 +491,10 @@ public class AggregatorImplementer { ); } + private void combineRawInputForArray(MethodSpec.Builder builder, String arrayVariable) { + warningsBlock(builder, () -> builder.addStatement("$T.combine(state, $L)", declarationType, arrayVariable)); + } + private void combineRawInputForVoid(MethodSpec.Builder builder, String blockVariable) { builder.addStatement( "$T.combine(state, $L.get$L(i))", @@ -497,6 +509,21 @@ public class AggregatorImplementer { builder.addStatement("$T.combine(state, $L.getBytesRef(i, scratch))", declarationType, blockVariable); } + private void warningsBlock(MethodSpec.Builder builder, Runnable block) { + if (warnExceptions.isEmpty() == false) { + builder.beginControlFlow("try"); + } + block.run(); + if (warnExceptions.isEmpty() == false) { + String catchPattern = "catch (" + warnExceptions.stream().map(m -> "$T").collect(Collectors.joining(" | ")) + " e)"; + builder.nextControlFlow(catchPattern, 
warnExceptions.stream().map(TypeName::get).toArray()); + builder.addStatement("warnings.registerException(e)"); + builder.addStatement("state.failed(true)"); + builder.addStatement("return"); + builder.endControlFlow(); + } + } + private MethodSpec addIntermediateInput() { MethodSpec.Builder builder = MethodSpec.methodBuilder("addIntermediateInput"); builder.addAnnotation(Override.class).addModifiers(Modifier.PUBLIC).addParameter(PAGE, "page"); @@ -529,20 +556,12 @@ public class AggregatorImplementer { builder.nextControlFlow("else if (seen.getBoolean(0))"); } - if (warnExceptions.isEmpty() == false) { - builder.beginControlFlow("try"); - } - var state = intermediateState.get(0); - var s = "state.$L($T.combine(state.$L(), " + state.name() + "." + vectorAccessorName(state.elementType()) + "(0)))"; - builder.addStatement(s, primitiveStateMethod(), declarationType, primitiveStateMethod()); - builder.addStatement("state.seen(true)"); - if (warnExceptions.isEmpty() == false) { - String catchPattern = "catch (" + warnExceptions.stream().map(m -> "$T").collect(Collectors.joining(" | ")) + " e)"; - builder.nextControlFlow(catchPattern, warnExceptions.stream().map(TypeName::get).toArray()); - builder.addStatement("warnings.registerException(e)"); - builder.addStatement("state.failed(true)"); - builder.endControlFlow(); - } + warningsBlock(builder, () -> { + var state = intermediateState.get(0); + var s = "state.$L($T.combine(state.$L(), " + state.name() + "." + vectorAccessorName(state.elementType()) + "(0)))"; + builder.addStatement(s, primitiveStateMethod(), declarationType, primitiveStateMethod()); + builder.addStatement("state.seen(true)"); + }); builder.endControlFlow(); } else { throw new IllegalArgumentException("Don't know how to combine intermediate input. 
Define combineIntermediate"); @@ -693,4 +712,21 @@ public class AggregatorImplementer { } } } + + private TypeMirror valueTypeMirror() { + return combine.getParameters().get(combine.getParameters().size() - 1).asType(); + } + + private TypeName valueTypeName() { + return TypeName.get(valueTypeMirror()); + } + + private TypeKind valueTypeKind() { + return valueTypeMirror().getKind(); + } + + private String valueTypeString() { + String valueTypeString = TypeName.get(valueTypeMirror()).toString(); + return valuesIsArray ? valueTypeString.substring(0, valueTypeString.length() - 2) : valueTypeString; + } } diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java index 9e8112e10f87..bae8800d3d62 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java @@ -20,7 +20,6 @@ import org.elasticsearch.compute.ann.IntermediateState; import java.util.Arrays; import java.util.List; -import java.util.Locale; import java.util.function.Consumer; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -28,10 +27,12 @@ import java.util.stream.Collectors; import javax.lang.model.element.ExecutableElement; import javax.lang.model.element.Modifier; import javax.lang.model.element.TypeElement; +import javax.lang.model.type.TypeKind; import javax.lang.model.type.TypeMirror; import javax.lang.model.util.Elements; import static java.util.stream.Collectors.joining; +import static org.elasticsearch.compute.gen.AggregatorImplementer.firstUpper; import static org.elasticsearch.compute.gen.AggregatorImplementer.valueBlockType; import static org.elasticsearch.compute.gen.AggregatorImplementer.valueVectorType; import static 
org.elasticsearch.compute.gen.Methods.findMethod; @@ -74,6 +75,7 @@ public class GroupingAggregatorImplementer { private final ExecutableElement combineIntermediate; private final TypeName stateType; private final boolean valuesIsBytesRef; + private final boolean valuesIsArray; private final List createParameters; private final ClassName implementation; private final List intermediateState; @@ -102,7 +104,8 @@ public class GroupingAggregatorImplementer { this.combineStates = findMethod(declarationType, "combineStates"); this.combineIntermediate = findMethod(declarationType, "combineIntermediate"); this.evaluateFinal = findMethod(declarationType, "evaluateFinal"); - this.valuesIsBytesRef = BYTES_REF.equals(TypeName.get(combine.getParameters().get(combine.getParameters().size() - 1).asType())); + this.valuesIsBytesRef = BYTES_REF.equals(valueTypeName()); + this.valuesIsArray = TypeKind.ARRAY.equals(valueTypeKind()); this.createParameters = init.getParameters() .stream() .map(Parameter::from) @@ -133,12 +136,11 @@ public class GroupingAggregatorImplementer { if (false == initReturn.isPrimitive()) { return initReturn; } - String head = initReturn.toString().substring(0, 1).toUpperCase(Locale.ROOT); - String tail = initReturn.toString().substring(1); + String simpleName = firstUpper(initReturn.toString()); if (warnExceptions.isEmpty()) { - return ClassName.get("org.elasticsearch.compute.aggregation", head + tail + "ArrayState"); + return ClassName.get("org.elasticsearch.compute.aggregation", simpleName + "ArrayState"); } - return ClassName.get("org.elasticsearch.compute.aggregation", head + tail + "FallibleArrayState"); + return ClassName.get("org.elasticsearch.compute.aggregation", simpleName + "FallibleArrayState"); } public JavaFile sourceFile() { @@ -364,6 +366,10 @@ public class GroupingAggregatorImplementer { // Add bytes_ref scratch var that will be used for bytes_ref blocks/vectors builder.addStatement("$T scratch = new $T()", BYTES_REF, BYTES_REF); } + if 
(valuesIsArray && valuesIsBlock == false) { + builder.addComment("This type does not support vectors because all values are multi-valued"); + return builder.build(); + } builder.beginControlFlow("for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++)"); { @@ -391,9 +397,18 @@ public class GroupingAggregatorImplementer { builder.endControlFlow(); builder.addStatement("int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset)"); builder.addStatement("int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset)"); - builder.beginControlFlow("for (int v = valuesStart; v < valuesEnd; v++)"); - combineRawInput(builder, "values", "v"); - builder.endControlFlow(); + if (valuesIsArray) { + String arrayType = valueTypeString(); + builder.addStatement("$L[] valuesArray = new $L[valuesEnd - valuesStart]", arrayType, arrayType); + builder.beginControlFlow("for (int v = valuesStart; v < valuesEnd; v++)"); + builder.addStatement("valuesArray[v-valuesStart] = $L.get$L(v)", "values", firstUpper(arrayType)); + builder.endControlFlow(); + combineRawInputForArray(builder, "valuesArray"); + } else { + builder.beginControlFlow("for (int v = valuesStart; v < valuesEnd; v++)"); + combineRawInput(builder, "values", "v"); + builder.endControlFlow(); + } } else { combineRawInput(builder, "values", "groupPosition + positionOffset"); } @@ -407,70 +422,52 @@ public class GroupingAggregatorImplementer { } private void combineRawInput(MethodSpec.Builder builder, String blockVariable, String offsetVariable) { - TypeName valueType = TypeName.get(combine.getParameters().get(combine.getParameters().size() - 1).asType()); - String secondParameterGetter = "get" - + valueType.toString().substring(0, 1).toUpperCase(Locale.ROOT) - + valueType.toString().substring(1); + TypeName valueType = valueTypeName(); TypeName returnType = TypeName.get(combine.getReturnType()); - if (warnExceptions.isEmpty() == false) { - 
builder.beginControlFlow("try"); - } - if (valuesIsBytesRef) { - combineRawInputForBytesRef(builder, blockVariable, offsetVariable); - } else if (includeTimestampVector) { - combineRawInputWithTimestamp(builder, offsetVariable); - } else if (valueType.isPrimitive() == false) { - throw new IllegalArgumentException("second parameter to combine must be a primitive"); - } else if (returnType.isPrimitive()) { - combineRawInputForPrimitive(builder, secondParameterGetter, blockVariable, offsetVariable); - } else if (returnType == TypeName.VOID) { - combineRawInputForVoid(builder, secondParameterGetter, blockVariable, offsetVariable); - } else { - throw new IllegalArgumentException("combine must return void or a primitive"); - } - if (warnExceptions.isEmpty() == false) { - String catchPattern = "catch (" + warnExceptions.stream().map(m -> "$T").collect(Collectors.joining(" | ")) + " e)"; - builder.nextControlFlow(catchPattern, warnExceptions.stream().map(TypeName::get).toArray()); - builder.addStatement("warnings.registerException(e)"); - builder.addStatement("state.setFailed(groupId)"); - builder.endControlFlow(); - } + warningsBlock(builder, () -> { + if (valuesIsBytesRef) { + combineRawInputForBytesRef(builder, blockVariable, offsetVariable); + } else if (includeTimestampVector) { + combineRawInputWithTimestamp(builder, offsetVariable); + } else if (valueType.isPrimitive() == false) { + throw new IllegalArgumentException("second parameter to combine must be a primitive, array or BytesRef: " + valueType); + } else if (returnType.isPrimitive()) { + combineRawInputForPrimitive(builder, blockVariable, offsetVariable); + } else if (returnType == TypeName.VOID) { + combineRawInputForVoid(builder, blockVariable, offsetVariable); + } else { + throw new IllegalArgumentException("combine must return void or a primitive"); + } + }); } - private void combineRawInputForPrimitive( - MethodSpec.Builder builder, - String secondParameterGetter, - String blockVariable, - String 
offsetVariable - ) { + private void combineRawInputForPrimitive(MethodSpec.Builder builder, String blockVariable, String offsetVariable) { builder.addStatement( - "state.set(groupId, $T.combine(state.getOrDefault(groupId), $L.$L($L)))", + "state.set(groupId, $T.combine(state.getOrDefault(groupId), $L.get$L($L)))", declarationType, blockVariable, - secondParameterGetter, + firstUpper(valueTypeName().toString()), offsetVariable ); } - private void combineRawInputForVoid( - MethodSpec.Builder builder, - String secondParameterGetter, - String blockVariable, - String offsetVariable - ) { + private void combineRawInputForArray(MethodSpec.Builder builder, String arrayVariable) { + warningsBlock(builder, () -> builder.addStatement("$T.combine(state, groupId, $L)", declarationType, arrayVariable)); + } + + private void combineRawInputForVoid(MethodSpec.Builder builder, String blockVariable, String offsetVariable) { builder.addStatement( - "$T.combine(state, groupId, $L.$L($L))", + "$T.combine(state, groupId, $L.get$L($L))", declarationType, blockVariable, - secondParameterGetter, + firstUpper(valueTypeName().toString()), offsetVariable ); } private void combineRawInputWithTimestamp(MethodSpec.Builder builder, String offsetVariable) { - TypeName valueType = TypeName.get(combine.getParameters().get(combine.getParameters().size() - 1).asType()); - String blockType = valueType.toString().substring(0, 1).toUpperCase(Locale.ROOT) + valueType.toString().substring(1); + String blockType = firstUpper(valueTypeName().toString()); if (offsetVariable.contains(" + ")) { builder.addStatement("var valuePosition = $L", offsetVariable); offsetVariable = "valuePosition"; @@ -489,6 +486,20 @@ public class GroupingAggregatorImplementer { builder.addStatement("$T.combine(state, groupId, $L.getBytesRef($L, scratch))", declarationType, blockVariable, offsetVariable); } + private void warningsBlock(MethodSpec.Builder builder, Runnable block) { + if (warnExceptions.isEmpty() == false) { + 
builder.beginControlFlow("try"); + } + block.run(); + if (warnExceptions.isEmpty() == false) { + String catchPattern = "catch (" + warnExceptions.stream().map(m -> "$T").collect(Collectors.joining(" | ")) + " e)"; + builder.nextControlFlow(catchPattern, warnExceptions.stream().map(TypeName::get).toArray()); + builder.addStatement("warnings.registerException(e)"); + builder.addStatement("state.setFailed(groupId)"); + builder.endControlFlow(); + } + } + private MethodSpec selectedMayContainUnseenGroups() { MethodSpec.Builder builder = MethodSpec.methodBuilder("selectedMayContainUnseenGroups"); builder.addAnnotation(Override.class).addModifiers(Modifier.PUBLIC); @@ -544,24 +555,16 @@ public class GroupingAggregatorImplementer { builder.nextControlFlow("else if (seen.getBoolean(groupPosition + positionOffset))"); } - if (warnExceptions.isEmpty() == false) { - builder.beginControlFlow("try"); - } - var name = intermediateState.get(0).name(); - var vectorAccessor = vectorAccessorName(intermediateState.get(0).elementType()); - builder.addStatement( - "state.set(groupId, $T.combine(state.getOrDefault(groupId), $L.$L(groupPosition + positionOffset)))", - declarationType, - name, - vectorAccessor - ); - if (warnExceptions.isEmpty() == false) { - String catchPattern = "catch (" + warnExceptions.stream().map(m -> "$T").collect(Collectors.joining(" | ")) + " e)"; - builder.nextControlFlow(catchPattern, warnExceptions.stream().map(TypeName::get).toArray()); - builder.addStatement("warnings.registerException(e)"); - builder.addStatement("state.setFailed(groupId)"); - builder.endControlFlow(); - } + warningsBlock(builder, () -> { + var name = intermediateState.get(0).name(); + var vectorAccessor = vectorAccessorName(intermediateState.get(0).elementType()); + builder.addStatement( + "state.set(groupId, $T.combine(state.getOrDefault(groupId), $L.$L(groupPosition + positionOffset)))", + declarationType, + name, + vectorAccessor + ); + }); builder.endControlFlow(); } else { 
builder.addStatement("$T.combineIntermediate(state, groupId, " + intermediateStateRowAccess() + ")", declarationType); @@ -657,4 +660,24 @@ public class GroupingAggregatorImplementer { private boolean hasPrimitiveState() { return PRIMITIVE_STATE_PATTERN.matcher(stateType.toString()).matches(); } + + private TypeMirror valueTypeMirror() { + return combine.getParameters().get(combine.getParameters().size() - 1).asType(); + } + + private TypeName valueTypeName() { + return TypeName.get(valueTypeMirror()); + } + + private TypeKind valueTypeKind() { + return valueTypeMirror().getKind(); + } + + private String valueTypeString() { + String valueTypeString = TypeName.get(valueTypeMirror()).toString(); + if (valuesIsArray) { + valueTypeString = valueTypeString.substring(0, valueTypeString.length() - 2); + } + return valueTypeString; + } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeDocValuesAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeDocValuesAggregatorFunction.java new file mode 100644 index 000000000000..3471aafc3a53 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeDocValuesAggregatorFunction.java @@ -0,0 +1,182 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
+package org.elasticsearch.compute.aggregation.spatial; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.aggregation.IntermediateStateDesc; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link SpatialExtentCartesianShapeDocValuesAggregator}. + * This class is generated. Do not edit it. + */ +public final class SpatialExtentCartesianShapeDocValuesAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("minX", ElementType.INT), + new IntermediateStateDesc("maxX", ElementType.INT), + new IntermediateStateDesc("maxY", ElementType.INT), + new IntermediateStateDesc("minY", ElementType.INT) ); + + private final DriverContext driverContext; + + private final SpatialExtentState state; + + private final List channels; + + public SpatialExtentCartesianShapeDocValuesAggregatorFunction(DriverContext driverContext, + List channels, SpatialExtentState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static SpatialExtentCartesianShapeDocValuesAggregatorFunction create( + DriverContext driverContext, List channels) { + return new SpatialExtentCartesianShapeDocValuesAggregatorFunction(driverContext, channels, SpatialExtentCartesianShapeDocValuesAggregator.initSingle()); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int 
intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page, BooleanVector mask) { + if (mask.allFalse()) { + // Entire page masked away + return; + } + if (mask.allTrue()) { + // No masking + IntBlock block = page.getBlock(channels.get(0)); + IntVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + return; + } + // Some positions masked away, others kept + IntBlock block = page.getBlock(channels.get(0)); + IntVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector, mask); + } else { + addRawBlock(block, mask); + } + } + + private void addRawVector(IntVector vector) { + // This type does not support vectors because all values are multi-valued + } + + private void addRawVector(IntVector vector, BooleanVector mask) { + // This type does not support vectors because all values are multi-valued + } + + private void addRawBlock(IntBlock block) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + int[] valuesArray = new int[end - start]; + for (int i = start; i < end; i++) { + valuesArray[i-start] = block.getInt(i); + } + SpatialExtentCartesianShapeDocValuesAggregator.combine(state, valuesArray); + } + } + + private void addRawBlock(IntBlock block, BooleanVector mask) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (mask.getBoolean(p) == false) { + continue; + } + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + int[] valuesArray = new int[end - start]; + for (int i = start; i < end; i++) { + valuesArray[i-start] = block.getInt(i); + } + SpatialExtentCartesianShapeDocValuesAggregator.combine(state, valuesArray); + } + } + + @Override + public void addIntermediateInput(Page page) { + assert 
channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block minXUncast = page.getBlock(channels.get(0)); + if (minXUncast.areAllValuesNull()) { + return; + } + IntVector minX = ((IntBlock) minXUncast).asVector(); + assert minX.getPositionCount() == 1; + Block maxXUncast = page.getBlock(channels.get(1)); + if (maxXUncast.areAllValuesNull()) { + return; + } + IntVector maxX = ((IntBlock) maxXUncast).asVector(); + assert maxX.getPositionCount() == 1; + Block maxYUncast = page.getBlock(channels.get(2)); + if (maxYUncast.areAllValuesNull()) { + return; + } + IntVector maxY = ((IntBlock) maxYUncast).asVector(); + assert maxY.getPositionCount() == 1; + Block minYUncast = page.getBlock(channels.get(3)); + if (minYUncast.areAllValuesNull()) { + return; + } + IntVector minY = ((IntBlock) minYUncast).asVector(); + assert minY.getPositionCount() == 1; + SpatialExtentCartesianShapeDocValuesAggregator.combineIntermediate(state, minX.getInt(0), maxX.getInt(0), maxY.getInt(0), minY.getInt(0)); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = SpatialExtentCartesianShapeDocValuesAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeDocValuesAggregatorFunctionSupplier.java 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeDocValuesAggregatorFunctionSupplier.java new file mode 100644 index 000000000000..b53d779912fc --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeDocValuesAggregatorFunctionSupplier.java @@ -0,0 +1,41 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation.spatial; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link SpatialExtentCartesianShapeDocValuesAggregator}. + * This class is generated. Do not edit it. 
+ */ +public final class SpatialExtentCartesianShapeDocValuesAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List channels; + + public SpatialExtentCartesianShapeDocValuesAggregatorFunctionSupplier(List channels) { + this.channels = channels; + } + + @Override + public SpatialExtentCartesianShapeDocValuesAggregatorFunction aggregator( + DriverContext driverContext) { + return SpatialExtentCartesianShapeDocValuesAggregatorFunction.create(driverContext, channels); + } + + @Override + public SpatialExtentCartesianShapeDocValuesGroupingAggregatorFunction groupingAggregator( + DriverContext driverContext) { + return SpatialExtentCartesianShapeDocValuesGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "spatial_extent_cartesian_shape_doc of valuess"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeDocValuesGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeDocValuesGroupingAggregatorFunction.java new file mode 100644 index 000000000000..aa3c1a7ba56a --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeDocValuesGroupingAggregatorFunction.java @@ -0,0 +1,219 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
+package org.elasticsearch.compute.aggregation.spatial; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; +import org.elasticsearch.compute.aggregation.IntermediateStateDesc; +import org.elasticsearch.compute.aggregation.SeenGroupIds; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link SpatialExtentCartesianShapeDocValuesAggregator}. + * This class is generated. Do not edit it. + */ +public final class SpatialExtentCartesianShapeDocValuesGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("minX", ElementType.INT), + new IntermediateStateDesc("maxX", ElementType.INT), + new IntermediateStateDesc("maxY", ElementType.INT), + new IntermediateStateDesc("minY", ElementType.INT) ); + + private final SpatialExtentGroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public SpatialExtentCartesianShapeDocValuesGroupingAggregatorFunction(List channels, + SpatialExtentGroupingState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static SpatialExtentCartesianShapeDocValuesGroupingAggregatorFunction create( + List channels, DriverContext driverContext) { + return new SpatialExtentCartesianShapeDocValuesGroupingAggregatorFunction(channels, SpatialExtentCartesianShapeDocValuesAggregator.initGrouping(), driverContext); + } + + public static List 
intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + IntBlock valuesBlock = page.getBlock(channels.get(0)); + IntVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void close() { + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void close() { + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, IntBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + int[] valuesArray = new int[valuesEnd - valuesStart]; + for (int v = valuesStart; v < valuesEnd; v++) { + valuesArray[v-valuesStart] = values.getInt(v); + } + SpatialExtentCartesianShapeDocValuesAggregator.combine(state, groupId, valuesArray); + } + } + + private void addRawInput(int 
positionOffset, IntVector groups, IntVector values) { + // This type does not support vectors because all values are multi-valued + } + + private void addRawInput(int positionOffset, IntBlock groups, IntBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + int[] valuesArray = new int[valuesEnd - valuesStart]; + for (int v = valuesStart; v < valuesEnd; v++) { + valuesArray[v-valuesStart] = values.getInt(v); + } + SpatialExtentCartesianShapeDocValuesAggregator.combine(state, groupId, valuesArray); + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, IntVector values) { + // This type does not support vectors because all values are multi-valued + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block minXUncast = page.getBlock(channels.get(0)); + if (minXUncast.areAllValuesNull()) { + return; + } + IntVector minX = ((IntBlock) minXUncast).asVector(); + Block maxXUncast = page.getBlock(channels.get(1)); + if (maxXUncast.areAllValuesNull()) { + return; + } + IntVector maxX = ((IntBlock) maxXUncast).asVector(); + Block maxYUncast = page.getBlock(channels.get(2)); + if (maxYUncast.areAllValuesNull()) { + 
return; + } + IntVector maxY = ((IntBlock) maxYUncast).asVector(); + Block minYUncast = page.getBlock(channels.get(3)); + if (minYUncast.areAllValuesNull()) { + return; + } + IntVector minY = ((IntBlock) minYUncast).asVector(); + assert minX.getPositionCount() == maxX.getPositionCount() && minX.getPositionCount() == maxY.getPositionCount() && minX.getPositionCount() == minY.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + SpatialExtentCartesianShapeDocValuesAggregator.combineIntermediate(state, groupId, minX.getInt(groupPosition + positionOffset), maxX.getInt(groupPosition + positionOffset), maxY.getInt(groupPosition + positionOffset), minY.getInt(groupPosition + positionOffset)); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + SpatialExtentGroupingState inState = ((SpatialExtentCartesianShapeDocValuesGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + SpatialExtentCartesianShapeDocValuesAggregator.combineStates(state, groupId, inState, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = SpatialExtentCartesianShapeDocValuesAggregator.evaluateFinal(state, selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public 
void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeSourceValuesAggregatorFunction.java similarity index 81% rename from x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeAggregatorFunction.java rename to x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeSourceValuesAggregatorFunction.java index 19aa4f7ca78a..014a2d454f57 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeSourceValuesAggregatorFunction.java @@ -23,10 +23,10 @@ import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.DriverContext; /** - * {@link AggregatorFunction} implementation for {@link SpatialExtentCartesianShapeAggregator}. + * {@link AggregatorFunction} implementation for {@link SpatialExtentCartesianShapeSourceValuesAggregator}. * This class is generated. Do not edit it. 
*/ -public final class SpatialExtentCartesianShapeAggregatorFunction implements AggregatorFunction { +public final class SpatialExtentCartesianShapeSourceValuesAggregatorFunction implements AggregatorFunction { private static final List INTERMEDIATE_STATE_DESC = List.of( new IntermediateStateDesc("minX", ElementType.INT), new IntermediateStateDesc("maxX", ElementType.INT), @@ -39,16 +39,16 @@ public final class SpatialExtentCartesianShapeAggregatorFunction implements Aggr private final List channels; - public SpatialExtentCartesianShapeAggregatorFunction(DriverContext driverContext, + public SpatialExtentCartesianShapeSourceValuesAggregatorFunction(DriverContext driverContext, List channels, SpatialExtentState state) { this.driverContext = driverContext; this.channels = channels; this.state = state; } - public static SpatialExtentCartesianShapeAggregatorFunction create(DriverContext driverContext, - List channels) { - return new SpatialExtentCartesianShapeAggregatorFunction(driverContext, channels, SpatialExtentCartesianShapeAggregator.initSingle()); + public static SpatialExtentCartesianShapeSourceValuesAggregatorFunction create( + DriverContext driverContext, List channels) { + return new SpatialExtentCartesianShapeSourceValuesAggregatorFunction(driverContext, channels, SpatialExtentCartesianShapeSourceValuesAggregator.initSingle()); } public static List intermediateStateDesc() { @@ -90,7 +90,7 @@ public final class SpatialExtentCartesianShapeAggregatorFunction implements Aggr private void addRawVector(BytesRefVector vector) { BytesRef scratch = new BytesRef(); for (int i = 0; i < vector.getPositionCount(); i++) { - SpatialExtentCartesianShapeAggregator.combine(state, vector.getBytesRef(i, scratch)); + SpatialExtentCartesianShapeSourceValuesAggregator.combine(state, vector.getBytesRef(i, scratch)); } } @@ -100,7 +100,7 @@ public final class SpatialExtentCartesianShapeAggregatorFunction implements Aggr if (mask.getBoolean(i) == false) { continue; } - 
SpatialExtentCartesianShapeAggregator.combine(state, vector.getBytesRef(i, scratch)); + SpatialExtentCartesianShapeSourceValuesAggregator.combine(state, vector.getBytesRef(i, scratch)); } } @@ -113,7 +113,7 @@ public final class SpatialExtentCartesianShapeAggregatorFunction implements Aggr int start = block.getFirstValueIndex(p); int end = start + block.getValueCount(p); for (int i = start; i < end; i++) { - SpatialExtentCartesianShapeAggregator.combine(state, block.getBytesRef(i, scratch)); + SpatialExtentCartesianShapeSourceValuesAggregator.combine(state, block.getBytesRef(i, scratch)); } } } @@ -130,7 +130,7 @@ public final class SpatialExtentCartesianShapeAggregatorFunction implements Aggr int start = block.getFirstValueIndex(p); int end = start + block.getValueCount(p); for (int i = start; i < end; i++) { - SpatialExtentCartesianShapeAggregator.combine(state, block.getBytesRef(i, scratch)); + SpatialExtentCartesianShapeSourceValuesAggregator.combine(state, block.getBytesRef(i, scratch)); } } } @@ -163,7 +163,7 @@ public final class SpatialExtentCartesianShapeAggregatorFunction implements Aggr } IntVector minY = ((IntBlock) minYUncast).asVector(); assert minY.getPositionCount() == 1; - SpatialExtentCartesianShapeAggregator.combineIntermediate(state, minX.getInt(0), maxX.getInt(0), maxY.getInt(0), minY.getInt(0)); + SpatialExtentCartesianShapeSourceValuesAggregator.combineIntermediate(state, minX.getInt(0), maxX.getInt(0), maxY.getInt(0), minY.getInt(0)); } @Override @@ -173,7 +173,7 @@ public final class SpatialExtentCartesianShapeAggregatorFunction implements Aggr @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { - blocks[offset] = SpatialExtentCartesianShapeAggregator.evaluateFinal(state, driverContext); + blocks[offset] = SpatialExtentCartesianShapeSourceValuesAggregator.evaluateFinal(state, driverContext); } @Override diff --git 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeSourceValuesAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeSourceValuesAggregatorFunctionSupplier.java new file mode 100644 index 000000000000..c8b1372d44b6 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeSourceValuesAggregatorFunctionSupplier.java @@ -0,0 +1,41 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation.spatial; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link SpatialExtentCartesianShapeSourceValuesAggregator}. + * This class is generated. Do not edit it. 
+ */ +public final class SpatialExtentCartesianShapeSourceValuesAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + private final List channels; + + public SpatialExtentCartesianShapeSourceValuesAggregatorFunctionSupplier(List channels) { + this.channels = channels; + } + + @Override + public SpatialExtentCartesianShapeSourceValuesAggregatorFunction aggregator( + DriverContext driverContext) { + return SpatialExtentCartesianShapeSourceValuesAggregatorFunction.create(driverContext, channels); + } + + @Override + public SpatialExtentCartesianShapeSourceValuesGroupingAggregatorFunction groupingAggregator( + DriverContext driverContext) { + return SpatialExtentCartesianShapeSourceValuesGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "spatial_extent_cartesian_shape_source of valuess"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeSourceValuesGroupingAggregatorFunction.java similarity index 82% rename from x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeGroupingAggregatorFunction.java rename to x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeSourceValuesGroupingAggregatorFunction.java index c55c3d9c6694..d932038a26ec 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeSourceValuesGroupingAggregatorFunction.java @@ -23,10 +23,10 @@ import org.elasticsearch.compute.data.Page; 
import org.elasticsearch.compute.operator.DriverContext; /** - * {@link GroupingAggregatorFunction} implementation for {@link SpatialExtentCartesianShapeAggregator}. + * {@link GroupingAggregatorFunction} implementation for {@link SpatialExtentCartesianShapeSourceValuesAggregator}. * This class is generated. Do not edit it. */ -public final class SpatialExtentCartesianShapeGroupingAggregatorFunction implements GroupingAggregatorFunction { +public final class SpatialExtentCartesianShapeSourceValuesGroupingAggregatorFunction implements GroupingAggregatorFunction { private static final List INTERMEDIATE_STATE_DESC = List.of( new IntermediateStateDesc("minX", ElementType.INT), new IntermediateStateDesc("maxX", ElementType.INT), @@ -39,16 +39,16 @@ public final class SpatialExtentCartesianShapeGroupingAggregatorFunction impleme private final DriverContext driverContext; - public SpatialExtentCartesianShapeGroupingAggregatorFunction(List channels, + public SpatialExtentCartesianShapeSourceValuesGroupingAggregatorFunction(List channels, SpatialExtentGroupingState state, DriverContext driverContext) { this.channels = channels; this.state = state; this.driverContext = driverContext; } - public static SpatialExtentCartesianShapeGroupingAggregatorFunction create(List channels, - DriverContext driverContext) { - return new SpatialExtentCartesianShapeGroupingAggregatorFunction(channels, SpatialExtentCartesianShapeAggregator.initGrouping(), driverContext); + public static SpatialExtentCartesianShapeSourceValuesGroupingAggregatorFunction create( + List channels, DriverContext driverContext) { + return new SpatialExtentCartesianShapeSourceValuesGroupingAggregatorFunction(channels, SpatialExtentCartesianShapeSourceValuesAggregator.initGrouping(), driverContext); } public static List intermediateStateDesc() { @@ -112,7 +112,7 @@ public final class SpatialExtentCartesianShapeGroupingAggregatorFunction impleme int valuesStart = values.getFirstValueIndex(groupPosition + 
positionOffset); int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); for (int v = valuesStart; v < valuesEnd; v++) { - SpatialExtentCartesianShapeAggregator.combine(state, groupId, values.getBytesRef(v, scratch)); + SpatialExtentCartesianShapeSourceValuesAggregator.combine(state, groupId, values.getBytesRef(v, scratch)); } } } @@ -121,7 +121,7 @@ public final class SpatialExtentCartesianShapeGroupingAggregatorFunction impleme BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { int groupId = groups.getInt(groupPosition); - SpatialExtentCartesianShapeAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); + SpatialExtentCartesianShapeSourceValuesAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -141,7 +141,7 @@ public final class SpatialExtentCartesianShapeGroupingAggregatorFunction impleme int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); for (int v = valuesStart; v < valuesEnd; v++) { - SpatialExtentCartesianShapeAggregator.combine(state, groupId, values.getBytesRef(v, scratch)); + SpatialExtentCartesianShapeSourceValuesAggregator.combine(state, groupId, values.getBytesRef(v, scratch)); } } } @@ -157,7 +157,7 @@ public final class SpatialExtentCartesianShapeGroupingAggregatorFunction impleme int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { int groupId = groups.getInt(g); - SpatialExtentCartesianShapeAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); + SpatialExtentCartesianShapeSourceValuesAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } } @@ -194,7 +194,7 @@ public final class 
SpatialExtentCartesianShapeGroupingAggregatorFunction impleme assert minX.getPositionCount() == maxX.getPositionCount() && minX.getPositionCount() == maxY.getPositionCount() && minX.getPositionCount() == minY.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { int groupId = groups.getInt(groupPosition); - SpatialExtentCartesianShapeAggregator.combineIntermediate(state, groupId, minX.getInt(groupPosition + positionOffset), maxX.getInt(groupPosition + positionOffset), maxY.getInt(groupPosition + positionOffset), minY.getInt(groupPosition + positionOffset)); + SpatialExtentCartesianShapeSourceValuesAggregator.combineIntermediate(state, groupId, minX.getInt(groupPosition + positionOffset), maxX.getInt(groupPosition + positionOffset), maxY.getInt(groupPosition + positionOffset), minY.getInt(groupPosition + positionOffset)); } } @@ -203,9 +203,9 @@ public final class SpatialExtentCartesianShapeGroupingAggregatorFunction impleme if (input.getClass() != getClass()) { throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); } - SpatialExtentGroupingState inState = ((SpatialExtentCartesianShapeGroupingAggregatorFunction) input).state; + SpatialExtentGroupingState inState = ((SpatialExtentCartesianShapeSourceValuesGroupingAggregatorFunction) input).state; state.enableGroupIdTracking(new SeenGroupIds.Empty()); - SpatialExtentCartesianShapeAggregator.combineStates(state, groupId, inState, position); + SpatialExtentCartesianShapeSourceValuesAggregator.combineStates(state, groupId, inState, position); } @Override @@ -216,7 +216,7 @@ public final class SpatialExtentCartesianShapeGroupingAggregatorFunction impleme @Override public void evaluateFinal(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { - blocks[offset] = SpatialExtentCartesianShapeAggregator.evaluateFinal(state, selected, driverContext); + blocks[offset] = 
SpatialExtentCartesianShapeSourceValuesAggregator.evaluateFinal(state, selected, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointDocValuesAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointDocValuesAggregatorFunction.java index c883e82d4598..4e76d3dbe029 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointDocValuesAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointDocValuesAggregatorFunction.java @@ -27,12 +27,12 @@ import org.elasticsearch.compute.operator.DriverContext; */ public final class SpatialExtentGeoPointDocValuesAggregatorFunction implements AggregatorFunction { private static final List INTERMEDIATE_STATE_DESC = List.of( - new IntermediateStateDesc("minNegX", ElementType.INT), - new IntermediateStateDesc("minPosX", ElementType.INT), - new IntermediateStateDesc("maxNegX", ElementType.INT), - new IntermediateStateDesc("maxPosX", ElementType.INT), - new IntermediateStateDesc("maxY", ElementType.INT), - new IntermediateStateDesc("minY", ElementType.INT) ); + new IntermediateStateDesc("top", ElementType.INT), + new IntermediateStateDesc("bottom", ElementType.INT), + new IntermediateStateDesc("negLeft", ElementType.INT), + new IntermediateStateDesc("negRight", ElementType.INT), + new IntermediateStateDesc("posLeft", ElementType.INT), + new IntermediateStateDesc("posRight", ElementType.INT) ); private final DriverContext driverContext; @@ -136,43 +136,43 @@ public final class SpatialExtentGeoPointDocValuesAggregatorFunction implements A public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + 
intermediateStateDesc().size(); - Block minNegXUncast = page.getBlock(channels.get(0)); - if (minNegXUncast.areAllValuesNull()) { + Block topUncast = page.getBlock(channels.get(0)); + if (topUncast.areAllValuesNull()) { return; } - IntVector minNegX = ((IntBlock) minNegXUncast).asVector(); - assert minNegX.getPositionCount() == 1; - Block minPosXUncast = page.getBlock(channels.get(1)); - if (minPosXUncast.areAllValuesNull()) { + IntVector top = ((IntBlock) topUncast).asVector(); + assert top.getPositionCount() == 1; + Block bottomUncast = page.getBlock(channels.get(1)); + if (bottomUncast.areAllValuesNull()) { return; } - IntVector minPosX = ((IntBlock) minPosXUncast).asVector(); - assert minPosX.getPositionCount() == 1; - Block maxNegXUncast = page.getBlock(channels.get(2)); - if (maxNegXUncast.areAllValuesNull()) { + IntVector bottom = ((IntBlock) bottomUncast).asVector(); + assert bottom.getPositionCount() == 1; + Block negLeftUncast = page.getBlock(channels.get(2)); + if (negLeftUncast.areAllValuesNull()) { return; } - IntVector maxNegX = ((IntBlock) maxNegXUncast).asVector(); - assert maxNegX.getPositionCount() == 1; - Block maxPosXUncast = page.getBlock(channels.get(3)); - if (maxPosXUncast.areAllValuesNull()) { + IntVector negLeft = ((IntBlock) negLeftUncast).asVector(); + assert negLeft.getPositionCount() == 1; + Block negRightUncast = page.getBlock(channels.get(3)); + if (negRightUncast.areAllValuesNull()) { return; } - IntVector maxPosX = ((IntBlock) maxPosXUncast).asVector(); - assert maxPosX.getPositionCount() == 1; - Block maxYUncast = page.getBlock(channels.get(4)); - if (maxYUncast.areAllValuesNull()) { + IntVector negRight = ((IntBlock) negRightUncast).asVector(); + assert negRight.getPositionCount() == 1; + Block posLeftUncast = page.getBlock(channels.get(4)); + if (posLeftUncast.areAllValuesNull()) { return; } - IntVector maxY = ((IntBlock) maxYUncast).asVector(); - assert maxY.getPositionCount() == 1; - Block minYUncast = 
page.getBlock(channels.get(5)); - if (minYUncast.areAllValuesNull()) { + IntVector posLeft = ((IntBlock) posLeftUncast).asVector(); + assert posLeft.getPositionCount() == 1; + Block posRightUncast = page.getBlock(channels.get(5)); + if (posRightUncast.areAllValuesNull()) { return; } - IntVector minY = ((IntBlock) minYUncast).asVector(); - assert minY.getPositionCount() == 1; - SpatialExtentGeoPointDocValuesAggregator.combineIntermediate(state, minNegX.getInt(0), minPosX.getInt(0), maxNegX.getInt(0), maxPosX.getInt(0), maxY.getInt(0), minY.getInt(0)); + IntVector posRight = ((IntBlock) posRightUncast).asVector(); + assert posRight.getPositionCount() == 1; + SpatialExtentGeoPointDocValuesAggregator.combineIntermediate(state, top.getInt(0), bottom.getInt(0), negLeft.getInt(0), negRight.getInt(0), posLeft.getInt(0), posRight.getInt(0)); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointDocValuesGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointDocValuesGroupingAggregatorFunction.java index eee5bc5df41a..9a97a37b22ca 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointDocValuesGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointDocValuesGroupingAggregatorFunction.java @@ -27,12 +27,12 @@ import org.elasticsearch.compute.operator.DriverContext; */ public final class SpatialExtentGeoPointDocValuesGroupingAggregatorFunction implements GroupingAggregatorFunction { private static final List INTERMEDIATE_STATE_DESC = List.of( - new IntermediateStateDesc("minNegX", ElementType.INT), - new IntermediateStateDesc("minPosX", ElementType.INT), - new IntermediateStateDesc("maxNegX", ElementType.INT), - new 
IntermediateStateDesc("maxPosX", ElementType.INT), - new IntermediateStateDesc("maxY", ElementType.INT), - new IntermediateStateDesc("minY", ElementType.INT) ); + new IntermediateStateDesc("top", ElementType.INT), + new IntermediateStateDesc("bottom", ElementType.INT), + new IntermediateStateDesc("negLeft", ElementType.INT), + new IntermediateStateDesc("negRight", ElementType.INT), + new IntermediateStateDesc("posLeft", ElementType.INT), + new IntermediateStateDesc("posRight", ElementType.INT) ); private final SpatialExtentGroupingStateWrappedLongitudeState state; @@ -168,40 +168,40 @@ public final class SpatialExtentGeoPointDocValuesGroupingAggregatorFunction impl public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); assert channels.size() == intermediateBlockCount(); - Block minNegXUncast = page.getBlock(channels.get(0)); - if (minNegXUncast.areAllValuesNull()) { + Block topUncast = page.getBlock(channels.get(0)); + if (topUncast.areAllValuesNull()) { return; } - IntVector minNegX = ((IntBlock) minNegXUncast).asVector(); - Block minPosXUncast = page.getBlock(channels.get(1)); - if (minPosXUncast.areAllValuesNull()) { + IntVector top = ((IntBlock) topUncast).asVector(); + Block bottomUncast = page.getBlock(channels.get(1)); + if (bottomUncast.areAllValuesNull()) { return; } - IntVector minPosX = ((IntBlock) minPosXUncast).asVector(); - Block maxNegXUncast = page.getBlock(channels.get(2)); - if (maxNegXUncast.areAllValuesNull()) { + IntVector bottom = ((IntBlock) bottomUncast).asVector(); + Block negLeftUncast = page.getBlock(channels.get(2)); + if (negLeftUncast.areAllValuesNull()) { return; } - IntVector maxNegX = ((IntBlock) maxNegXUncast).asVector(); - Block maxPosXUncast = page.getBlock(channels.get(3)); - if (maxPosXUncast.areAllValuesNull()) { + IntVector negLeft = ((IntBlock) negLeftUncast).asVector(); + Block negRightUncast = page.getBlock(channels.get(3)); + if 
(negRightUncast.areAllValuesNull()) { return; } - IntVector maxPosX = ((IntBlock) maxPosXUncast).asVector(); - Block maxYUncast = page.getBlock(channels.get(4)); - if (maxYUncast.areAllValuesNull()) { + IntVector negRight = ((IntBlock) negRightUncast).asVector(); + Block posLeftUncast = page.getBlock(channels.get(4)); + if (posLeftUncast.areAllValuesNull()) { return; } - IntVector maxY = ((IntBlock) maxYUncast).asVector(); - Block minYUncast = page.getBlock(channels.get(5)); - if (minYUncast.areAllValuesNull()) { + IntVector posLeft = ((IntBlock) posLeftUncast).asVector(); + Block posRightUncast = page.getBlock(channels.get(5)); + if (posRightUncast.areAllValuesNull()) { return; } - IntVector minY = ((IntBlock) minYUncast).asVector(); - assert minNegX.getPositionCount() == minPosX.getPositionCount() && minNegX.getPositionCount() == maxNegX.getPositionCount() && minNegX.getPositionCount() == maxPosX.getPositionCount() && minNegX.getPositionCount() == maxY.getPositionCount() && minNegX.getPositionCount() == minY.getPositionCount(); + IntVector posRight = ((IntBlock) posRightUncast).asVector(); + assert top.getPositionCount() == bottom.getPositionCount() && top.getPositionCount() == negLeft.getPositionCount() && top.getPositionCount() == negRight.getPositionCount() && top.getPositionCount() == posLeft.getPositionCount() && top.getPositionCount() == posRight.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { int groupId = groups.getInt(groupPosition); - SpatialExtentGeoPointDocValuesAggregator.combineIntermediate(state, groupId, minNegX.getInt(groupPosition + positionOffset), minPosX.getInt(groupPosition + positionOffset), maxNegX.getInt(groupPosition + positionOffset), maxPosX.getInt(groupPosition + positionOffset), maxY.getInt(groupPosition + positionOffset), minY.getInt(groupPosition + positionOffset)); + SpatialExtentGeoPointDocValuesAggregator.combineIntermediate(state, groupId, top.getInt(groupPosition + 
positionOffset), bottom.getInt(groupPosition + positionOffset), negLeft.getInt(groupPosition + positionOffset), negRight.getInt(groupPosition + positionOffset), posLeft.getInt(groupPosition + positionOffset), posRight.getInt(groupPosition + positionOffset)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointSourceValuesAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointSourceValuesAggregatorFunction.java index cf65fbdde594..05bcc79db4f3 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointSourceValuesAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointSourceValuesAggregatorFunction.java @@ -28,12 +28,12 @@ import org.elasticsearch.compute.operator.DriverContext; */ public final class SpatialExtentGeoPointSourceValuesAggregatorFunction implements AggregatorFunction { private static final List INTERMEDIATE_STATE_DESC = List.of( - new IntermediateStateDesc("minNegX", ElementType.INT), - new IntermediateStateDesc("minPosX", ElementType.INT), - new IntermediateStateDesc("maxNegX", ElementType.INT), - new IntermediateStateDesc("maxPosX", ElementType.INT), - new IntermediateStateDesc("maxY", ElementType.INT), - new IntermediateStateDesc("minY", ElementType.INT) ); + new IntermediateStateDesc("top", ElementType.INT), + new IntermediateStateDesc("bottom", ElementType.INT), + new IntermediateStateDesc("negLeft", ElementType.INT), + new IntermediateStateDesc("negRight", ElementType.INT), + new IntermediateStateDesc("posLeft", ElementType.INT), + new IntermediateStateDesc("posRight", ElementType.INT) ); private final DriverContext driverContext; @@ -141,43 +141,43 @@ public final class SpatialExtentGeoPointSourceValuesAggregatorFunction implement 
public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block minNegXUncast = page.getBlock(channels.get(0)); - if (minNegXUncast.areAllValuesNull()) { + Block topUncast = page.getBlock(channels.get(0)); + if (topUncast.areAllValuesNull()) { return; } - IntVector minNegX = ((IntBlock) minNegXUncast).asVector(); - assert minNegX.getPositionCount() == 1; - Block minPosXUncast = page.getBlock(channels.get(1)); - if (minPosXUncast.areAllValuesNull()) { + IntVector top = ((IntBlock) topUncast).asVector(); + assert top.getPositionCount() == 1; + Block bottomUncast = page.getBlock(channels.get(1)); + if (bottomUncast.areAllValuesNull()) { return; } - IntVector minPosX = ((IntBlock) minPosXUncast).asVector(); - assert minPosX.getPositionCount() == 1; - Block maxNegXUncast = page.getBlock(channels.get(2)); - if (maxNegXUncast.areAllValuesNull()) { + IntVector bottom = ((IntBlock) bottomUncast).asVector(); + assert bottom.getPositionCount() == 1; + Block negLeftUncast = page.getBlock(channels.get(2)); + if (negLeftUncast.areAllValuesNull()) { return; } - IntVector maxNegX = ((IntBlock) maxNegXUncast).asVector(); - assert maxNegX.getPositionCount() == 1; - Block maxPosXUncast = page.getBlock(channels.get(3)); - if (maxPosXUncast.areAllValuesNull()) { + IntVector negLeft = ((IntBlock) negLeftUncast).asVector(); + assert negLeft.getPositionCount() == 1; + Block negRightUncast = page.getBlock(channels.get(3)); + if (negRightUncast.areAllValuesNull()) { return; } - IntVector maxPosX = ((IntBlock) maxPosXUncast).asVector(); - assert maxPosX.getPositionCount() == 1; - Block maxYUncast = page.getBlock(channels.get(4)); - if (maxYUncast.areAllValuesNull()) { + IntVector negRight = ((IntBlock) negRightUncast).asVector(); + assert negRight.getPositionCount() == 1; + Block posLeftUncast = page.getBlock(channels.get(4)); + if (posLeftUncast.areAllValuesNull()) { 
return; } - IntVector maxY = ((IntBlock) maxYUncast).asVector(); - assert maxY.getPositionCount() == 1; - Block minYUncast = page.getBlock(channels.get(5)); - if (minYUncast.areAllValuesNull()) { + IntVector posLeft = ((IntBlock) posLeftUncast).asVector(); + assert posLeft.getPositionCount() == 1; + Block posRightUncast = page.getBlock(channels.get(5)); + if (posRightUncast.areAllValuesNull()) { return; } - IntVector minY = ((IntBlock) minYUncast).asVector(); - assert minY.getPositionCount() == 1; - SpatialExtentGeoPointSourceValuesAggregator.combineIntermediate(state, minNegX.getInt(0), minPosX.getInt(0), maxNegX.getInt(0), maxPosX.getInt(0), maxY.getInt(0), minY.getInt(0)); + IntVector posRight = ((IntBlock) posRightUncast).asVector(); + assert posRight.getPositionCount() == 1; + SpatialExtentGeoPointSourceValuesAggregator.combineIntermediate(state, top.getInt(0), bottom.getInt(0), negLeft.getInt(0), negRight.getInt(0), posLeft.getInt(0), posRight.getInt(0)); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointSourceValuesGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointSourceValuesGroupingAggregatorFunction.java index bf8ab2554c7b..1231e2438288 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointSourceValuesGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointSourceValuesGroupingAggregatorFunction.java @@ -28,12 +28,12 @@ import org.elasticsearch.compute.operator.DriverContext; */ public final class SpatialExtentGeoPointSourceValuesGroupingAggregatorFunction implements GroupingAggregatorFunction { private static final List INTERMEDIATE_STATE_DESC = List.of( - new IntermediateStateDesc("minNegX", ElementType.INT), 
- new IntermediateStateDesc("minPosX", ElementType.INT), - new IntermediateStateDesc("maxNegX", ElementType.INT), - new IntermediateStateDesc("maxPosX", ElementType.INT), - new IntermediateStateDesc("maxY", ElementType.INT), - new IntermediateStateDesc("minY", ElementType.INT) ); + new IntermediateStateDesc("top", ElementType.INT), + new IntermediateStateDesc("bottom", ElementType.INT), + new IntermediateStateDesc("negLeft", ElementType.INT), + new IntermediateStateDesc("negRight", ElementType.INT), + new IntermediateStateDesc("posLeft", ElementType.INT), + new IntermediateStateDesc("posRight", ElementType.INT) ); private final SpatialExtentGroupingStateWrappedLongitudeState state; @@ -173,40 +173,40 @@ public final class SpatialExtentGeoPointSourceValuesGroupingAggregatorFunction i public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); assert channels.size() == intermediateBlockCount(); - Block minNegXUncast = page.getBlock(channels.get(0)); - if (minNegXUncast.areAllValuesNull()) { + Block topUncast = page.getBlock(channels.get(0)); + if (topUncast.areAllValuesNull()) { return; } - IntVector minNegX = ((IntBlock) minNegXUncast).asVector(); - Block minPosXUncast = page.getBlock(channels.get(1)); - if (minPosXUncast.areAllValuesNull()) { + IntVector top = ((IntBlock) topUncast).asVector(); + Block bottomUncast = page.getBlock(channels.get(1)); + if (bottomUncast.areAllValuesNull()) { return; } - IntVector minPosX = ((IntBlock) minPosXUncast).asVector(); - Block maxNegXUncast = page.getBlock(channels.get(2)); - if (maxNegXUncast.areAllValuesNull()) { + IntVector bottom = ((IntBlock) bottomUncast).asVector(); + Block negLeftUncast = page.getBlock(channels.get(2)); + if (negLeftUncast.areAllValuesNull()) { return; } - IntVector maxNegX = ((IntBlock) maxNegXUncast).asVector(); - Block maxPosXUncast = page.getBlock(channels.get(3)); - if (maxPosXUncast.areAllValuesNull()) { + IntVector 
negLeft = ((IntBlock) negLeftUncast).asVector(); + Block negRightUncast = page.getBlock(channels.get(3)); + if (negRightUncast.areAllValuesNull()) { return; } - IntVector maxPosX = ((IntBlock) maxPosXUncast).asVector(); - Block maxYUncast = page.getBlock(channels.get(4)); - if (maxYUncast.areAllValuesNull()) { + IntVector negRight = ((IntBlock) negRightUncast).asVector(); + Block posLeftUncast = page.getBlock(channels.get(4)); + if (posLeftUncast.areAllValuesNull()) { return; } - IntVector maxY = ((IntBlock) maxYUncast).asVector(); - Block minYUncast = page.getBlock(channels.get(5)); - if (minYUncast.areAllValuesNull()) { + IntVector posLeft = ((IntBlock) posLeftUncast).asVector(); + Block posRightUncast = page.getBlock(channels.get(5)); + if (posRightUncast.areAllValuesNull()) { return; } - IntVector minY = ((IntBlock) minYUncast).asVector(); - assert minNegX.getPositionCount() == minPosX.getPositionCount() && minNegX.getPositionCount() == maxNegX.getPositionCount() && minNegX.getPositionCount() == maxPosX.getPositionCount() && minNegX.getPositionCount() == maxY.getPositionCount() && minNegX.getPositionCount() == minY.getPositionCount(); + IntVector posRight = ((IntBlock) posRightUncast).asVector(); + assert top.getPositionCount() == bottom.getPositionCount() && top.getPositionCount() == negLeft.getPositionCount() && top.getPositionCount() == negRight.getPositionCount() && top.getPositionCount() == posLeft.getPositionCount() && top.getPositionCount() == posRight.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { int groupId = groups.getInt(groupPosition); - SpatialExtentGeoPointSourceValuesAggregator.combineIntermediate(state, groupId, minNegX.getInt(groupPosition + positionOffset), minPosX.getInt(groupPosition + positionOffset), maxNegX.getInt(groupPosition + positionOffset), maxPosX.getInt(groupPosition + positionOffset), maxY.getInt(groupPosition + positionOffset), minY.getInt(groupPosition + 
positionOffset)); + SpatialExtentGeoPointSourceValuesAggregator.combineIntermediate(state, groupId, top.getInt(groupPosition + positionOffset), bottom.getInt(groupPosition + positionOffset), negLeft.getInt(groupPosition + positionOffset), negRight.getInt(groupPosition + positionOffset), posLeft.getInt(groupPosition + positionOffset), posRight.getInt(groupPosition + positionOffset)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeDocValuesAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeDocValuesAggregatorFunction.java new file mode 100644 index 000000000000..fefef6edf6dc --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeDocValuesAggregatorFunction.java @@ -0,0 +1,196 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation.spatial; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.aggregation.AggregatorFunction; +import org.elasticsearch.compute.aggregation.IntermediateStateDesc; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link SpatialExtentGeoShapeDocValuesAggregator}. 
+ * This class is generated. Do not edit it. + */ +public final class SpatialExtentGeoShapeDocValuesAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("top", ElementType.INT), + new IntermediateStateDesc("bottom", ElementType.INT), + new IntermediateStateDesc("negLeft", ElementType.INT), + new IntermediateStateDesc("negRight", ElementType.INT), + new IntermediateStateDesc("posLeft", ElementType.INT), + new IntermediateStateDesc("posRight", ElementType.INT) ); + + private final DriverContext driverContext; + + private final SpatialExtentStateWrappedLongitudeState state; + + private final List channels; + + public SpatialExtentGeoShapeDocValuesAggregatorFunction(DriverContext driverContext, + List channels, SpatialExtentStateWrappedLongitudeState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static SpatialExtentGeoShapeDocValuesAggregatorFunction create(DriverContext driverContext, + List channels) { + return new SpatialExtentGeoShapeDocValuesAggregatorFunction(driverContext, channels, SpatialExtentGeoShapeDocValuesAggregator.initSingle()); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page, BooleanVector mask) { + if (mask.allFalse()) { + // Entire page masked away + return; + } + if (mask.allTrue()) { + // No masking + IntBlock block = page.getBlock(channels.get(0)); + IntVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector); + } else { + addRawBlock(block); + } + return; + } + // Some positions masked away, others kept + IntBlock block = page.getBlock(channels.get(0)); + IntVector vector = block.asVector(); + if (vector != null) { + addRawVector(vector, mask); + } else { + addRawBlock(block, mask); 
+ } + } + + private void addRawVector(IntVector vector) { + // This type does not support vectors because all values are multi-valued + } + + private void addRawVector(IntVector vector, BooleanVector mask) { + // This type does not support vectors because all values are multi-valued + } + + private void addRawBlock(IntBlock block) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + int[] valuesArray = new int[end - start]; + for (int i = start; i < end; i++) { + valuesArray[i-start] = block.getInt(i); + } + SpatialExtentGeoShapeDocValuesAggregator.combine(state, valuesArray); + } + } + + private void addRawBlock(IntBlock block, BooleanVector mask) { + for (int p = 0; p < block.getPositionCount(); p++) { + if (mask.getBoolean(p) == false) { + continue; + } + if (block.isNull(p)) { + continue; + } + int start = block.getFirstValueIndex(p); + int end = start + block.getValueCount(p); + int[] valuesArray = new int[end - start]; + for (int i = start; i < end; i++) { + valuesArray[i-start] = block.getInt(i); + } + SpatialExtentGeoShapeDocValuesAggregator.combine(state, valuesArray); + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block topUncast = page.getBlock(channels.get(0)); + if (topUncast.areAllValuesNull()) { + return; + } + IntVector top = ((IntBlock) topUncast).asVector(); + assert top.getPositionCount() == 1; + Block bottomUncast = page.getBlock(channels.get(1)); + if (bottomUncast.areAllValuesNull()) { + return; + } + IntVector bottom = ((IntBlock) bottomUncast).asVector(); + assert bottom.getPositionCount() == 1; + Block negLeftUncast = page.getBlock(channels.get(2)); + if (negLeftUncast.areAllValuesNull()) { + return; + } + IntVector negLeft = ((IntBlock) 
negLeftUncast).asVector(); + assert negLeft.getPositionCount() == 1; + Block negRightUncast = page.getBlock(channels.get(3)); + if (negRightUncast.areAllValuesNull()) { + return; + } + IntVector negRight = ((IntBlock) negRightUncast).asVector(); + assert negRight.getPositionCount() == 1; + Block posLeftUncast = page.getBlock(channels.get(4)); + if (posLeftUncast.areAllValuesNull()) { + return; + } + IntVector posLeft = ((IntBlock) posLeftUncast).asVector(); + assert posLeft.getPositionCount() == 1; + Block posRightUncast = page.getBlock(channels.get(5)); + if (posRightUncast.areAllValuesNull()) { + return; + } + IntVector posRight = ((IntBlock) posRightUncast).asVector(); + assert posRight.getPositionCount() == 1; + SpatialExtentGeoShapeDocValuesAggregator.combineIntermediate(state, top.getInt(0), bottom.getInt(0), negLeft.getInt(0), negRight.getInt(0), posLeft.getInt(0), posRight.getInt(0)); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = SpatialExtentGeoShapeDocValuesAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeDocValuesAggregatorFunctionSupplier.java similarity index 55% rename from 
x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeAggregatorFunctionSupplier.java rename to x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeDocValuesAggregatorFunctionSupplier.java index 09f210c7085f..d104c74bc507 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeAggregatorFunctionSupplier.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeDocValuesAggregatorFunctionSupplier.java @@ -12,29 +12,29 @@ import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.operator.DriverContext; /** - * {@link AggregatorFunctionSupplier} implementation for {@link SpatialExtentGeoShapeAggregator}. + * {@link AggregatorFunctionSupplier} implementation for {@link SpatialExtentGeoShapeDocValuesAggregator}. * This class is generated. Do not edit it. 
*/ -public final class SpatialExtentGeoShapeAggregatorFunctionSupplier implements AggregatorFunctionSupplier { +public final class SpatialExtentGeoShapeDocValuesAggregatorFunctionSupplier implements AggregatorFunctionSupplier { private final List channels; - public SpatialExtentGeoShapeAggregatorFunctionSupplier(List channels) { + public SpatialExtentGeoShapeDocValuesAggregatorFunctionSupplier(List channels) { this.channels = channels; } @Override - public SpatialExtentGeoShapeAggregatorFunction aggregator(DriverContext driverContext) { - return SpatialExtentGeoShapeAggregatorFunction.create(driverContext, channels); + public SpatialExtentGeoShapeDocValuesAggregatorFunction aggregator(DriverContext driverContext) { + return SpatialExtentGeoShapeDocValuesAggregatorFunction.create(driverContext, channels); } @Override - public SpatialExtentGeoShapeGroupingAggregatorFunction groupingAggregator( + public SpatialExtentGeoShapeDocValuesGroupingAggregatorFunction groupingAggregator( DriverContext driverContext) { - return SpatialExtentGeoShapeGroupingAggregatorFunction.create(channels, driverContext); + return SpatialExtentGeoShapeDocValuesGroupingAggregatorFunction.create(channels, driverContext); } @Override public String describe() { - return "spatial_extent_geo of shapes"; + return "spatial_extent_geo_shape_doc of valuess"; } } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeDocValuesGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeDocValuesGroupingAggregatorFunction.java new file mode 100644 index 000000000000..7d286eba12ff --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeDocValuesGroupingAggregatorFunction.java @@ -0,0 +1,231 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation.spatial; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; +import org.elasticsearch.compute.aggregation.IntermediateStateDesc; +import org.elasticsearch.compute.aggregation.SeenGroupIds; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link SpatialExtentGeoShapeDocValuesAggregator}. + * This class is generated. Do not edit it. 
+ */ +public final class SpatialExtentGeoShapeDocValuesGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("top", ElementType.INT), + new IntermediateStateDesc("bottom", ElementType.INT), + new IntermediateStateDesc("negLeft", ElementType.INT), + new IntermediateStateDesc("negRight", ElementType.INT), + new IntermediateStateDesc("posLeft", ElementType.INT), + new IntermediateStateDesc("posRight", ElementType.INT) ); + + private final SpatialExtentGroupingStateWrappedLongitudeState state; + + private final List channels; + + private final DriverContext driverContext; + + public SpatialExtentGeoShapeDocValuesGroupingAggregatorFunction(List channels, + SpatialExtentGroupingStateWrappedLongitudeState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static SpatialExtentGeoShapeDocValuesGroupingAggregatorFunction create( + List channels, DriverContext driverContext) { + return new SpatialExtentGeoShapeDocValuesGroupingAggregatorFunction(channels, SpatialExtentGeoShapeDocValuesAggregator.initGrouping(), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessPage(SeenGroupIds seenGroupIds, + Page page) { + IntBlock valuesBlock = page.getBlock(channels.get(0)); + IntVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, 
IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void close() { + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void close() { + } + }; + } + + private void addRawInput(int positionOffset, IntVector groups, IntBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + int[] valuesArray = new int[valuesEnd - valuesStart]; + for (int v = valuesStart; v < valuesEnd; v++) { + valuesArray[v-valuesStart] = values.getInt(v); + } + SpatialExtentGeoShapeDocValuesAggregator.combine(state, groupId, valuesArray); + } + } + + private void addRawInput(int positionOffset, IntVector groups, IntVector values) { + // This type does not support vectors because all values are multi-valued + } + + private void addRawInput(int positionOffset, IntBlock groups, IntBlock values) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + 
values.getValueCount(groupPosition + positionOffset); + int[] valuesArray = new int[valuesEnd - valuesStart]; + for (int v = valuesStart; v < valuesEnd; v++) { + valuesArray[v-valuesStart] = values.getInt(v); + } + SpatialExtentGeoShapeDocValuesAggregator.combine(state, groupId, valuesArray); + } + } + } + + private void addRawInput(int positionOffset, IntBlock groups, IntVector values) { + // This type does not support vectors because all values are multi-valued + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block topUncast = page.getBlock(channels.get(0)); + if (topUncast.areAllValuesNull()) { + return; + } + IntVector top = ((IntBlock) topUncast).asVector(); + Block bottomUncast = page.getBlock(channels.get(1)); + if (bottomUncast.areAllValuesNull()) { + return; + } + IntVector bottom = ((IntBlock) bottomUncast).asVector(); + Block negLeftUncast = page.getBlock(channels.get(2)); + if (negLeftUncast.areAllValuesNull()) { + return; + } + IntVector negLeft = ((IntBlock) negLeftUncast).asVector(); + Block negRightUncast = page.getBlock(channels.get(3)); + if (negRightUncast.areAllValuesNull()) { + return; + } + IntVector negRight = ((IntBlock) negRightUncast).asVector(); + Block posLeftUncast = page.getBlock(channels.get(4)); + if (posLeftUncast.areAllValuesNull()) { + return; + } + IntVector posLeft = ((IntBlock) posLeftUncast).asVector(); + Block posRightUncast = page.getBlock(channels.get(5)); + if (posRightUncast.areAllValuesNull()) { + return; + } + IntVector posRight = ((IntBlock) posRightUncast).asVector(); + assert top.getPositionCount() == bottom.getPositionCount() && top.getPositionCount() == negLeft.getPositionCount() && top.getPositionCount() == 
negRight.getPositionCount() && top.getPositionCount() == posLeft.getPositionCount() && top.getPositionCount() == posRight.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + SpatialExtentGeoShapeDocValuesAggregator.combineIntermediate(state, groupId, top.getInt(groupPosition + positionOffset), bottom.getInt(groupPosition + positionOffset), negLeft.getInt(groupPosition + positionOffset), negRight.getInt(groupPosition + positionOffset), posLeft.getInt(groupPosition + positionOffset), posRight.getInt(groupPosition + positionOffset)); + } + } + + @Override + public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { + if (input.getClass() != getClass()) { + throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); + } + SpatialExtentGroupingStateWrappedLongitudeState inState = ((SpatialExtentGeoShapeDocValuesGroupingAggregatorFunction) input).state; + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + SpatialExtentGeoShapeDocValuesAggregator.combineStates(state, groupId, inState, position); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + DriverContext driverContext) { + blocks[offset] = SpatialExtentGeoShapeDocValuesAggregator.evaluateFinal(state, selected, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeSourceValuesAggregatorFunction.java similarity index 62% rename from x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeAggregatorFunction.java rename to x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeSourceValuesAggregatorFunction.java index abee9a1cee28..a16f8911d781 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeSourceValuesAggregatorFunction.java @@ -23,17 +23,17 @@ import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.DriverContext; /** - * {@link AggregatorFunction} implementation for {@link SpatialExtentGeoShapeAggregator}. + * {@link AggregatorFunction} implementation for {@link SpatialExtentGeoShapeSourceValuesAggregator}. * This class is generated. Do not edit it. 
*/ -public final class SpatialExtentGeoShapeAggregatorFunction implements AggregatorFunction { +public final class SpatialExtentGeoShapeSourceValuesAggregatorFunction implements AggregatorFunction { private static final List INTERMEDIATE_STATE_DESC = List.of( - new IntermediateStateDesc("minNegX", ElementType.INT), - new IntermediateStateDesc("minPosX", ElementType.INT), - new IntermediateStateDesc("maxNegX", ElementType.INT), - new IntermediateStateDesc("maxPosX", ElementType.INT), - new IntermediateStateDesc("maxY", ElementType.INT), - new IntermediateStateDesc("minY", ElementType.INT) ); + new IntermediateStateDesc("top", ElementType.INT), + new IntermediateStateDesc("bottom", ElementType.INT), + new IntermediateStateDesc("negLeft", ElementType.INT), + new IntermediateStateDesc("negRight", ElementType.INT), + new IntermediateStateDesc("posLeft", ElementType.INT), + new IntermediateStateDesc("posRight", ElementType.INT) ); private final DriverContext driverContext; @@ -41,16 +41,16 @@ public final class SpatialExtentGeoShapeAggregatorFunction implements Aggregator private final List channels; - public SpatialExtentGeoShapeAggregatorFunction(DriverContext driverContext, + public SpatialExtentGeoShapeSourceValuesAggregatorFunction(DriverContext driverContext, List channels, SpatialExtentStateWrappedLongitudeState state) { this.driverContext = driverContext; this.channels = channels; this.state = state; } - public static SpatialExtentGeoShapeAggregatorFunction create(DriverContext driverContext, - List channels) { - return new SpatialExtentGeoShapeAggregatorFunction(driverContext, channels, SpatialExtentGeoShapeAggregator.initSingle()); + public static SpatialExtentGeoShapeSourceValuesAggregatorFunction create( + DriverContext driverContext, List channels) { + return new SpatialExtentGeoShapeSourceValuesAggregatorFunction(driverContext, channels, SpatialExtentGeoShapeSourceValuesAggregator.initSingle()); } public static List intermediateStateDesc() { @@ -92,7 +92,7 
@@ public final class SpatialExtentGeoShapeAggregatorFunction implements Aggregator private void addRawVector(BytesRefVector vector) { BytesRef scratch = new BytesRef(); for (int i = 0; i < vector.getPositionCount(); i++) { - SpatialExtentGeoShapeAggregator.combine(state, vector.getBytesRef(i, scratch)); + SpatialExtentGeoShapeSourceValuesAggregator.combine(state, vector.getBytesRef(i, scratch)); } } @@ -102,7 +102,7 @@ public final class SpatialExtentGeoShapeAggregatorFunction implements Aggregator if (mask.getBoolean(i) == false) { continue; } - SpatialExtentGeoShapeAggregator.combine(state, vector.getBytesRef(i, scratch)); + SpatialExtentGeoShapeSourceValuesAggregator.combine(state, vector.getBytesRef(i, scratch)); } } @@ -115,7 +115,7 @@ public final class SpatialExtentGeoShapeAggregatorFunction implements Aggregator int start = block.getFirstValueIndex(p); int end = start + block.getValueCount(p); for (int i = start; i < end; i++) { - SpatialExtentGeoShapeAggregator.combine(state, block.getBytesRef(i, scratch)); + SpatialExtentGeoShapeSourceValuesAggregator.combine(state, block.getBytesRef(i, scratch)); } } } @@ -132,7 +132,7 @@ public final class SpatialExtentGeoShapeAggregatorFunction implements Aggregator int start = block.getFirstValueIndex(p); int end = start + block.getValueCount(p); for (int i = start; i < end; i++) { - SpatialExtentGeoShapeAggregator.combine(state, block.getBytesRef(i, scratch)); + SpatialExtentGeoShapeSourceValuesAggregator.combine(state, block.getBytesRef(i, scratch)); } } } @@ -141,43 +141,43 @@ public final class SpatialExtentGeoShapeAggregatorFunction implements Aggregator public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block minNegXUncast = page.getBlock(channels.get(0)); - if (minNegXUncast.areAllValuesNull()) { + Block topUncast = page.getBlock(channels.get(0)); + if (topUncast.areAllValuesNull()) 
{ return; } - IntVector minNegX = ((IntBlock) minNegXUncast).asVector(); - assert minNegX.getPositionCount() == 1; - Block minPosXUncast = page.getBlock(channels.get(1)); - if (minPosXUncast.areAllValuesNull()) { + IntVector top = ((IntBlock) topUncast).asVector(); + assert top.getPositionCount() == 1; + Block bottomUncast = page.getBlock(channels.get(1)); + if (bottomUncast.areAllValuesNull()) { return; } - IntVector minPosX = ((IntBlock) minPosXUncast).asVector(); - assert minPosX.getPositionCount() == 1; - Block maxNegXUncast = page.getBlock(channels.get(2)); - if (maxNegXUncast.areAllValuesNull()) { + IntVector bottom = ((IntBlock) bottomUncast).asVector(); + assert bottom.getPositionCount() == 1; + Block negLeftUncast = page.getBlock(channels.get(2)); + if (negLeftUncast.areAllValuesNull()) { return; } - IntVector maxNegX = ((IntBlock) maxNegXUncast).asVector(); - assert maxNegX.getPositionCount() == 1; - Block maxPosXUncast = page.getBlock(channels.get(3)); - if (maxPosXUncast.areAllValuesNull()) { + IntVector negLeft = ((IntBlock) negLeftUncast).asVector(); + assert negLeft.getPositionCount() == 1; + Block negRightUncast = page.getBlock(channels.get(3)); + if (negRightUncast.areAllValuesNull()) { return; } - IntVector maxPosX = ((IntBlock) maxPosXUncast).asVector(); - assert maxPosX.getPositionCount() == 1; - Block maxYUncast = page.getBlock(channels.get(4)); - if (maxYUncast.areAllValuesNull()) { + IntVector negRight = ((IntBlock) negRightUncast).asVector(); + assert negRight.getPositionCount() == 1; + Block posLeftUncast = page.getBlock(channels.get(4)); + if (posLeftUncast.areAllValuesNull()) { return; } - IntVector maxY = ((IntBlock) maxYUncast).asVector(); - assert maxY.getPositionCount() == 1; - Block minYUncast = page.getBlock(channels.get(5)); - if (minYUncast.areAllValuesNull()) { + IntVector posLeft = ((IntBlock) posLeftUncast).asVector(); + assert posLeft.getPositionCount() == 1; + Block posRightUncast = page.getBlock(channels.get(5)); + if 
(posRightUncast.areAllValuesNull()) { return; } - IntVector minY = ((IntBlock) minYUncast).asVector(); - assert minY.getPositionCount() == 1; - SpatialExtentGeoShapeAggregator.combineIntermediate(state, minNegX.getInt(0), minPosX.getInt(0), maxNegX.getInt(0), maxPosX.getInt(0), maxY.getInt(0), minY.getInt(0)); + IntVector posRight = ((IntBlock) posRightUncast).asVector(); + assert posRight.getPositionCount() == 1; + SpatialExtentGeoShapeSourceValuesAggregator.combineIntermediate(state, top.getInt(0), bottom.getInt(0), negLeft.getInt(0), negRight.getInt(0), posLeft.getInt(0), posRight.getInt(0)); } @Override @@ -187,7 +187,7 @@ public final class SpatialExtentGeoShapeAggregatorFunction implements Aggregator @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { - blocks[offset] = SpatialExtentGeoShapeAggregator.evaluateFinal(state, driverContext); + blocks[offset] = SpatialExtentGeoShapeSourceValuesAggregator.evaluateFinal(state, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeSourceValuesAggregatorFunctionSupplier.java similarity index 54% rename from x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeAggregatorFunctionSupplier.java rename to x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeSourceValuesAggregatorFunctionSupplier.java index 9e4b292a0ea2..1eeb17367d85 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeAggregatorFunctionSupplier.java +++ 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeSourceValuesAggregatorFunctionSupplier.java @@ -12,29 +12,30 @@ import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.operator.DriverContext; /** - * {@link AggregatorFunctionSupplier} implementation for {@link SpatialExtentCartesianShapeAggregator}. + * {@link AggregatorFunctionSupplier} implementation for {@link SpatialExtentGeoShapeSourceValuesAggregator}. * This class is generated. Do not edit it. */ -public final class SpatialExtentCartesianShapeAggregatorFunctionSupplier implements AggregatorFunctionSupplier { +public final class SpatialExtentGeoShapeSourceValuesAggregatorFunctionSupplier implements AggregatorFunctionSupplier { private final List channels; - public SpatialExtentCartesianShapeAggregatorFunctionSupplier(List channels) { + public SpatialExtentGeoShapeSourceValuesAggregatorFunctionSupplier(List channels) { this.channels = channels; } @Override - public SpatialExtentCartesianShapeAggregatorFunction aggregator(DriverContext driverContext) { - return SpatialExtentCartesianShapeAggregatorFunction.create(driverContext, channels); + public SpatialExtentGeoShapeSourceValuesAggregatorFunction aggregator( + DriverContext driverContext) { + return SpatialExtentGeoShapeSourceValuesAggregatorFunction.create(driverContext, channels); } @Override - public SpatialExtentCartesianShapeGroupingAggregatorFunction groupingAggregator( + public SpatialExtentGeoShapeSourceValuesGroupingAggregatorFunction groupingAggregator( DriverContext driverContext) { - return SpatialExtentCartesianShapeGroupingAggregatorFunction.create(channels, driverContext); + return SpatialExtentGeoShapeSourceValuesGroupingAggregatorFunction.create(channels, driverContext); } @Override public String describe() { - return "spatial_extent_cartesian of shapes"; + return "spatial_extent_geo_shape_source of valuess"; } } diff --git 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeSourceValuesGroupingAggregatorFunction.java similarity index 67% rename from x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeGroupingAggregatorFunction.java rename to x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeSourceValuesGroupingAggregatorFunction.java index 1200259ea6c4..8c768496e590 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeSourceValuesGroupingAggregatorFunction.java @@ -23,17 +23,17 @@ import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.DriverContext; /** - * {@link GroupingAggregatorFunction} implementation for {@link SpatialExtentGeoShapeAggregator}. + * {@link GroupingAggregatorFunction} implementation for {@link SpatialExtentGeoShapeSourceValuesAggregator}. * This class is generated. Do not edit it. 
*/ -public final class SpatialExtentGeoShapeGroupingAggregatorFunction implements GroupingAggregatorFunction { +public final class SpatialExtentGeoShapeSourceValuesGroupingAggregatorFunction implements GroupingAggregatorFunction { private static final List INTERMEDIATE_STATE_DESC = List.of( - new IntermediateStateDesc("minNegX", ElementType.INT), - new IntermediateStateDesc("minPosX", ElementType.INT), - new IntermediateStateDesc("maxNegX", ElementType.INT), - new IntermediateStateDesc("maxPosX", ElementType.INT), - new IntermediateStateDesc("maxY", ElementType.INT), - new IntermediateStateDesc("minY", ElementType.INT) ); + new IntermediateStateDesc("top", ElementType.INT), + new IntermediateStateDesc("bottom", ElementType.INT), + new IntermediateStateDesc("negLeft", ElementType.INT), + new IntermediateStateDesc("negRight", ElementType.INT), + new IntermediateStateDesc("posLeft", ElementType.INT), + new IntermediateStateDesc("posRight", ElementType.INT) ); private final SpatialExtentGroupingStateWrappedLongitudeState state; @@ -41,16 +41,16 @@ public final class SpatialExtentGeoShapeGroupingAggregatorFunction implements Gr private final DriverContext driverContext; - public SpatialExtentGeoShapeGroupingAggregatorFunction(List channels, + public SpatialExtentGeoShapeSourceValuesGroupingAggregatorFunction(List channels, SpatialExtentGroupingStateWrappedLongitudeState state, DriverContext driverContext) { this.channels = channels; this.state = state; this.driverContext = driverContext; } - public static SpatialExtentGeoShapeGroupingAggregatorFunction create(List channels, - DriverContext driverContext) { - return new SpatialExtentGeoShapeGroupingAggregatorFunction(channels, SpatialExtentGeoShapeAggregator.initGrouping(), driverContext); + public static SpatialExtentGeoShapeSourceValuesGroupingAggregatorFunction create( + List channels, DriverContext driverContext) { + return new SpatialExtentGeoShapeSourceValuesGroupingAggregatorFunction(channels, 
SpatialExtentGeoShapeSourceValuesAggregator.initGrouping(), driverContext); } public static List intermediateStateDesc() { @@ -114,7 +114,7 @@ public final class SpatialExtentGeoShapeGroupingAggregatorFunction implements Gr int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); for (int v = valuesStart; v < valuesEnd; v++) { - SpatialExtentGeoShapeAggregator.combine(state, groupId, values.getBytesRef(v, scratch)); + SpatialExtentGeoShapeSourceValuesAggregator.combine(state, groupId, values.getBytesRef(v, scratch)); } } } @@ -123,7 +123,7 @@ public final class SpatialExtentGeoShapeGroupingAggregatorFunction implements Gr BytesRef scratch = new BytesRef(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { int groupId = groups.getInt(groupPosition); - SpatialExtentGeoShapeAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); + SpatialExtentGeoShapeSourceValuesAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } @@ -143,7 +143,7 @@ public final class SpatialExtentGeoShapeGroupingAggregatorFunction implements Gr int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); for (int v = valuesStart; v < valuesEnd; v++) { - SpatialExtentGeoShapeAggregator.combine(state, groupId, values.getBytesRef(v, scratch)); + SpatialExtentGeoShapeSourceValuesAggregator.combine(state, groupId, values.getBytesRef(v, scratch)); } } } @@ -159,7 +159,7 @@ public final class SpatialExtentGeoShapeGroupingAggregatorFunction implements Gr int groupEnd = groupStart + groups.getValueCount(groupPosition); for (int g = groupStart; g < groupEnd; g++) { int groupId = groups.getInt(g); - SpatialExtentGeoShapeAggregator.combine(state, groupId, 
values.getBytesRef(groupPosition + positionOffset, scratch)); + SpatialExtentGeoShapeSourceValuesAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); } } } @@ -173,40 +173,40 @@ public final class SpatialExtentGeoShapeGroupingAggregatorFunction implements Gr public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { state.enableGroupIdTracking(new SeenGroupIds.Empty()); assert channels.size() == intermediateBlockCount(); - Block minNegXUncast = page.getBlock(channels.get(0)); - if (minNegXUncast.areAllValuesNull()) { + Block topUncast = page.getBlock(channels.get(0)); + if (topUncast.areAllValuesNull()) { return; } - IntVector minNegX = ((IntBlock) minNegXUncast).asVector(); - Block minPosXUncast = page.getBlock(channels.get(1)); - if (minPosXUncast.areAllValuesNull()) { + IntVector top = ((IntBlock) topUncast).asVector(); + Block bottomUncast = page.getBlock(channels.get(1)); + if (bottomUncast.areAllValuesNull()) { return; } - IntVector minPosX = ((IntBlock) minPosXUncast).asVector(); - Block maxNegXUncast = page.getBlock(channels.get(2)); - if (maxNegXUncast.areAllValuesNull()) { + IntVector bottom = ((IntBlock) bottomUncast).asVector(); + Block negLeftUncast = page.getBlock(channels.get(2)); + if (negLeftUncast.areAllValuesNull()) { return; } - IntVector maxNegX = ((IntBlock) maxNegXUncast).asVector(); - Block maxPosXUncast = page.getBlock(channels.get(3)); - if (maxPosXUncast.areAllValuesNull()) { + IntVector negLeft = ((IntBlock) negLeftUncast).asVector(); + Block negRightUncast = page.getBlock(channels.get(3)); + if (negRightUncast.areAllValuesNull()) { return; } - IntVector maxPosX = ((IntBlock) maxPosXUncast).asVector(); - Block maxYUncast = page.getBlock(channels.get(4)); - if (maxYUncast.areAllValuesNull()) { + IntVector negRight = ((IntBlock) negRightUncast).asVector(); + Block posLeftUncast = page.getBlock(channels.get(4)); + if (posLeftUncast.areAllValuesNull()) { return; } - 
IntVector maxY = ((IntBlock) maxYUncast).asVector(); - Block minYUncast = page.getBlock(channels.get(5)); - if (minYUncast.areAllValuesNull()) { + IntVector posLeft = ((IntBlock) posLeftUncast).asVector(); + Block posRightUncast = page.getBlock(channels.get(5)); + if (posRightUncast.areAllValuesNull()) { return; } - IntVector minY = ((IntBlock) minYUncast).asVector(); - assert minNegX.getPositionCount() == minPosX.getPositionCount() && minNegX.getPositionCount() == maxNegX.getPositionCount() && minNegX.getPositionCount() == maxPosX.getPositionCount() && minNegX.getPositionCount() == maxY.getPositionCount() && minNegX.getPositionCount() == minY.getPositionCount(); + IntVector posRight = ((IntBlock) posRightUncast).asVector(); + assert top.getPositionCount() == bottom.getPositionCount() && top.getPositionCount() == negLeft.getPositionCount() && top.getPositionCount() == negRight.getPositionCount() && top.getPositionCount() == posLeft.getPositionCount() && top.getPositionCount() == posRight.getPositionCount(); for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { int groupId = groups.getInt(groupPosition); - SpatialExtentGeoShapeAggregator.combineIntermediate(state, groupId, minNegX.getInt(groupPosition + positionOffset), minPosX.getInt(groupPosition + positionOffset), maxNegX.getInt(groupPosition + positionOffset), maxPosX.getInt(groupPosition + positionOffset), maxY.getInt(groupPosition + positionOffset), minY.getInt(groupPosition + positionOffset)); + SpatialExtentGeoShapeSourceValuesAggregator.combineIntermediate(state, groupId, top.getInt(groupPosition + positionOffset), bottom.getInt(groupPosition + positionOffset), negLeft.getInt(groupPosition + positionOffset), negRight.getInt(groupPosition + positionOffset), posLeft.getInt(groupPosition + positionOffset), posRight.getInt(groupPosition + positionOffset)); } } @@ -215,9 +215,9 @@ public final class SpatialExtentGeoShapeGroupingAggregatorFunction implements Gr if 
(input.getClass() != getClass()) { throw new IllegalArgumentException("expected " + getClass() + "; got " + input.getClass()); } - SpatialExtentGroupingStateWrappedLongitudeState inState = ((SpatialExtentGeoShapeGroupingAggregatorFunction) input).state; + SpatialExtentGroupingStateWrappedLongitudeState inState = ((SpatialExtentGeoShapeSourceValuesGroupingAggregatorFunction) input).state; state.enableGroupIdTracking(new SeenGroupIds.Empty()); - SpatialExtentGeoShapeAggregator.combineStates(state, groupId, inState, position); + SpatialExtentGeoShapeSourceValuesAggregator.combineStates(state, groupId, inState, position); } @Override @@ -228,7 +228,7 @@ public final class SpatialExtentGeoShapeGroupingAggregatorFunction implements Gr @Override public void evaluateFinal(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { - blocks[offset] = SpatialExtentGeoShapeAggregator.evaluateFinal(state, selected, driverContext); + blocks[offset] = SpatialExtentGeoShapeSourceValuesAggregator.evaluateFinal(state, selected, driverContext); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/GeoPointEnvelopeVisitor.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/GeoPointEnvelopeVisitor.java deleted file mode 100644 index 6bdd028f3d6e..000000000000 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/GeoPointEnvelopeVisitor.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.compute.aggregation.spatial; - -import org.elasticsearch.geometry.Rectangle; -import org.elasticsearch.geometry.utils.SpatialEnvelopeVisitor; -import org.elasticsearch.geometry.utils.SpatialEnvelopeVisitor.WrapLongitude; - -class GeoPointEnvelopeVisitor extends SpatialEnvelopeVisitor.GeoPointVisitor { - GeoPointEnvelopeVisitor() { - super(WrapLongitude.WRAP); - } - - void reset() { - minY = Double.POSITIVE_INFINITY; - maxY = Double.NEGATIVE_INFINITY; - minNegX = Double.POSITIVE_INFINITY; - maxNegX = Double.NEGATIVE_INFINITY; - minPosX = Double.POSITIVE_INFINITY; - maxPosX = Double.NEGATIVE_INFINITY; - } - - double getMinNegX() { - return minNegX; - } - - double getMinPosX() { - return minPosX; - } - - double getMaxNegX() { - return maxNegX; - } - - double getMaxPosX() { - return maxPosX; - } - - double getMaxY() { - return maxY; - } - - double getMinY() { - return minY; - } - - static Rectangle asRectangle( - double minNegX, - double minPosX, - double maxNegX, - double maxPosX, - double maxY, - double minY, - WrapLongitude wrapLongitude - ) { - return SpatialEnvelopeVisitor.GeoPointVisitor.getResult(minNegX, minPosX, maxNegX, maxPosX, maxY, minY, wrapLongitude); - } -} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialAggregationUtils.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialAggregationUtils.java index 6b29b20601da..671ef6116ae6 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialAggregationUtils.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialAggregationUtils.java @@ -12,12 +12,10 @@ import org.apache.lucene.geo.XYEncodingUtils; import org.apache.lucene.util.BytesRef; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.Point; -import org.elasticsearch.geometry.Rectangle; import 
org.elasticsearch.geometry.utils.GeometryValidator; -import org.elasticsearch.geometry.utils.SpatialEnvelopeVisitor.WrapLongitude; import org.elasticsearch.geometry.utils.WellKnownBinary; -class SpatialAggregationUtils { +public class SpatialAggregationUtils { private SpatialAggregationUtils() { /* Utility class */ } public static Geometry decode(BytesRef wkb) { @@ -52,26 +50,12 @@ class SpatialAggregationUtils { return GeoEncodingUtils.decodeLatitude((int) (encoded >>> 32)); } - public static int encodeNegativeLongitude(double d) { - return Double.isFinite(d) ? GeoEncodingUtils.encodeLongitude(d) : DEFAULT_NEG; + public static int encodeLongitude(double d) { + return Double.isFinite(d) ? GeoEncodingUtils.encodeLongitude(d) : encodeInfinity(d); } - public static int encodePositiveLongitude(double d) { - return Double.isFinite(d) ? GeoEncodingUtils.encodeLongitude(d) : DEFAULT_POS; - } - - public static Rectangle asRectangle(int minNegX, int minPosX, int maxNegX, int maxPosX, int maxY, int minY) { - assert minNegX <= 0 == maxNegX <= 0; - assert minPosX >= 0 == maxPosX >= 0; - return GeoPointEnvelopeVisitor.asRectangle( - minNegX <= 0 ? decodeLongitude(minNegX) : Double.POSITIVE_INFINITY, - minPosX >= 0 ? decodeLongitude(minPosX) : Double.POSITIVE_INFINITY, - maxNegX <= 0 ? decodeLongitude(maxNegX) : Double.NEGATIVE_INFINITY, - maxPosX >= 0 ? decodeLongitude(maxPosX) : Double.NEGATIVE_INFINITY, - GeoEncodingUtils.decodeLatitude(maxY), - GeoEncodingUtils.decodeLatitude(minY), - WrapLongitude.WRAP - ); + private static int encodeInfinity(double d) { + return d == Double.NEGATIVE_INFINITY ? Integer.MIN_VALUE : Integer.MAX_VALUE; } public static int maxNeg(int a, int b) { @@ -81,8 +65,4 @@ class SpatialAggregationUtils { public static int minPos(int a, int b) { return a >= 0 && b >= 0 ? Math.min(a, b) : Math.max(a, b); } - - // The default values are intentionally non-negative/non-positive, so we can mark unassigned values. 
- public static final int DEFAULT_POS = -1; - public static final int DEFAULT_NEG = 1; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianPointDocValuesAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianPointDocValuesAggregator.java index f64949b77707..3a0775458856 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianPointDocValuesAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianPointDocValuesAggregator.java @@ -14,6 +14,9 @@ import org.elasticsearch.compute.ann.IntermediateState; /** * Computes the extent of a set of cartesian points. It is assumed the points are encoded as longs. * This requires that the planner has planned that points are loaded from the index as doc-values. + * The intermediate state is the extent of the shapes, encoded as four integers: minX, maxX, maxY, minY. + * The order of the integers is the same as defined in the constructor of the Rectangle class. + * Note that this is very different from the six values used for the intermediate state of geo_shape geometries. 
*/ @Aggregator( { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianPointSourceValuesAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianPointSourceValuesAggregator.java index 3488af4525dc..f7a74915f852 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianPointSourceValuesAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianPointSourceValuesAggregator.java @@ -15,8 +15,9 @@ import org.elasticsearch.compute.ann.IntermediateState; /** * Computes the extent of a set of cartesian points. It is assumed that the cartesian points are encoded as WKB BytesRef. * This requires that the planner has NOT planned that points are loaded from the index as doc-values, but from source instead. - * This is also used for final aggregations and aggregations in the coordinator node, - * even if the local node partial aggregation is done with {@link SpatialExtentCartesianPointDocValuesAggregator}. + * The intermediate state is the extent of the shapes, encoded as four integers: minX, maxX, maxY, minY. + * The order of the integers is the same as defined in the constructor of the Rectangle class. + * Note that this is very different from the six values used for the intermediate state of geo_shape geometries. 
*/ @Aggregator( { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeDocValuesAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeDocValuesAggregator.java new file mode 100644 index 000000000000..1305139ab2c2 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeDocValuesAggregator.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation.spatial; + +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; + +/** + * Computes the extent of a set of cartesian shapes read from doc-values, which means they are encoded as an array of integers. + * This requires that the planner has planned that the shape extent is loaded from the index as doc-values. + * The intermediate state is the extent of the shapes, encoded as four integers: minX, maxX, maxY, minY. + * The order of the integers is the same as defined in the constructor of the Rectangle class. + * Note that this is very different from the six values used for the intermediate state of geo_shape geometries. 
+ */ +@Aggregator( + { + @IntermediateState(name = "minX", type = "INT"), + @IntermediateState(name = "maxX", type = "INT"), + @IntermediateState(name = "maxY", type = "INT"), + @IntermediateState(name = "minY", type = "INT") } +) +@GroupingAggregator +class SpatialExtentCartesianShapeDocValuesAggregator extends SpatialExtentAggregator { + public static SpatialExtentState initSingle() { + return new SpatialExtentState(PointType.CARTESIAN); + } + + public static SpatialExtentGroupingState initGrouping() { + return new SpatialExtentGroupingState(PointType.CARTESIAN); + } + + public static void combine(SpatialExtentState current, int[] values) { + current.add(values); + } + + public static void combine(SpatialExtentGroupingState current, int groupId, int[] values) { + current.add(groupId, values); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeSourceValuesAggregator.java similarity index 67% rename from x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeAggregator.java rename to x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeSourceValuesAggregator.java index 6d50d27aa5a2..adcf072fbddd 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentCartesianShapeSourceValuesAggregator.java @@ -13,8 +13,11 @@ import org.elasticsearch.compute.ann.GroupingAggregator; import org.elasticsearch.compute.ann.IntermediateState; /** - * Computes the extent of a set of cartesian shapes. It is assumed that the cartesian shapes are encoded as WKB BytesRef. 
- * We do not currently support reading shape values or extents from doc values. + * Computes the extent of a set of cartesian shapes read from source, which means they are encoded as WKB BytesRef. + * This requires that the planner has NOT planned that shapes are loaded from the index as doc-values, but from source instead. + * The intermediate state is the extent of the shapes, encoded as four integers: minX, maxX, maxY, minY. + * The order of the integers is the same as defined in the constructor of the Rectangle class. + * Note that this is very different from the six values used for the intermediate state of geo_shape geometries. */ @Aggregator( { @@ -24,7 +27,7 @@ import org.elasticsearch.compute.ann.IntermediateState; @IntermediateState(name = "minY", type = "INT") } ) @GroupingAggregator -class SpatialExtentCartesianShapeAggregator extends SpatialExtentAggregator { +class SpatialExtentCartesianShapeSourceValuesAggregator extends SpatialExtentAggregator { public static SpatialExtentState initSingle() { return new SpatialExtentState(PointType.CARTESIAN); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointDocValuesAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointDocValuesAggregator.java index b9b8bf65e116..93008d4ee4ff 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointDocValuesAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointDocValuesAggregator.java @@ -14,15 +14,19 @@ import org.elasticsearch.compute.ann.IntermediateState; /** * Computes the extent of a set of geo points. It is assumed the points are encoded as longs. * This requires that the planner has planned that points are loaded from the index as doc-values. 
+ * The intermediate state is the extent of the shapes, encoded as six integers: top, bottom, negLeft, negRight, posLeft, posRight. + * The order of the integers is the same as defined in the constructor of the Extent class, + * as that is the order in which the values are stored in shape doc-values. + * Note that this is very different from the four values used for the intermediate state of cartesian_shape geometries. */ @Aggregator( { - @IntermediateState(name = "minNegX", type = "INT"), - @IntermediateState(name = "minPosX", type = "INT"), - @IntermediateState(name = "maxNegX", type = "INT"), - @IntermediateState(name = "maxPosX", type = "INT"), - @IntermediateState(name = "maxY", type = "INT"), - @IntermediateState(name = "minY", type = "INT") } + @IntermediateState(name = "top", type = "INT"), + @IntermediateState(name = "bottom", type = "INT"), + @IntermediateState(name = "negLeft", type = "INT"), + @IntermediateState(name = "negRight", type = "INT"), + @IntermediateState(name = "posLeft", type = "INT"), + @IntermediateState(name = "posRight", type = "INT") } ) @GroupingAggregator class SpatialExtentGeoPointDocValuesAggregator extends SpatialExtentLongitudeWrappingAggregator { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointSourceValuesAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointSourceValuesAggregator.java index 36a4e359f23f..d454b40b1a44 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointSourceValuesAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointSourceValuesAggregator.java @@ -15,17 +15,19 @@ import org.elasticsearch.compute.ann.IntermediateState; /** * Computes the extent of a set of geo points. 
It is assumed that the geo points are encoded as WKB BytesRef. * This requires that the planner has NOT planned that points are loaded from the index as doc-values, but from source instead. - * This is also used for final aggregations and aggregations in the coordinator node, - * even if the local node partial aggregation is done with {@link SpatialExtentGeoPointDocValuesAggregator}. + * The intermediate state is the extent of the shapes, encoded as six integers: top, bottom, negLeft, negRight, posLeft, posRight. + * The order of the integers is the same as defined in the constructor of the Extent class, + * as that is the order in which the values are stored in shape doc-values. + * Note that this is very different from the four values used for the intermediate state of cartesian_shape geometries. */ @Aggregator( { - @IntermediateState(name = "minNegX", type = "INT"), - @IntermediateState(name = "minPosX", type = "INT"), - @IntermediateState(name = "maxNegX", type = "INT"), - @IntermediateState(name = "maxPosX", type = "INT"), - @IntermediateState(name = "maxY", type = "INT"), - @IntermediateState(name = "minY", type = "INT") } + @IntermediateState(name = "top", type = "INT"), + @IntermediateState(name = "bottom", type = "INT"), + @IntermediateState(name = "negLeft", type = "INT"), + @IntermediateState(name = "negRight", type = "INT"), + @IntermediateState(name = "posLeft", type = "INT"), + @IntermediateState(name = "posRight", type = "INT") } ) @GroupingAggregator class SpatialExtentGeoPointSourceValuesAggregator extends SpatialExtentLongitudeWrappingAggregator { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeDocValuesAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeDocValuesAggregator.java new file mode 100644 index 000000000000..26f8ae156aac --- /dev/null +++ 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeDocValuesAggregator.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation.spatial; + +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; + +/** + * Computes the extent of a set of geo shapes read from doc-values, which means they are encoded as an array of integers. + * This requires that the planner has planned that the shape extent is loaded from the index as doc-values. + * The intermediate state is the extent of the shapes, encoded as six integers: top, bottom, negLeft, negRight, posLeft, posRight. + * The order of the integers is the same as defined in the constructor of the Extent class, + * as that is the order in which the values are stored in shape doc-values. + * Note that this is very different from the four values used for the intermediate state of cartesian_shape geometries. 
+ */ +@Aggregator( + { + @IntermediateState(name = "top", type = "INT"), + @IntermediateState(name = "bottom", type = "INT"), + @IntermediateState(name = "negLeft", type = "INT"), + @IntermediateState(name = "negRight", type = "INT"), + @IntermediateState(name = "posLeft", type = "INT"), + @IntermediateState(name = "posRight", type = "INT") } +) +@GroupingAggregator +class SpatialExtentGeoShapeDocValuesAggregator extends SpatialExtentLongitudeWrappingAggregator { + public static SpatialExtentStateWrappedLongitudeState initSingle() { + return new SpatialExtentStateWrappedLongitudeState(); + } + + public static SpatialExtentGroupingStateWrappedLongitudeState initGrouping() { + return new SpatialExtentGroupingStateWrappedLongitudeState(); + } + + public static void combine(SpatialExtentStateWrappedLongitudeState current, int[] values) { + current.add(values); + } + + public static void combine(SpatialExtentGroupingStateWrappedLongitudeState current, int groupId, int[] values) { + current.add(groupId, values); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeSourceValuesAggregator.java similarity index 52% rename from x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeAggregator.java rename to x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeSourceValuesAggregator.java index 3d1b9b6300c9..cda0aedfb3ae 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoShapeSourceValuesAggregator.java @@ -13,21 +13,24 @@ import org.elasticsearch.compute.ann.GroupingAggregator; 
import org.elasticsearch.compute.ann.IntermediateState; /** - * Computes the extent of a set of geo shapes. It is assumed that the geo shapes are encoded as WKB BytesRef. - * We do not currently support reading shape values or extents from doc values. + * Computes the extent of a set of geo shapes read from source, which means they are encoded as WKB BytesRef. + * This requires that the planner has NOT planned that shapes are loaded from the index as doc-values, but from source instead. + * The intermediate state is the extent of the shapes, encoded as six integers: top, bottom, negLeft, negRight, posLeft, posRight. + * The order of the integers is the same as defined in the constructor of the Extent class, + * as that is the order in which the values are stored in shape doc-values. + * Note that this is very different from the four values used for the intermediate state of cartesian_shape geometries. */ @Aggregator( { - @IntermediateState(name = "minNegX", type = "INT"), - @IntermediateState(name = "minPosX", type = "INT"), - @IntermediateState(name = "maxNegX", type = "INT"), - @IntermediateState(name = "maxPosX", type = "INT"), - @IntermediateState(name = "maxY", type = "INT"), - @IntermediateState(name = "minY", type = "INT") } + @IntermediateState(name = "top", type = "INT"), + @IntermediateState(name = "bottom", type = "INT"), + @IntermediateState(name = "negLeft", type = "INT"), + @IntermediateState(name = "negRight", type = "INT"), + @IntermediateState(name = "posLeft", type = "INT"), + @IntermediateState(name = "posRight", type = "INT") } ) @GroupingAggregator -class SpatialExtentGeoShapeAggregator extends SpatialExtentLongitudeWrappingAggregator { - // TODO support non-longitude wrapped geo shapes. 
+class SpatialExtentGeoShapeSourceValuesAggregator extends SpatialExtentLongitudeWrappingAggregator { public static SpatialExtentStateWrappedLongitudeState initSingle() { return new SpatialExtentStateWrappedLongitudeState(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGroupingState.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGroupingState.java index cb765e4d6757..9fb548dceaad 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGroupingState.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGroupingState.java @@ -18,6 +18,7 @@ import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.Rectangle; import org.elasticsearch.geometry.utils.WellKnownBinary; +import org.elasticsearch.lucene.spatial.GeometryDocValueReader; import java.nio.ByteOrder; @@ -53,11 +54,18 @@ final class SpatialExtentGroupingState extends AbstractArrayState { ) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); - assert hasValue(group); - minXsBuilder.appendInt(minXs.get(group)); - maxXsBuilder.appendInt(maxXs.get(group)); - maxYsBuilder.appendInt(maxYs.get(group)); - minYsBuilder.appendInt(minYs.get(group)); + if (hasValue(group)) { + minXsBuilder.appendInt(minXs.get(group)); + maxXsBuilder.appendInt(maxXs.get(group)); + maxYsBuilder.appendInt(maxYs.get(group)); + minYsBuilder.appendInt(minYs.get(group)); + } else { + // TODO: Should we add Nulls here instead? 
+ minXsBuilder.appendInt(Integer.MAX_VALUE); + maxXsBuilder.appendInt(Integer.MIN_VALUE); + maxYsBuilder.appendInt(Integer.MIN_VALUE); + minYsBuilder.appendInt(Integer.MAX_VALUE); + } } blocks[offset + 0] = minXsBuilder.build(); blocks[offset + 1] = maxXsBuilder.build(); @@ -66,6 +74,32 @@ final class SpatialExtentGroupingState extends AbstractArrayState { } } + /** + * This method is used when extents are extracted from the doc-values field by the {@link GeometryDocValueReader}. + * This optimization is enabled when the field has doc-values and is only used in the ST_EXTENT aggregation. + */ + public void add(int groupId, int[] values) { + if (values.length == 6) { + // Values are stored according to the order defined in the Extent class + int top = values[0]; + int bottom = values[1]; + int negLeft = values[2]; + int negRight = values[3]; + int posLeft = values[4]; + int posRight = values[5]; + add(groupId, Math.min(negLeft, posLeft), Math.max(negRight, posRight), top, bottom); + } else if (values.length == 4) { + // Values are stored according to the order defined in the Rectangle class + int minX = values[0]; + int maxX = values[1]; + int maxY = values[2]; + int minY = values[3]; + add(groupId, minX, maxX, maxY, minY); + } else { + throw new IllegalArgumentException("Expected 4 or 6 values, got " + values.length); + } + } + public void add(int groupId, Geometry geometry) { ensureCapacity(groupId); pointType.computeEnvelope(geometry) @@ -80,6 +114,10 @@ final class SpatialExtentGroupingState extends AbstractArrayState { ); } + /** + * This method is used when the field is a geo_point or cartesian_point and is loaded from doc-values. + * This optimization is enabled when the field has doc-values and is only used in a spatial aggregation. 
+ */ public void add(int groupId, long encoded) { int x = pointType.extractX(encoded); int y = pointType.extractY(encoded); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGroupingStateWrappedLongitudeState.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGroupingStateWrappedLongitudeState.java index 41bc50abcf6b..9f8fca5236d1 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGroupingStateWrappedLongitudeState.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGroupingStateWrappedLongitudeState.java @@ -19,20 +19,23 @@ import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.utils.SpatialEnvelopeVisitor; import org.elasticsearch.geometry.utils.WellKnownBinary; +import org.elasticsearch.lucene.spatial.GeometryDocValueReader; import java.nio.ByteOrder; +import static org.elasticsearch.compute.aggregation.spatial.SpatialExtentStateWrappedLongitudeState.asRectangle; + final class SpatialExtentGroupingStateWrappedLongitudeState extends AbstractArrayState implements GroupingAggregatorState { // Only geo points support longitude wrapping. 
private static final PointType POINT_TYPE = PointType.GEO; - private IntArray minNegXs; - private IntArray minPosXs; - private IntArray maxNegXs; - private IntArray maxPosXs; - private IntArray maxYs; - private IntArray minYs; + private IntArray tops; + private IntArray bottoms; + private IntArray negLefts; + private IntArray negRights; + private IntArray posLefts; + private IntArray posRights; - private GeoPointEnvelopeVisitor geoPointVisitor = new GeoPointEnvelopeVisitor(); + private final SpatialEnvelopeVisitor.GeoPointVisitor geoPointVisitor; SpatialExtentGroupingStateWrappedLongitudeState() { this(BigArrays.NON_RECYCLING_INSTANCE); @@ -40,44 +43,52 @@ final class SpatialExtentGroupingStateWrappedLongitudeState extends AbstractArra SpatialExtentGroupingStateWrappedLongitudeState(BigArrays bigArrays) { super(bigArrays); - this.minNegXs = bigArrays.newIntArray(0, false); - this.minPosXs = bigArrays.newIntArray(0, false); - this.maxNegXs = bigArrays.newIntArray(0, false); - this.maxPosXs = bigArrays.newIntArray(0, false); - this.maxYs = bigArrays.newIntArray(0, false); - this.minYs = bigArrays.newIntArray(0, false); + this.tops = bigArrays.newIntArray(0, false); + this.bottoms = bigArrays.newIntArray(0, false); + this.negLefts = bigArrays.newIntArray(0, false); + this.negRights = bigArrays.newIntArray(0, false); + this.posLefts = bigArrays.newIntArray(0, false); + this.posRights = bigArrays.newIntArray(0, false); enableGroupIdTracking(new SeenGroupIds.Empty()); + this.geoPointVisitor = new SpatialEnvelopeVisitor.GeoPointVisitor(SpatialEnvelopeVisitor.WrapLongitude.WRAP); } @Override public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { assert blocks.length >= offset; try ( - var minNegXsBuilder = driverContext.blockFactory().newIntBlockBuilder(selected.getPositionCount()); - var minPosXsBuilder = driverContext.blockFactory().newIntBlockBuilder(selected.getPositionCount()); - var maxNegXsBuilder = 
driverContext.blockFactory().newIntBlockBuilder(selected.getPositionCount()); - var maxPosXsBuilder = driverContext.blockFactory().newIntBlockBuilder(selected.getPositionCount()); - var maxYsBuilder = driverContext.blockFactory().newIntBlockBuilder(selected.getPositionCount()); - var minYsBuilder = driverContext.blockFactory().newIntBlockBuilder(selected.getPositionCount()); + var topsBuilder = driverContext.blockFactory().newIntBlockBuilder(selected.getPositionCount()); + var bottomsBuilder = driverContext.blockFactory().newIntBlockBuilder(selected.getPositionCount()); + var negLeftsBuilder = driverContext.blockFactory().newIntBlockBuilder(selected.getPositionCount()); + var negRightsBuilder = driverContext.blockFactory().newIntBlockBuilder(selected.getPositionCount()); + var posLeftsBuilder = driverContext.blockFactory().newIntBlockBuilder(selected.getPositionCount()); + var posRightsBuilder = driverContext.blockFactory().newIntBlockBuilder(selected.getPositionCount()); ) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); - assert hasValue(group); - assert minNegXs.get(group) <= 0 == maxNegXs.get(group) <= 0; - assert minPosXs.get(group) >= 0 == maxPosXs.get(group) >= 0; - minNegXsBuilder.appendInt(minNegXs.get(group)); - minPosXsBuilder.appendInt(minPosXs.get(group)); - maxNegXsBuilder.appendInt(maxNegXs.get(group)); - maxPosXsBuilder.appendInt(maxPosXs.get(group)); - maxYsBuilder.appendInt(maxYs.get(group)); - minYsBuilder.appendInt(minYs.get(group)); + if (hasValue(group)) { + topsBuilder.appendInt(tops.get(group)); + bottomsBuilder.appendInt(bottoms.get(group)); + negLeftsBuilder.appendInt(negLefts.get(group)); + negRightsBuilder.appendInt(negRights.get(group)); + posLeftsBuilder.appendInt(posLefts.get(group)); + posRightsBuilder.appendInt(posRights.get(group)); + } else { + // TODO: Should we add Nulls here instead? 
+ topsBuilder.appendInt(Integer.MIN_VALUE); + bottomsBuilder.appendInt(Integer.MAX_VALUE); + negLeftsBuilder.appendInt(Integer.MAX_VALUE); + negRightsBuilder.appendInt(Integer.MIN_VALUE); + posLeftsBuilder.appendInt(Integer.MAX_VALUE); + posRightsBuilder.appendInt(Integer.MIN_VALUE); + } } - blocks[offset + 0] = minNegXsBuilder.build(); - blocks[offset + 1] = minPosXsBuilder.build(); - blocks[offset + 2] = maxNegXsBuilder.build(); - blocks[offset + 3] = maxPosXsBuilder.build(); - blocks[offset + 4] = maxYsBuilder.build(); - blocks[offset + 5] = minYsBuilder.build(); + blocks[offset + 0] = topsBuilder.build(); + blocks[offset + 1] = bottomsBuilder.build(); + blocks[offset + 2] = negLeftsBuilder.build(); + blocks[offset + 3] = negRightsBuilder.build(); + blocks[offset + 4] = posLeftsBuilder.build(); + blocks[offset + 5] = posRightsBuilder.build(); } } @@ -87,12 +98,12 @@ final class SpatialExtentGroupingStateWrappedLongitudeState extends AbstractArra if (geo.visit(new SpatialEnvelopeVisitor(geoPointVisitor))) { add( groupId, - SpatialAggregationUtils.encodeNegativeLongitude(geoPointVisitor.getMinNegX()), - SpatialAggregationUtils.encodePositiveLongitude(geoPointVisitor.getMinPosX()), - SpatialAggregationUtils.encodeNegativeLongitude(geoPointVisitor.getMaxNegX()), - SpatialAggregationUtils.encodePositiveLongitude(geoPointVisitor.getMaxPosX()), - POINT_TYPE.encoder().encodeY(geoPointVisitor.getMaxY()), - POINT_TYPE.encoder().encodeY(geoPointVisitor.getMinY()) + POINT_TYPE.encoder().encodeY(geoPointVisitor.getTop()), + POINT_TYPE.encoder().encodeY(geoPointVisitor.getBottom()), + SpatialAggregationUtils.encodeLongitude(geoPointVisitor.getNegLeft()), + SpatialAggregationUtils.encodeLongitude(geoPointVisitor.getNegRight()), + SpatialAggregationUtils.encodeLongitude(geoPointVisitor.getPosLeft()), + SpatialAggregationUtils.encodeLongitude(geoPointVisitor.getPosRight()) ); } } @@ -102,53 +113,73 @@ final class SpatialExtentGroupingStateWrappedLongitudeState extends 
AbstractArra if (inState.hasValue(inPosition)) { add( groupId, - inState.minNegXs.get(inPosition), - inState.minPosXs.get(inPosition), - inState.maxNegXs.get(inPosition), - inState.maxPosXs.get(inPosition), - inState.maxYs.get(inPosition), - inState.minYs.get(inPosition) + inState.tops.get(inPosition), + inState.bottoms.get(inPosition), + inState.negLefts.get(inPosition), + inState.negRights.get(inPosition), + inState.posLefts.get(inPosition), + inState.posRights.get(inPosition) ); } } + /** + * This method is used when the field is a geo_point or cartesian_point and is loaded from doc-values. + * This optimization is enabled when the field has doc-values and is only used in a spatial aggregation. + */ public void add(int groupId, long encoded) { int x = POINT_TYPE.extractX(encoded); int y = POINT_TYPE.extractY(encoded); - add(groupId, x, x, x, x, y, y); + add(groupId, y, y, x, x, x, x); } - public void add(int groupId, int minNegX, int minPosX, int maxNegX, int maxPosX, int maxY, int minY) { + /** + * This method is used when extents are extracted from the doc-values field by the {@link GeometryDocValueReader}. + * This optimization is enabled when the field has doc-values and is only used in the ST_EXTENT aggregation. 
+ */ + public void add(int groupId, int[] values) { + if (values.length != 6) { + throw new IllegalArgumentException("Expected 6 values, got " + values.length); + } + // Values are stored according to the order defined in the Extent class + int top = values[0]; + int bottom = values[1]; + int negLeft = values[2]; + int negRight = values[3]; + int posLeft = values[4]; + int posRight = values[5]; + add(groupId, top, bottom, negLeft, negRight, posLeft, posRight); + } + + public void add(int groupId, int top, int bottom, int negLeft, int negRight, int posLeft, int posRight) { ensureCapacity(groupId); if (hasValue(groupId)) { - minNegXs.set(groupId, Math.min(minNegXs.get(groupId), minNegX)); - minPosXs.set(groupId, SpatialAggregationUtils.minPos(minPosXs.get(groupId), minPosX)); - maxNegXs.set(groupId, SpatialAggregationUtils.maxNeg(maxNegXs.get(groupId), maxNegX)); - maxPosXs.set(groupId, Math.max(maxPosXs.get(groupId), maxPosX)); - maxYs.set(groupId, Math.max(maxYs.get(groupId), maxY)); - minYs.set(groupId, Math.min(minYs.get(groupId), minY)); + tops.set(groupId, Math.max(tops.get(groupId), top)); + bottoms.set(groupId, Math.min(bottoms.get(groupId), bottom)); + negLefts.set(groupId, Math.min(negLefts.get(groupId), negLeft)); + negRights.set(groupId, SpatialAggregationUtils.maxNeg(negRights.get(groupId), negRight)); + posLefts.set(groupId, SpatialAggregationUtils.minPos(posLefts.get(groupId), posLeft)); + posRights.set(groupId, Math.max(posRights.get(groupId), posRight)); } else { - minNegXs.set(groupId, minNegX); - minPosXs.set(groupId, minPosX); - maxNegXs.set(groupId, maxNegX); - maxPosXs.set(groupId, maxPosX); - maxYs.set(groupId, maxY); - minYs.set(groupId, minY); + tops.set(groupId, top); + bottoms.set(groupId, bottom); + negLefts.set(groupId, negLeft); + negRights.set(groupId, negRight); + posLefts.set(groupId, posLeft); + posRights.set(groupId, posRight); } - assert minNegX <= 0 == maxNegX <= 0 : "minNegX=" + minNegX + " maxNegX=" + maxNegX; - assert minPosX 
>= 0 == maxPosX >= 0 : "minPosX=" + minPosX + " maxPosX=" + maxPosX; trackGroupId(groupId); } private void ensureCapacity(int groupId) { long requiredSize = groupId + 1; - if (minNegXs.size() < requiredSize) { - minNegXs = bigArrays.grow(minNegXs, requiredSize); - minPosXs = bigArrays.grow(minPosXs, requiredSize); - maxNegXs = bigArrays.grow(maxNegXs, requiredSize); - maxPosXs = bigArrays.grow(maxPosXs, requiredSize); - minYs = bigArrays.grow(minYs, requiredSize); - maxYs = bigArrays.grow(maxYs, requiredSize); + if (negLefts.size() < requiredSize) { + tops = bigArrays.grow(tops, requiredSize); + bottoms = bigArrays.grow(bottoms, requiredSize); + negLefts = bigArrays.grow(negLefts, requiredSize); + negRights = bigArrays.grow(negRights, requiredSize); + posLefts = bigArrays.grow(posLefts, requiredSize); + posRights = bigArrays.grow(posRights, requiredSize); } } @@ -160,13 +191,13 @@ final class SpatialExtentGroupingStateWrappedLongitudeState extends AbstractArra builder.appendBytesRef( new BytesRef( WellKnownBinary.toWKB( - SpatialAggregationUtils.asRectangle( - minNegXs.get(si), - minPosXs.get(si), - maxNegXs.get(si), - maxPosXs.get(si), - maxYs.get(si), - minYs.get(si) + asRectangle( + tops.get(si), + bottoms.get(si), + negLefts.get(si), + negRights.get(si), + posLefts.get(si), + posRights.get(si) ), ByteOrder.LITTLE_ENDIAN ) diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentLongitudeWrappingAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentLongitudeWrappingAggregator.java index 80ba2d5e4565..2d89ba78d102 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentLongitudeWrappingAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentLongitudeWrappingAggregator.java @@ -16,27 +16,27 @@ import 
org.elasticsearch.compute.operator.DriverContext; abstract class SpatialExtentLongitudeWrappingAggregator { public static void combineIntermediate( SpatialExtentStateWrappedLongitudeState current, - int minNegX, - int minPosX, - int maxNegX, - int maxPosX, - int maxY, - int minY + int top, + int bottom, + int negLeft, + int negRight, + int posLeft, + int posRight ) { - current.add(minNegX, minPosX, maxNegX, maxPosX, maxY, minY); + current.add(top, bottom, negLeft, negRight, posLeft, posRight); } public static void combineIntermediate( SpatialExtentGroupingStateWrappedLongitudeState current, int groupId, - int minNegX, - int minPosX, - int maxNegX, - int maxPosX, - int maxY, - int minY + int top, + int bottom, + int negLeft, + int negRight, + int posLeft, + int posRight ) { - current.add(groupId, minNegX, minPosX, maxNegX, maxPosX, maxY, minY); + current.add(groupId, top, bottom, negLeft, negRight, posLeft, posRight); } public static Block evaluateFinal(SpatialExtentStateWrappedLongitudeState state, DriverContext driverContext) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentState.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentState.java index 3dc150d1702a..cd52d346b09f 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentState.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentState.java @@ -15,6 +15,7 @@ import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.Rectangle; import org.elasticsearch.geometry.utils.WellKnownBinary; import org.elasticsearch.lucene.spatial.CoordinateEncoder; +import org.elasticsearch.lucene.spatial.GeometryDocValueReader; import java.nio.ByteOrder; @@ -55,6 +56,32 @@ final class SpatialExtentState implements AggregatorState { ); } + /** + * This method is used when extents 
are extracted from the doc-values field by the {@link GeometryDocValueReader}. + * This optimization is enabled when the field has doc-values and is only used in the ST_EXTENT aggregation. + */ + public void add(int[] values) { + if (values.length == 6) { + // Values are stored according to the order defined in the Extent class + int top = values[0]; + int bottom = values[1]; + int negLeft = values[2]; + int negRight = values[3]; + int posLeft = values[4]; + int posRight = values[5]; + add(Math.min(negLeft, posLeft), Math.max(negRight, posRight), top, bottom); + } else if (values.length == 4) { + // Values are stored according to the order defined in the Rectangle class + int minX = values[0]; + int maxX = values[1]; + int maxY = values[2]; + int minY = values[3]; + add(minX, maxX, maxY, minY); + } else { + throw new IllegalArgumentException("Expected 4 or 6 values, got " + values.length); + } + } + public void add(int minX, int maxX, int maxY, int minY) { seen = true; this.minX = Math.min(this.minX, minX); @@ -63,6 +90,10 @@ final class SpatialExtentState implements AggregatorState { this.minY = Math.min(this.minY, minY); } + /** + * This method is used when the field is a geo_point or cartesian_point and is loaded from doc-values. + * This optimization is enabled when the field has doc-values and is only used in a spatial aggregation. 
+ */ public void add(long encoded) { int x = pointType.extractX(encoded); int y = pointType.extractY(encoded); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentStateWrappedLongitudeState.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentStateWrappedLongitudeState.java index 0d6163636fcd..86b41b5b8359 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentStateWrappedLongitudeState.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentStateWrappedLongitudeState.java @@ -7,28 +7,35 @@ package org.elasticsearch.compute.aggregation.spatial; +import org.apache.lucene.geo.GeoEncodingUtils; import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.aggregation.AggregatorState; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.geometry.Geometry; +import org.elasticsearch.geometry.Rectangle; import org.elasticsearch.geometry.utils.SpatialEnvelopeVisitor; import org.elasticsearch.geometry.utils.WellKnownBinary; +import org.elasticsearch.lucene.spatial.GeometryDocValueReader; import java.nio.ByteOrder; +import static org.elasticsearch.compute.aggregation.spatial.SpatialAggregationUtils.decodeLongitude; + final class SpatialExtentStateWrappedLongitudeState implements AggregatorState { // Only geo points support longitude wrapping. 
private static final PointType POINT_TYPE = PointType.GEO; private boolean seen = false; - private int minNegX = SpatialAggregationUtils.DEFAULT_NEG; - private int minPosX = SpatialAggregationUtils.DEFAULT_POS; - private int maxNegX = SpatialAggregationUtils.DEFAULT_NEG; - private int maxPosX = SpatialAggregationUtils.DEFAULT_POS; - private int maxY = Integer.MIN_VALUE; - private int minY = Integer.MAX_VALUE; + private int top = Integer.MIN_VALUE; + private int bottom = Integer.MAX_VALUE; + private int negLeft = Integer.MAX_VALUE; + private int negRight = Integer.MIN_VALUE; + private int posLeft = Integer.MAX_VALUE; + private int posRight = Integer.MIN_VALUE; - private GeoPointEnvelopeVisitor geoPointVisitor = new GeoPointEnvelopeVisitor(); + private final SpatialEnvelopeVisitor.GeoPointVisitor geoPointVisitor = new SpatialEnvelopeVisitor.GeoPointVisitor( + SpatialEnvelopeVisitor.WrapLongitude.WRAP + ); @Override public void close() {} @@ -37,44 +44,64 @@ final class SpatialExtentStateWrappedLongitudeState implements AggregatorState { public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { assert blocks.length >= offset + 6; var blockFactory = driverContext.blockFactory(); - blocks[offset + 0] = blockFactory.newConstantIntBlockWith(minNegX, 1); - blocks[offset + 1] = blockFactory.newConstantIntBlockWith(minPosX, 1); - blocks[offset + 2] = blockFactory.newConstantIntBlockWith(maxNegX, 1); - blocks[offset + 3] = blockFactory.newConstantIntBlockWith(maxPosX, 1); - blocks[offset + 4] = blockFactory.newConstantIntBlockWith(maxY, 1); - blocks[offset + 5] = blockFactory.newConstantIntBlockWith(minY, 1); + blocks[offset + 0] = blockFactory.newConstantIntBlockWith(top, 1); + blocks[offset + 1] = blockFactory.newConstantIntBlockWith(bottom, 1); + blocks[offset + 2] = blockFactory.newConstantIntBlockWith(negLeft, 1); + blocks[offset + 3] = blockFactory.newConstantIntBlockWith(negRight, 1); + blocks[offset + 4] = 
blockFactory.newConstantIntBlockWith(posLeft, 1); + blocks[offset + 5] = blockFactory.newConstantIntBlockWith(posRight, 1); } public void add(Geometry geo) { geoPointVisitor.reset(); if (geo.visit(new SpatialEnvelopeVisitor(geoPointVisitor))) { add( - SpatialAggregationUtils.encodeNegativeLongitude(geoPointVisitor.getMinNegX()), - SpatialAggregationUtils.encodePositiveLongitude(geoPointVisitor.getMinPosX()), - SpatialAggregationUtils.encodeNegativeLongitude(geoPointVisitor.getMaxNegX()), - SpatialAggregationUtils.encodePositiveLongitude(geoPointVisitor.getMaxPosX()), - POINT_TYPE.encoder().encodeY(geoPointVisitor.getMaxY()), - POINT_TYPE.encoder().encodeY(geoPointVisitor.getMinY()) + POINT_TYPE.encoder().encodeY(geoPointVisitor.getTop()), + POINT_TYPE.encoder().encodeY(geoPointVisitor.getBottom()), + SpatialAggregationUtils.encodeLongitude(geoPointVisitor.getNegLeft()), + SpatialAggregationUtils.encodeLongitude(geoPointVisitor.getNegRight()), + SpatialAggregationUtils.encodeLongitude(geoPointVisitor.getPosLeft()), + SpatialAggregationUtils.encodeLongitude(geoPointVisitor.getPosRight()) ); } } - public void add(int minNegX, int minPosX, int maxNegX, int maxPosX, int maxY, int minY) { - seen = true; - this.minNegX = Math.min(this.minNegX, minNegX); - this.minPosX = SpatialAggregationUtils.minPos(this.minPosX, minPosX); - this.maxNegX = SpatialAggregationUtils.maxNeg(this.maxNegX, maxNegX); - this.maxPosX = Math.max(this.maxPosX, maxPosX); - this.maxY = Math.max(this.maxY, maxY); - this.minY = Math.min(this.minY, minY); - assert this.minNegX <= 0 == this.maxNegX <= 0 : "minNegX=" + this.minNegX + " maxNegX=" + this.maxNegX; - assert this.minPosX >= 0 == this.maxPosX >= 0 : "minPosX=" + this.minPosX + " maxPosX=" + this.maxPosX; + /** + * This method is used when extents are extracted from the doc-values field by the {@link GeometryDocValueReader}. + * This optimization is enabled when the field has doc-values and is only used in the ST_EXTENT aggregation. 
+ */ + public void add(int[] values) { + if (values.length != 6) { + throw new IllegalArgumentException("Expected 6 values, got " + values.length); + } + // Values are stored according to the order defined in the Extent class + int top = values[0]; + int bottom = values[1]; + int negLeft = values[2]; + int negRight = values[3]; + int posLeft = values[4]; + int posRight = values[5]; + add(top, bottom, negLeft, negRight, posLeft, posRight); } + public void add(int top, int bottom, int negLeft, int negRight, int posLeft, int posRight) { + seen = true; + this.top = Math.max(this.top, top); + this.bottom = Math.min(this.bottom, bottom); + this.negLeft = Math.min(this.negLeft, negLeft); + this.negRight = SpatialAggregationUtils.maxNeg(this.negRight, negRight); + this.posLeft = SpatialAggregationUtils.minPos(this.posLeft, posLeft); + this.posRight = Math.max(this.posRight, posRight); + } + + /** + * This method is used when the field is a geo_point or cartesian_point and is loaded from doc-values. + * This optimization is enabled when the field has doc-values and is only used in a spatial aggregation. + */ public void add(long encoded) { int x = POINT_TYPE.extractX(encoded); int y = POINT_TYPE.extractY(encoded); - add(x, x, x, x, y, y); + add(y, y, x, x, x, x); } public Block toBlock(DriverContext driverContext) { @@ -83,9 +110,18 @@ final class SpatialExtentStateWrappedLongitudeState implements AggregatorState { } private byte[] toWKB() { - return WellKnownBinary.toWKB( - SpatialAggregationUtils.asRectangle(minNegX, minPosX, maxNegX, maxPosX, maxY, minY), - ByteOrder.LITTLE_ENDIAN + return WellKnownBinary.toWKB(asRectangle(top, bottom, negLeft, negRight, posLeft, posRight), ByteOrder.LITTLE_ENDIAN); + } + + static Rectangle asRectangle(int top, int bottom, int negLeft, int negRight, int posLeft, int posRight) { + return SpatialEnvelopeVisitor.GeoPointVisitor.getResult( + GeoEncodingUtils.decodeLatitude(top), + GeoEncodingUtils.decodeLatitude(bottom), + negLeft <= 0 ? 
decodeLongitude(negLeft) : Double.POSITIVE_INFINITY, + negRight <= 0 ? decodeLongitude(negRight) : Double.NEGATIVE_INFINITY, + posLeft >= 0 ? decodeLongitude(posLeft) : Double.POSITIVE_INFINITY, + posRight >= 0 ? decodeLongitude(posRight) : Double.NEGATIVE_INFINITY, + SpatialEnvelopeVisitor.WrapLongitude.WRAP ); } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec index 689c36197ee7..8718112979ce 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec @@ -524,6 +524,8 @@ POINT (42.97109629958868 14.7552534006536) | 1 stExtentSingleGeoPoint required_capability: st_extent_agg +required_capability: st_extent_agg_docvalues + ROW point = TO_GEOPOINT("POINT(42.97109629958868 14.7552534006536)") | STATS extent = ST_EXTENT_AGG(point) ; @@ -534,6 +536,8 @@ BBOX(42.97109629958868, 42.97109629958868, 14.7552534006536, 14.7552534006536) stExtentMultipleGeoPoints required_capability: st_extent_agg +required_capability: st_extent_agg_docvalues + // tag::st_extent_agg-airports[] FROM airports | WHERE country == "India" @@ -547,35 +551,257 @@ BBOX (70.77995480038226, 91.5882289968431, 33.9830909203738, 8.47650992218405) // end::st_extent_agg-airports-result[] ; -stExtentMultipleGeoPointsNoDocValues +stExtentMultipleGeoPointsCount required_capability: st_extent_agg -FROM airports_no_doc_values | WHERE country == "India" | STATS extent = ST_EXTENT_AGG(location) +required_capability: st_extent_agg_docvalues + +FROM airports +| WHERE country == "India" +| STATS extent = ST_EXTENT_AGG(location), count = COUNT() ; -extent:geo_shape -BBOX (70.77995480038226, 91.5882289968431, 33.9830909203738, 8.47650992218405) +extent:geo_shape | count:long +BBOX (70.77995480038226, 91.5882289968431, 33.9830909203738, 8.47650992218405) | 50 +; + +stExtentMultipleGeoPointsCountNoDocValues 
+required_capability: st_extent_agg +required_capability: st_extent_agg_docvalues + +FROM airports_no_doc_values +| WHERE country == "India" +| STATS extent = ST_EXTENT_AGG(location), count = COUNT() +; + +extent:geo_shape | count:long +BBOX (70.77995480038226, 91.5882289968431, 33.9830909203738, 8.47650992218405) | 50 ; stExtentMultipleGeoPointGrouping required_capability: st_extent_agg -FROM airports | STATS extent = ST_EXTENT_AGG(location) BY country | SORT country | LIMIT 3 +required_capability: st_extent_agg_docvalues + +FROM airports +| STATS extent = ST_EXTENT_AGG(location), count = COUNT() BY country +| SORT count DESC, country ASC +| LIMIT 5 ; -extent:geo_shape | country:keyword -BBOX (69.2100736219436, 69.2100736219436, 34.56339786294848, 34.56339786294848) | Afghanistan -BBOX (19.715032372623682, 19.715032372623682, 41.4208514476195, 41.4208514476195) | Albania -BBOX (-0.6067969836294651, 6.621946580708027, 36.69972063973546, 35.62027471605688) | Algeria +extent:geo_shape | count:long | country:keyword +BBOX (-159.34908430092037, -71.01640669628978, 64.81809809803963, 19.71479767933488) | 129 | United States +BBOX (70.77995480038226, 91.5882289968431, 33.9830909203738, 8.47650992218405) | 50 | India +BBOX (-117.19751106575131, -86.87441730871797, 32.833958650007844, 14.791128113865852) | 45 | Mexico +BBOX (76.01301474496722, 130.45620465651155, 46.84301500674337, 18.309095981530845) | 41 | China +BBOX (-135.07621010765433, -52.743333745747805, 63.751152316108346, 43.163360520266) | 37 | Canada ; stExtentGeoShapes required_capability: st_extent_agg -FROM airport_city_boundaries | WHERE region == "City of New York" | STATS extent = ST_EXTENT_AGG(city_boundary) +required_capability: st_extent_agg_docvalues + +FROM airport_city_boundaries +| WHERE region == "City of New York" +| STATS extent = ST_EXTENT_AGG(city_boundary) ; extent:geo_shape BBOX (-74.25880000926554, -73.70020005851984, 40.91759996954352, 40.47659996431321) ; +stExtentGeoPoints 
+required_capability: st_extent_agg +required_capability: st_extent_agg_docvalues + +FROM airport_city_boundaries +| WHERE region == "City of New York" +| STATS extent = ST_EXTENT_AGG(city_location) +; + +extent:geo_shape +BBOX (-73.92490002326667, -73.92490002326667, 40.69429999217391, 40.69429999217391) +; + +stExtentGeoShapesAndPoints +required_capability: st_extent_agg +required_capability: st_extent_agg_docvalues + +FROM airport_city_boundaries +| WHERE region == "City of New York" +| STATS extent_shapes = ST_EXTENT_AGG(city_boundary), extent_points = ST_EXTENT_AGG(city_location) +; + +extent_shapes:geo_shape | extent_points:geo_shape +BBOX (-74.25880000926554, -73.70020005851984, 40.91759996954352, 40.47659996431321) | BBOX (-73.92490002326667, -73.92490002326667, 40.69429999217391, 40.69429999217391) +; + +stExtentGeoShapesGrouped +required_capability: st_extent_agg +required_capability: st_extent_agg_docvalues + +FROM airport_city_boundaries +| WHERE region == "City of New York" +| EVAL prefix = SUBSTRING(abbrev, 1, 1) +| STATS extent = ST_EXTENT_AGG(city_boundary) BY prefix +| KEEP prefix, extent +| SORT prefix ASC +; + +prefix:keyword | extent:geo_shape +E | BBOX (-74.25880000926554, -73.70020005851984, 40.91759996954352, 40.47659996431321) +J | BBOX (-74.25880000926554, -73.70020005851984, 40.91759996954352, 40.47659996431321) +L | BBOX (-74.25880000926554, -73.70020005851984, 40.91759996954352, 40.47659996431321) +; + +stExtentGeoPointsGrouped +required_capability: st_extent_agg +required_capability: st_extent_agg_docvalues + +FROM airport_city_boundaries +| WHERE region == "City of New York" +| EVAL prefix = SUBSTRING(abbrev, 1, 1) +| STATS extent = ST_EXTENT_AGG(city_location) BY prefix +| KEEP prefix, extent +| SORT prefix ASC +; + +prefix:keyword | extent:geo_shape +E | BBOX (-73.92490002326667, -73.92490002326667, 40.69429999217391, 40.69429999217391) +J | BBOX (-73.92490002326667, -73.92490002326667, 40.69429999217391, 40.69429999217391) +L | BBOX 
(-73.92490002326667, -73.92490002326667, 40.69429999217391, 40.69429999217391) +; + +stExtentGeoShapesAndPointsGrouped +required_capability: st_extent_agg +required_capability: st_extent_agg_docvalues + +FROM airport_city_boundaries +| WHERE region == "City of New York" +| EVAL prefix = SUBSTRING(abbrev, 1, 1) +| STATS extent_shapes = ST_EXTENT_AGG(city_boundary), extent_points = ST_EXTENT_AGG(city_location) BY prefix +| KEEP prefix, extent_shapes, extent_points +| SORT prefix ASC +; + +prefix:keyword | extent_shapes:geo_shape | extent_points:geo_shape +E | BBOX (-74.25880000926554, -73.70020005851984, 40.91759996954352, 40.47659996431321) | BBOX (-73.92490002326667, -73.92490002326667, 40.69429999217391, 40.69429999217391) +J | BBOX (-74.25880000926554, -73.70020005851984, 40.91759996954352, 40.47659996431321) | BBOX (-73.92490002326667, -73.92490002326667, 40.69429999217391, 40.69429999217391) +L | BBOX (-74.25880000926554, -73.70020005851984, 40.91759996954352, 40.47659996431321) | BBOX (-73.92490002326667, -73.92490002326667, 40.69429999217391, 40.69429999217391) +; + +stExtentManyGeoShapesGrouped +required_capability: st_extent_agg +required_capability: st_extent_agg_docvalues + +FROM airport_city_boundaries +| EVAL prefix = SUBSTRING(abbrev, 1, 1) +| STATS extent = ST_EXTENT_AGG(city_boundary) BY prefix +| KEEP prefix, extent +| SORT prefix +| LIMIT 3 +; + +prefix:keyword | extent:geo_shape +A | BBOX (-171.91890003159642, 175.90319998562336, 64.61419996339828, -37.36450002528727) +B | BBOX (-116.51340007781982, 153.2021999359131, 60.631899973377585, -41.20620000176132) +C | BBOX (-107.51820000819862, 172.6055999379605, 55.732699991203845, -43.90400002710521) +; + +stExtentManyGeoPointsGrouped +required_capability: st_extent_agg +required_capability: st_extent_agg_docvalues + +FROM airport_city_boundaries +| EVAL prefix = SUBSTRING(abbrev, 1, 1) +| STATS extent = ST_EXTENT_AGG(city_location) BY prefix +| KEEP prefix, extent +| SORT prefix +| LIMIT 3 +; + 
+prefix:keyword | extent:geo_shape +A | BBOX (-171.75000007264316, 174.73999994806945, 64.54999999143183, -36.84060002211481) +B | BBOX (-116.23080002143979, 153.02809992805123, 60.46669999603182, -41.1500000115484) +C | BBOX (-107.39390007220209, 172.38329996354878, 55.676099974662066, -43.58330002985895) +; + +stExtentManyGeoShapesAndPointsGrouped +required_capability: st_extent_agg +required_capability: st_extent_agg_docvalues + +FROM airport_city_boundaries +| EVAL prefix = SUBSTRING(abbrev, 1, 1) +| STATS extent_shapes = ST_EXTENT_AGG(city_boundary), extent_points = ST_EXTENT_AGG(city_location) BY prefix +| KEEP prefix, extent_shapes, extent_points +| SORT prefix +| LIMIT 3 +; + +prefix:keyword | extent_shapes:geo_shape | extent_points:geo_shape +A | BBOX (-171.91890003159642, 175.90319998562336, 64.61419996339828, -37.36450002528727) | BBOX (-171.75000007264316, 174.73999994806945, 64.54999999143183, -36.84060002211481) +B | BBOX (-116.51340007781982, 153.2021999359131, 60.631899973377585, -41.20620000176132) | BBOX (-116.23080002143979, 153.02809992805123, 60.46669999603182, -41.1500000115484) +C | BBOX (-107.51820000819862, 172.6055999379605, 55.732699991203845, -43.90400002710521) | BBOX (-107.39390007220209, 172.38329996354878, 55.676099974662066, -43.58330002985895) +; + +stExtentManyGeoShapesGroupedEnrich +required_capability: st_extent_agg +required_capability: st_extent_agg_docvalues +required_capability: enrich_load + +FROM airports +| ENRICH city_boundaries ON city_location WITH airport, region, city_boundary +| EVAL prefix = SUBSTRING(abbrev, 1, 1) +| STATS extent = ST_EXTENT_AGG(city_boundary), count = COUNT() BY prefix +| KEEP prefix, count, extent +| SORT count DESC, prefix ASC +| LIMIT 3 +; + +prefix:keyword | count:long | extent:geo_shape +S | 77 | BBOX (-136.45440001040697, 178.8686999771744, 61.38089996762574, -33.92440003808588) +C | 75 | BBOX (-107.51820000819862, 172.6055999379605, 55.732699991203845, -43.90400002710521) +B | 69 | BBOX 
(-116.51340007781982, 153.2021999359131, 60.631899973377585, -41.20620000176132) +; + +stExtentManyGeoPointsGroupedEnrich +required_capability: st_extent_agg +required_capability: st_extent_agg_docvalues +required_capability: enrich_load + +FROM airports +| ENRICH city_boundaries ON city_location WITH airport, region, city_boundary +| EVAL prefix = SUBSTRING(abbrev, 1, 1) +| STATS extent = ST_EXTENT_AGG(city_location), count = COUNT() BY prefix +| KEEP prefix, count, extent +| SORT count DESC, prefix ASC +| LIMIT 3 +; + +prefix:keyword | count:long | extent:geo_shape +S | 77 | BBOX (-135.3152000438422, 178.54539999738336, 69.21669997740537, -33.8678000215441) +C | 75 | BBOX (-107.39390007220209, 172.38329996354878, 55.676099974662066, -43.58330002985895) +B | 69 | BBOX (-116.23080002143979, 153.02809992805123, 60.46669999603182, -41.1500000115484) +; + +stExtentManyGeoShapesAndPointsGroupedEnrich +required_capability: st_extent_agg +required_capability: st_extent_agg_docvalues +required_capability: enrich_load + +FROM airports +| ENRICH city_boundaries ON city_location WITH airport, region, city_boundary +| EVAL prefix = SUBSTRING(abbrev, 1, 1) +| STATS extent_shapes = ST_EXTENT_AGG(city_boundary), extent_points = ST_EXTENT_AGG(city_location), count = COUNT() BY prefix +| KEEP prefix, count, extent_shapes, extent_points +| SORT count DESC, prefix ASC +| LIMIT 3 +; + +prefix:keyword | count:long | extent_shapes:geo_shape | extent_points:geo_shape +S | 77 | BBOX (-136.45440001040697, 178.8686999771744, 61.38089996762574, -33.92440003808588) | BBOX (-135.3152000438422, 178.54539999738336, 69.21669997740537, -33.8678000215441) +C | 75 | BBOX (-107.51820000819862, 172.6055999379605, 55.732699991203845, -43.90400002710521) | BBOX (-107.39390007220209, 172.38329996354878, 55.676099974662066, -43.58330002985895) +B | 69 | BBOX (-116.51340007781982, 153.2021999359131, 60.631899973377585, -41.20620000176132) | BBOX (-116.23080002143979, 153.02809992805123, 60.46669999603182, 
-41.1500000115484) +; + ############################################### # Tests for ST_INTERSECTS on GEO_POINT type @@ -1777,6 +2003,18 @@ extent:cartesian_shape BBOX (4783520.5, 1.6168486E7, 8704352.0, -584415.9375) ; +stExtentMultipleCartesianPointsCount +required_capability: st_extent_agg +required_capability: st_extent_agg_docvalues + +FROM airports_web +| STATS extent = ST_EXTENT_AGG(location), count = COUNT() +; + +extent:cartesian_shape | count:long +BBOX (-1.949601E7, 1.9947946E7, 1.4502138E7, -7128878.5) | 849 +; + stExtentMultipleCartesianPointGrouping required_capability: st_extent_agg FROM airports_web | STATS extent = ST_EXTENT_AGG(location) BY scalerank | SORT scalerank DESC | LIMIT 3 @@ -1838,6 +2076,42 @@ count:long | key:keyword | extent:cartesian_shape 4 | Fou | BBOX (0.0, 3.0, 3.0, 0.0) ; +stExtentManyCartesianShapesGrouped +required_capability: st_extent_agg +required_capability: st_extent_agg_docvalues + +FROM countries_bbox_web +| EVAL prefix = SUBSTRING(id, 1, 1) +| STATS extent = ST_EXTENT_AGG(shape) BY prefix +| KEEP prefix, extent +| SORT prefix +| LIMIT 3 +; + +prefix:keyword | extent:cartesian_shape +A | BBOX (-2.0037508E7, 2.0037508E7, 6278042.5, -4.748140544E9) +B | BBOX (-9931524.0, 1.2841846E7, 7591831.0, -3994093.25) +C | BBOX (-1.8462154E7, 1.5002357E7, 1.7926778E7, -7538976.5) +; + +stExtentManyCartesianShapesGroupedCount +required_capability: st_extent_agg +required_capability: st_extent_agg_docvalues + +FROM countries_bbox_web +| EVAL prefix = SUBSTRING(id, 1, 1) +| STATS extent = ST_EXTENT_AGG(shape), count = COUNT() BY prefix +| KEEP prefix, count, extent +| SORT prefix +| LIMIT 3 +; + +prefix:keyword | count:long | extent:cartesian_shape +A | 17 | BBOX (-2.0037508E7, 2.0037508E7, 6278042.5, -4.748140544E9) +B | 18 | BBOX (-9931524.0, 1.2841846E7, 7591831.0, -3994093.25) +C | 19 | BBOX (-1.8462154E7, 1.5002357E7, 1.7926778E7, -7538976.5) +; + ############################################### # Tests for ST_INTERSECTS on 
CARTESIAN_POINT type diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 5468d57392c2..00d239ac9ac1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -296,9 +296,12 @@ public class EsqlCapabilities { */ ST_DISTANCE, - /** Support for function {@code ST_EXTENT}. */ + /** Support for function {@code ST_EXTENT_AGG}. */ ST_EXTENT_AGG, + /** Optimization of ST_EXTENT_AGG with doc-values as IntBlock. */ + ST_EXTENT_AGG_DOCVALUES, + /** * Fix determination of CRS types in spatial functions when folding. */ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialAggregateFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialAggregateFunction.java index f68f9f248788..248c151bcf94 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialAggregateFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialAggregateFunction.java @@ -39,7 +39,7 @@ public abstract class SpatialAggregateFunction extends AggregateFunction impleme this.fieldExtractPreference = fieldExtractPreference; } - public abstract SpatialAggregateFunction withDocValues(); + public abstract SpatialAggregateFunction withFieldExtractPreference(FieldExtractPreference preference); @Override public boolean licenseCheck(XPackLicenseState state) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialCentroid.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialCentroid.java index 
54c05cf1bad5..fad308e38cb2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialCentroid.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialCentroid.java @@ -71,8 +71,8 @@ public class SpatialCentroid extends SpatialAggregateFunction implements ToAggre } @Override - public SpatialCentroid withDocValues() { - return new SpatialCentroid(source(), field(), filter(), FieldExtractPreference.DOC_VALUES); + public SpatialCentroid withFieldExtractPreference(FieldExtractPreference preference) { + return new SpatialCentroid(source(), field(), filter(), preference); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialExtent.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialExtent.java index 34e5c9d68fc8..5d56fe1e1169 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialExtent.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SpatialExtent.java @@ -11,10 +11,12 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.spatial.SpatialExtentCartesianPointDocValuesAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.spatial.SpatialExtentCartesianPointSourceValuesAggregatorFunctionSupplier; -import org.elasticsearch.compute.aggregation.spatial.SpatialExtentCartesianShapeAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.spatial.SpatialExtentCartesianShapeDocValuesAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.spatial.SpatialExtentCartesianShapeSourceValuesAggregatorFunctionSupplier; import 
org.elasticsearch.compute.aggregation.spatial.SpatialExtentGeoPointDocValuesAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.spatial.SpatialExtentGeoPointSourceValuesAggregatorFunctionSupplier; -import org.elasticsearch.compute.aggregation.spatial.SpatialExtentGeoShapeAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.spatial.SpatialExtentGeoShapeDocValuesAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.spatial.SpatialExtentGeoShapeSourceValuesAggregatorFunctionSupplier; import org.elasticsearch.index.mapper.MappedFieldType.FieldExtractPreference; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.expression.Expression; @@ -75,8 +77,8 @@ public final class SpatialExtent extends SpatialAggregateFunction implements ToA } @Override - public org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialExtent withDocValues() { - return new SpatialExtent(source(), field(), filter(), FieldExtractPreference.DOC_VALUES); + public SpatialExtent withFieldExtractPreference(FieldExtractPreference preference) { + return new SpatialExtent(source(), field(), filter(), preference); } @Override @@ -101,7 +103,8 @@ public final class SpatialExtent extends SpatialAggregateFunction implements ToA @Override public AggregatorFunctionSupplier supplier(List inputChannels) { - return switch (field().dataType()) { + DataType type = field().dataType(); + return switch (type) { case DataType.GEO_POINT -> switch (fieldExtractPreference) { case DOC_VALUES -> new SpatialExtentGeoPointDocValuesAggregatorFunctionSupplier(inputChannels); case NONE, EXTRACT_SPATIAL_BOUNDS -> new SpatialExtentGeoPointSourceValuesAggregatorFunctionSupplier(inputChannels); @@ -110,10 +113,17 @@ public final class SpatialExtent extends SpatialAggregateFunction implements ToA case DOC_VALUES -> new SpatialExtentCartesianPointDocValuesAggregatorFunctionSupplier(inputChannels); case NONE, 
EXTRACT_SPATIAL_BOUNDS -> new SpatialExtentCartesianPointSourceValuesAggregatorFunctionSupplier(inputChannels); }; - // Shapes don't differentiate between source and doc values. - case DataType.GEO_SHAPE -> new SpatialExtentGeoShapeAggregatorFunctionSupplier(inputChannels); - case DataType.CARTESIAN_SHAPE -> new SpatialExtentCartesianShapeAggregatorFunctionSupplier(inputChannels); - default -> throw EsqlIllegalArgumentException.illegalDataType(field().dataType()); + case DataType.GEO_SHAPE -> switch (fieldExtractPreference) { + case EXTRACT_SPATIAL_BOUNDS -> new SpatialExtentGeoShapeDocValuesAggregatorFunctionSupplier(inputChannels); + case NONE -> new SpatialExtentGeoShapeSourceValuesAggregatorFunctionSupplier(inputChannels); + case DOC_VALUES -> throw new EsqlIllegalArgumentException("Illegal field extract preference: " + fieldExtractPreference); + }; + case DataType.CARTESIAN_SHAPE -> switch (fieldExtractPreference) { + case EXTRACT_SPATIAL_BOUNDS -> new SpatialExtentCartesianShapeDocValuesAggregatorFunctionSupplier(inputChannels); + case NONE -> new SpatialExtentCartesianShapeSourceValuesAggregatorFunctionSupplier(inputChannels); + case DOC_VALUES -> throw new EsqlIllegalArgumentException("Illegal field extract preference: " + fieldExtractPreference); + }; + default -> throw EsqlIllegalArgumentException.illegalDataType(type); }; } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/SpatialDocValuesExtraction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/SpatialDocValuesExtraction.java index f66ed5c8e4ec..d70153258871 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/SpatialDocValuesExtraction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/SpatialDocValuesExtraction.java @@ -7,6 +7,7 @@ package 
org.elasticsearch.xpack.esql.optimizer.rules.physical.local; +import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; @@ -84,7 +85,9 @@ public class SpatialDocValuesExtraction extends PhysicalOptimizerRules.Parameter // We need to both mark the field to load differently, and change the spatial function to know to use it foundAttributes.add(fieldAttribute); changedAggregates = true; - orderedAggregates.add(as.replaceChild(af.withDocValues())); + orderedAggregates.add( + as.replaceChild(af.withFieldExtractPreference(MappedFieldType.FieldExtractPreference.DOC_VALUES)) + ); } else { orderedAggregates.add(aggExpr); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/SpatialShapeBoundsExtraction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/SpatialShapeBoundsExtraction.java index f6f087064a02..eb0d82a59079 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/SpatialShapeBoundsExtraction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/SpatialShapeBoundsExtraction.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.optimizer.rules.physical.local; +import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.lucene.spatial.GeometryDocValueWriter; import org.elasticsearch.xpack.esql.core.expression.Alias; import org.elasticsearch.xpack.esql.core.expression.Attribute; @@ -49,9 +50,20 @@ import java.util.stream.Collectors; public class SpatialShapeBoundsExtraction extends ParameterizedOptimizerRule { @Override protected PhysicalPlan rule(AggregateExec aggregate, LocalPhysicalOptimizerContext ctx) { - var foundAttributes = new HashSet(); + Set 
foundAttributes = findSpatialShapeBoundsAttributes(aggregate, ctx); + if (foundAttributes.isEmpty()) { + return aggregate; + } + return aggregate.transformDown(PhysicalPlan.class, exec -> switch (exec) { + case AggregateExec agg -> transformAggregateExec(agg, foundAttributes); + case FieldExtractExec fieldExtractExec -> transformFieldExtractExec(fieldExtractExec, foundAttributes); + default -> exec; + }); + } - return aggregate.transformDown(UnaryExec.class, exec -> { + private static Set findSpatialShapeBoundsAttributes(AggregateExec aggregate, LocalPhysicalOptimizerContext ctx) { + var foundAttributes = new HashSet(); + aggregate.transformDown(UnaryExec.class, exec -> { switch (exec) { case AggregateExec agg -> { List aggregateFunctions = agg.aggregates() @@ -84,18 +96,27 @@ public class SpatialShapeBoundsExtraction extends ParameterizedOptimizerRule foundAttributes.removeAll(evalExec.references()); case FilterExec filterExec -> foundAttributes.removeAll(filterExec.condition().references()); - case FieldExtractExec fieldExtractExec -> { - var boundsAttributes = new HashSet<>(foundAttributes); - boundsAttributes.retainAll(fieldExtractExec.attributesToExtract()); - if (boundsAttributes.isEmpty() == false) { - exec = fieldExtractExec.withBoundsAttributes(boundsAttributes); - } - } default -> { // Do nothing } } return exec; }); + return foundAttributes; + } + + private static PhysicalPlan transformFieldExtractExec(FieldExtractExec fieldExtractExec, Set foundAttributes) { + var boundsAttributes = new HashSet<>(foundAttributes); + boundsAttributes.retainAll(fieldExtractExec.attributesToExtract()); + return fieldExtractExec.withBoundsAttributes(boundsAttributes); + } + + private static PhysicalPlan transformAggregateExec(AggregateExec agg, Set foundAttributes) { + return agg.transformExpressionsDown( + SpatialExtent.class, + spatialExtent -> foundAttributes.contains(spatialExtent.field()) + ? 
spatialExtent.withFieldExtractPreference(MappedFieldType.FieldExtractPreference.EXTRACT_SPATIAL_BOUNDS) + : spatialExtent + ); } private static boolean isShape(DataType dataType) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java index 1918e3036e2b..e420cd501ccc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java @@ -180,10 +180,8 @@ final class AggregateMapper { types = List.of("GeoPoint", "CartesianPoint"); extraConfigs = SPATIAL_EXTRA_CONFIGS; } else if (clazz == SpatialExtent.class) { - return Stream.concat( - combine(clazz, List.of("GeoPoint", "CartesianPoint"), SPATIAL_EXTRA_CONFIGS), - combine(clazz, List.of("GeoShape", "CartesianShape"), List.of("")) - ); + types = List.of("GeoPoint", "CartesianPoint", "GeoShape", "CartesianShape"); + extraConfigs = SPATIAL_EXTRA_CONFIGS; } else if (Values.class.isAssignableFrom(clazz)) { // TODO can't we figure this out from the function itself? 
types = List.of("Int", "Long", "Double", "Boolean", "BytesRef"); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java index 2f4368155069..a44eb3bbe75f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java @@ -59,6 +59,7 @@ import java.util.function.Function; import static java.util.Arrays.asList; import static org.elasticsearch.index.mapper.MappedFieldType.FieldExtractPreference.DOC_VALUES; +import static org.elasticsearch.index.mapper.MappedFieldType.FieldExtractPreference.EXTRACT_SPATIAL_BOUNDS; import static org.elasticsearch.index.mapper.MappedFieldType.FieldExtractPreference.NONE; import static org.elasticsearch.xpack.esql.core.util.Queries.Clause.FILTER; import static org.elasticsearch.xpack.esql.optimizer.rules.physical.local.PushFiltersToSource.canPushToSource; @@ -284,7 +285,7 @@ public class PlannerUtils { case DOC_DATA_TYPE -> ElementType.DOC; case TSID_DATA_TYPE -> ElementType.BYTES_REF; case GEO_POINT, CARTESIAN_POINT -> fieldExtractPreference == DOC_VALUES ? ElementType.LONG : ElementType.BYTES_REF; - case GEO_SHAPE, CARTESIAN_SHAPE -> ElementType.BYTES_REF; + case GEO_SHAPE, CARTESIAN_SHAPE -> fieldExtractPreference == EXTRACT_SPATIAL_BOUNDS ? ElementType.INT : ElementType.BYTES_REF; case PARTIAL_AGG -> ElementType.COMPOSITE; case SHORT, BYTE, DATE_PERIOD, TIME_DURATION, OBJECT, FLOAT, HALF_FLOAT, SCALED_FLOAT -> throw EsqlIllegalArgumentException .illegalDataType(dataType); @@ -300,11 +301,4 @@ public class PlannerUtils { new NoopCircuitBreaker("noop-esql-breaker"), BigArrays.NON_RECYCLING_INSTANCE ); - - /** - * Returns DOC_VALUES if the given boolean is set. 
- */ - public static MappedFieldType.FieldExtractPreference extractPreference(boolean hasPreference) { - return hasPreference ? DOC_VALUES : NONE; - } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index 75825f4e8f48..504923f6131f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -60,6 +60,7 @@ import org.elasticsearch.xpack.esql.core.expression.predicate.logical.And; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Not; import org.elasticsearch.xpack.esql.core.expression.predicate.logical.Or; import org.elasticsearch.xpack.esql.core.expression.predicate.operator.comparison.BinaryComparison; +import org.elasticsearch.xpack.esql.core.tree.Node; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; @@ -218,7 +219,10 @@ public class PhysicalPlanOptimizerTests extends ESTestCase { private TestDataSource airportsNotIndexed; // Test when spatial field has doc values but is not indexed private TestDataSource airportsNotIndexedNorDocValues; // Test when spatial field is neither indexed nor has doc-values private TestDataSource airportsWeb; // Cartesian point field tests - private TestDataSource airportsCityBoundaries; + private TestDataSource airportsCityBoundaries; // geo_shape field tests + private TestDataSource airportsCityBoundariesNoPointDocValues; // Disable doc-values on geo_point fields, but not geo_shape fields + private TestDataSource airportsCityBoundariesNoShapeDocValues; // Disable doc-values on geo_shape fields, but not geo_point fields + private TestDataSource 
airportsCityBoundariesNoDocValues; // Disable doc-values on both geo_point and geo_shape fields private TestDataSource cartesianMultipolygons; // cartesian_shape field tests private TestDataSource cartesianMultipolygonsNoDocValues; // cartesian_shape field tests but has no doc values private TestDataSource countriesBbox; // geo_shape field tests @@ -296,6 +300,27 @@ public class PhysicalPlanOptimizerTests extends ESTestCase { functionRegistry, enrichResolution ); + this.airportsCityBoundariesNoPointDocValues = makeTestDataSource( + "airports_city_boundaries", + "mapping-airport_city_boundaries.json", + functionRegistry, + enrichResolution, + new TestConfigurableSearchStats().exclude(Config.DOC_VALUES, "location", "city_location") + ); + this.airportsCityBoundariesNoShapeDocValues = makeTestDataSource( + "airports_city_boundaries", + "mapping-airport_city_boundaries.json", + functionRegistry, + enrichResolution, + new TestConfigurableSearchStats().exclude(Config.DOC_VALUES, "city_boundary") + ); + this.airportsCityBoundariesNoDocValues = makeTestDataSource( + "airports_city_boundaries", + "mapping-airport_city_boundaries.json", + functionRegistry, + enrichResolution, + new TestConfigurableSearchStats().exclude(Config.DOC_VALUES, "city_boundary", "location", "city_location") + ); this.cartesianMultipolygons = makeTestDataSource( + "cartesian_multipolygons", "mapping-cartesian_multipolygons.json", @@ -3274,39 +3299,39 @@ public class PhysicalPlanOptimizerTests extends ESTestCase { * ][_doc{f}#36], limit[], sort[] estimatedRowSize[204] * */ - public void testSpatialTypesAndStatsExtentOfGeoShapeDoesNotUseBinaryExtraction() { - // TODO: When we get geo_shape working with bounds extraction from doc-values, change the name of this test + public void testSpatialTypesAndStatsExtentOfGeoShapeUsesBinaryExtraction() { var query = "FROM airports_city_boundaries | STATS extent = ST_EXTENT_AGG(city_boundary)"; - var testData = airportsCityBoundaries; - var plan = physicalPlan(query, 
testData); + for (boolean useDocValues : new Boolean[] { true, false }) { + var testData = useDocValues ? airportsCityBoundaries : airportsCityBoundariesNoDocValues; + var plan = physicalPlan(query, testData); - var limit = as(plan, LimitExec.class); - var agg = as(limit.child(), AggregateExec.class); - // Before optimization the aggregation does not use extent extraction - assertAggregation(agg, "extent", SpatialExtent.class, GEO_SHAPE, FieldExtractPreference.NONE); + var limit = as(plan, LimitExec.class); + var agg = as(limit.child(), AggregateExec.class); + // Before optimization the aggregation does not use extent extraction + assertAggregation(agg, "extent", SpatialExtent.class, GEO_SHAPE, FieldExtractPreference.NONE); - var exchange = as(agg.child(), ExchangeExec.class); - var fragment = as(exchange.child(), FragmentExec.class); - var fAgg = as(fragment.fragment(), Aggregate.class); - as(fAgg.child(), EsRelation.class); + var exchange = as(agg.child(), ExchangeExec.class); + var fragment = as(exchange.child(), FragmentExec.class); + var fAgg = as(fragment.fragment(), Aggregate.class); + as(fAgg.child(), EsRelation.class); - // Now optimize the plan and assert the aggregation uses extent extraction - var optimized = optimizedPlan(plan, testData.stats); - limit = as(optimized, LimitExec.class); - agg = as(limit.child(), AggregateExec.class); - // Above the exchange (in coordinator) the aggregation is not using doc-values - assertAggregation(agg, "extent", SpatialExtent.class, GEO_SHAPE, FieldExtractPreference.NONE); - exchange = as(agg.child(), ExchangeExec.class); - agg = as(exchange.child(), AggregateExec.class); - // below the exchange (in data node) the aggregation is using a specific - assertAggregation(agg, "extent", SpatialExtent.class, GEO_SHAPE, FieldExtractPreference.NONE); - assertChildIsExtractedAs(agg, FieldExtractPreference.EXTRACT_SPATIAL_BOUNDS, GEO_SHAPE); + // Now optimize the plan and assert the aggregation uses extent extraction + var 
optimized = optimizedPlan(plan, testData.stats); + limit = as(optimized, LimitExec.class); + agg = as(limit.child(), AggregateExec.class); + // Above the exchange (in coordinator) the aggregation is not using doc-values + assertAggregation(agg, "extent", SpatialExtent.class, GEO_SHAPE, FieldExtractPreference.NONE); + exchange = as(agg.child(), ExchangeExec.class); + agg = as(exchange.child(), AggregateExec.class); + // below the exchange (in data node) the aggregation is using a specific int[] which the aggregation needs to know about. + var fieldExtractPreference = useDocValues ? FieldExtractPreference.EXTRACT_SPATIAL_BOUNDS : FieldExtractPreference.NONE; + assertAggregation(agg, "extent", SpatialExtent.class, GEO_SHAPE, fieldExtractPreference); + assertChildIsExtractedAs(agg, fieldExtractPreference, GEO_SHAPE); + } } /** * This test verifies that the aggregation does not use spatial bounds extraction when the shape appears in an eval or filter. - * TODO: Currently this tests nothing, because geo_shape is not supported anyway for bounds extraction, - * but it should be updated when it is supported. */ public void testSpatialTypesAndStatsExtentOfShapesNegativeCases() { for (String query : new String[] { """ @@ -3329,6 +3354,7 @@ public class PhysicalPlanOptimizerTests extends ESTestCase { assertAggregation(agg, "extent", SpatialExtent.class, GEO_SHAPE, FieldExtractPreference.NONE); var exchange = as(agg.child(), ExchangeExec.class); agg = as(exchange.child(), AggregateExec.class); + // Because the shape was used in EVAL/WHERE we cannot use doc-values bounds extraction optimization assertAggregation(agg, "extent", SpatialExtent.class, GEO_SHAPE, FieldExtractPreference.NONE); var exec = agg.child() instanceof FieldExtractExec ? 
agg : as(agg.child(), UnaryExec.class); assertChildIsExtractedAs(exec, FieldExtractPreference.NONE, GEO_SHAPE); @@ -3354,19 +3380,11 @@ public class PhysicalPlanOptimizerTests extends ESTestCase { var optimized = optimizedPlan(plan, testData.stats); limit = as(optimized, LimitExec.class); agg = as(limit.child(), AggregateExec.class); - // For cartesian_shape extraction, we extract bounds from doc-values directly into a BBOX encoded as BytesRef, - // so the aggregation does not need to know about it. assertAggregation(agg, "extent", SpatialExtent.class, CARTESIAN_SHAPE, FieldExtractPreference.NONE); var exchange = as(agg.child(), ExchangeExec.class); agg = as(exchange.child(), AggregateExec.class); - assertAggregation( - agg, - "extent", - "hasDocValues:" + hasDocValues, - SpatialExtent.class, - CARTESIAN_SHAPE, - FieldExtractPreference.NONE - ); + // We extract bounds from doc-values into a special int[] which the aggregation needs to know about. + assertAggregation(agg, "extent", "hasDocValues:" + hasDocValues, SpatialExtent.class, CARTESIAN_SHAPE, fieldExtractPreference); var exec = agg.child() instanceof FieldExtractExec ? agg : as(agg.child(), UnaryExec.class); // For cartesian_shape, the bounds extraction is done in the FieldExtractExec, so it does need to know about this assertChildIsExtractedAs(exec, fieldExtractPreference, CARTESIAN_SHAPE); @@ -3374,60 +3392,72 @@ public class PhysicalPlanOptimizerTests extends ESTestCase { } /** - * Before local optimizations: + * This tests all four combinations of geo_point and geo_shape with and without doc-values. + * Since each will be extracted differently (points as encoded longs, and shapes as int[5] bounds representing Extents), + * we want to verify that the combinations do not clash and work together. 
+ * The optimized query plan in the case when both points and shapes have doc-values will look like: * * LimitExec[1000[INTEGER]] - * \_AggregateExec[[],[SPATIALEXTENT(city_boundary{f}#13,true[BOOLEAN]) AS extent, SPATIALCENTROID(city_location{f}#12,true[BOOLEA - * N]) AS centroid],...] - * \_ExchangeExec[[..]] - * \_FragmentExec[filter=null, estimatedRowSize=0, reducer=[], fragment=[...]] - * \_EsRelation[airports_city_boundaries][abbrev{f}#8, airport{f}#9, city{f}#11, city_boundar..] - * - * After local optimizations: - * - * LimitExec[1000[INTEGER]] - * \_AggregateExec[[],[SPATIALSTEXTENT(location{f}#48,true[BOOLEAN]) AS extent],FINAL,[minNegX{r}#52, minPosX{r}#53, maxNegX{r}#54, - * maxPosX{r}#55, maxY{r}#56, minY{r}#57],21] - * \_ExchangeExec[[minNegX{r}#52, minPosX{r}#53, maxNegX{r}#54, maxPosX{r}#55, maxY{r}#56, minY{r}#57],true] - * \_AggregateExec[[],[SPATIALSTEXTENT(location{f}#48,true[BOOLEAN]) AS extent],INITIAL,[ - * minNegX{r}#73, minPosX{r}#74, maxNegX{rb#75, maxPosX{r}#76, maxY{r}#77, minY{r}#78],21] - * \_FieldExtractExec[location{f}#48][location{f}#48] - * \_EsQueryExec[airports], indexMode[standard], query[{"exists":{"field":"location","boost":1.0}}][ - * _doc{f}#79], limit[], sort[] estimatedRowSize[25] + * \_AggregateExec[[],[ + * SPATIALEXTENT(city_boundary{f}#13,true[BOOLEAN]) AS extent, + * SPATIALCENTROID(city_location{f}#12,true[BOOLEAN]) AS centroid + * ],FINAL,[...bounds attributes..., ...centroid attributes...],221] + * \_ExchangeExec[[...bounds attributes..., ...centroid attributes...],true] + * \_AggregateExec[[],[ + * SPATIALEXTENT(city_boundary{f}#13,true[BOOLEAN]) AS extent, + * SPATIALCENTROID(city_location{f}#12,true[BOOLEAN]) AS centroid + * ],INITIAL,[...bounds attributes..., ...centroid attributes...],221] + * \_FieldExtractExec[city_boundary{f}#13, city_location{f}#12][city_location{f}#12],[city_boundary{f}#13] + * \_EsQueryExec[airports_city_boundaries], indexMode[standard], query[ + * {"bool":{"should":[ + * 
{"exists":{"field":"city_boundary","boost":1.0}}, + * {"exists":{"field":"city_location","boost":1.0}} + * ],"boost":1.0}} + * ][_doc{f}#55], limit[], sort[] estimatedRowSize[225] * */ public void testMixedSpatialBoundsAndPointsExtracted() { var query = """ FROM airports_city_boundaries \ | STATS extent = ST_EXTENT_AGG(city_boundary), centroid = ST_CENTROID_AGG(city_location)"""; - var testData = airportsCityBoundaries; - var plan = physicalPlan(query, testData); + for (boolean pointDocValues : new Boolean[] { true, false }) { + for (boolean shapeDocValues : new Boolean[] { true, false }) { + var testData = pointDocValues + ? (shapeDocValues ? airportsCityBoundaries : airportsCityBoundariesNoShapeDocValues) + : (shapeDocValues ? airportsCityBoundariesNoPointDocValues : airportsCityBoundariesNoDocValues); + var msg = "DocValues[point:" + pointDocValues + ", shape:" + shapeDocValues + "]"; + var plan = physicalPlan(query, testData); - var limit = as(plan, LimitExec.class); - var agg = as(limit.child(), AggregateExec.class); - // Before optimization the aggregation does not use doc-values - assertAggregation(agg, "extent", SpatialExtent.class, GEO_SHAPE, FieldExtractPreference.NONE); - assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, FieldExtractPreference.NONE); + var limit = as(plan, LimitExec.class); + var agg = as(limit.child(), AggregateExec.class); + // Before optimization the aggregation does not use doc-values + assertAggregation(agg, "extent", msg, SpatialExtent.class, GEO_SHAPE, FieldExtractPreference.NONE); + assertAggregation(agg, "centroid", msg, SpatialCentroid.class, GEO_POINT, FieldExtractPreference.NONE); - var exchange = as(agg.child(), ExchangeExec.class); - var fragment = as(exchange.child(), FragmentExec.class); - var fAgg = as(fragment.fragment(), Aggregate.class); - as(fAgg.child(), EsRelation.class); + var exchange = as(agg.child(), ExchangeExec.class); + var fragment = as(exchange.child(), FragmentExec.class); + var fAgg = 
as(fragment.fragment(), Aggregate.class); + as(fAgg.child(), EsRelation.class); - // Now optimize the plan and assert the aggregation uses both doc-values and bounds extraction - var optimized = optimizedPlan(plan, testData.stats); - limit = as(optimized, LimitExec.class); - agg = as(limit.child(), AggregateExec.class); - // Above the exchange (in coordinator) the aggregation is not field-optimized. - assertAggregation(agg, "extent", SpatialExtent.class, GEO_SHAPE, FieldExtractPreference.NONE); - assertAggregation(agg, "centroid", SpatialCentroid.class, GEO_POINT, FieldExtractPreference.NONE); - exchange = as(agg.child(), ExchangeExec.class); - agg = as(exchange.child(), AggregateExec.class); - // below the exchange (in data node) the aggregation is field optimized. - assertAggregation(agg, "extent", SpatialExtent.class, GEO_SHAPE, FieldExtractPreference.NONE); - var fieldExtractExec = as(agg.child(), FieldExtractExec.class); - assertThat(fieldExtractExec.boundsAttributes().stream().map(a -> a.sourceText()).toList(), equalTo(List.of("city_boundary"))); - assertThat(fieldExtractExec.docValuesAttributes().stream().map(a -> a.sourceText()).toList(), equalTo(List.of("city_location"))); + // Now optimize the plan and assert the aggregation uses both doc-values and bounds extraction + var optimized = optimizedPlan(plan, testData.stats); + limit = as(optimized, LimitExec.class); + agg = as(limit.child(), AggregateExec.class); + // Above the exchange (in coordinator) the aggregation is not field-optimized. + assertAggregation(agg, "extent", msg, SpatialExtent.class, GEO_SHAPE, FieldExtractPreference.NONE); + assertAggregation(agg, "centroid", msg, SpatialCentroid.class, GEO_POINT, FieldExtractPreference.NONE); + exchange = as(agg.child(), ExchangeExec.class); + agg = as(exchange.child(), AggregateExec.class); + var fieldExtractExec = as(agg.child(), FieldExtractExec.class); + // below the exchange (in data node) the aggregation is field optimized. 
+ var shapeExtractPreference = shapeDocValues ? FieldExtractPreference.EXTRACT_SPATIAL_BOUNDS : FieldExtractPreference.NONE; + assertAggregation(agg, "extent", msg, SpatialExtent.class, GEO_SHAPE, shapeExtractPreference); + List boundsAttributes = shapeDocValues ? List.of("city_boundary") : List.of(); + List docValuesAttributes = pointDocValues ? List.of("city_location") : List.of(); + assertThat(fieldExtractExec.boundsAttributes().stream().map(Node::sourceText).toList(), equalTo(boundsAttributes)); + assertThat(fieldExtractExec.docValuesAttributes().stream().map(Node::sourceText).toList(), equalTo(docValuesAttributes)); + } + } } /** @@ -7746,7 +7776,7 @@ public class PhysicalPlanOptimizerTests extends ESTestCase { var aggFunc = assertAggregation(plan, aliasName, aggClass); var aggField = as(aggFunc.field(), Attribute.class); var spatialAgg = as(aggFunc, SpatialAggregateFunction.class); - assertThat(spatialAgg.fieldExtractPreference(), equalTo(fieldExtractPreference)); + assertThat(reason, spatialAgg.fieldExtractPreference(), equalTo(fieldExtractPreference)); assertThat(reason, aggField.dataType(), equalTo(fieldType)); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java index 628737aa36c6..c5933f134f9a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java @@ -21,6 +21,7 @@ import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.DocBlock; import org.elasticsearch.compute.data.DocVector; import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LongBlock; 
import org.elasticsearch.compute.data.Page; @@ -33,9 +34,14 @@ import org.elasticsearch.compute.operator.SourceOperator.SourceOperatorFactory; import org.elasticsearch.core.Nullable; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.geometry.Geometry; +import org.elasticsearch.geometry.utils.GeometryValidator; +import org.elasticsearch.geometry.utils.SpatialEnvelopeVisitor; +import org.elasticsearch.geometry.utils.WellKnownBinary; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.mapper.MappedFieldType.FieldExtractPreference; import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.lucene.spatial.CoordinateEncoder; import org.elasticsearch.plugins.scanners.StablePluginsRegistry; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.TestBlockFactory; @@ -68,6 +74,9 @@ import java.util.stream.IntStream; import static com.carrotsearch.randomizedtesting.generators.RandomNumbers.randomIntBetween; import static java.util.stream.Collectors.joining; import static org.apache.lucene.tests.util.LuceneTestCase.createTempDir; +import static org.elasticsearch.compute.aggregation.spatial.SpatialAggregationUtils.encodeLongitude; +import static org.elasticsearch.index.mapper.MappedFieldType.FieldExtractPreference.DOC_VALUES; +import static org.elasticsearch.index.mapper.MappedFieldType.FieldExtractPreference.EXTRACT_SPATIAL_BOUNDS; public class TestPhysicalOperationProviders extends AbstractPhysicalOperationProviders { private final List indexPages; @@ -103,13 +112,7 @@ public class TestPhysicalOperationProviders extends AbstractPhysicalOperationPro PhysicalOperation op = source; for (Attribute attr : fieldExtractExec.attributesToExtract()) { layout.append(attr); - op = op.with( - new TestFieldExtractOperatorFactory( - attr, - 
PlannerUtils.extractPreference(fieldExtractExec.docValuesAttributes().contains(attr)) - ), - layout.build() - ); + op = op.with(new TestFieldExtractOperatorFactory(attr, fieldExtractExec.fieldExtractPreference(attr)), layout.build()); } return op; } @@ -397,17 +400,16 @@ public class TestPhysicalOperationProviders extends AbstractPhysicalOperationPro FieldExtractPreference extractPreference, BiFunction extractBlock ) { - BlockFactory blockFactory = docBlock.blockFactory(); - boolean mapToDocValues = shouldMapToDocValues(dataType, extractPreference); try ( - Block.Builder blockBuilder = mapToDocValues - ? blockFactory.newLongBlockBuilder(docBlock.getPositionCount()) - : blockBuilder(dataType, docBlock.getPositionCount(), TestBlockFactory.getNonBreakingInstance()) + Block.Builder blockBuilder = blockBuilder( + dataType, + extractPreference, + docBlock.getPositionCount(), + TestBlockFactory.getNonBreakingInstance() + ) ) { foreachIndexDoc(docBlock, indexDoc -> { - TestBlockCopier blockCopier = mapToDocValues - ? 
TestSpatialPointStatsBlockCopier.create(indexDoc.asVector().docs(), dataType) - : new TestBlockCopier(indexDoc.asVector().docs()); + TestBlockCopier blockCopier = blockCopier(dataType, extractPreference, indexDoc.asVector().docs()); Block blockForIndex = extractBlock.apply(indexDoc, blockCopier); blockBuilder.copyFrom(blockForIndex, 0, blockForIndex.getPositionCount()); }); @@ -418,10 +420,6 @@ public class TestPhysicalOperationProviders extends AbstractPhysicalOperationPro } } - private boolean shouldMapToDocValues(DataType dataType, FieldExtractPreference extractPreference) { - return extractPreference == FieldExtractPreference.DOC_VALUES && DataType.isSpatialPoint(dataType); - } - private static class TestBlockCopier { protected final IntVector docIndices; @@ -447,7 +445,6 @@ public class TestPhysicalOperationProviders extends AbstractPhysicalOperationPro /** * geo_point and cartesian_point are normally loaded as WKT from source, but for aggregations we can load them as doc-values * which are encoded Long values. This class is used to convert the test loaded WKB into encoded longs for the aggregators. 
- * TODO: We need a different solution to support geo_shape and cartesian_shape */ private abstract static class TestSpatialPointStatsBlockCopier extends TestBlockCopier { @@ -465,15 +462,15 @@ public class TestPhysicalOperationProviders extends AbstractPhysicalOperationPro for (int c = 0; c < docIndices.getPositionCount(); c++) { int doc = docIndices.getInt(c); int count = bytesRefBlock.getValueCount(doc); - int i = bytesRefBlock.getFirstValueIndex(doc); if (count == 0) { builder.appendNull(); } else { if (count > 1) { builder.beginPositionEntry(); } - for (int v = 0; v < count; v++) { - builder.appendLong(encode(bytesRefBlock.getBytesRef(i + v, scratch))); + int firstValueIndex = bytesRefBlock.getFirstValueIndex(doc); + for (int i = firstValueIndex; i < firstValueIndex + count; i++) { + builder.appendLong(encode(bytesRefBlock.getBytesRef(i, scratch))); } if (count > 1) { builder.endPositionEntry(); @@ -499,12 +496,123 @@ public class TestPhysicalOperationProviders extends AbstractPhysicalOperationPro } } - private static Block.Builder blockBuilder(DataType dataType, int estimatedSize, BlockFactory blockFactory) { + /** + * geo_shape and cartesian_shape are normally loaded as WKT from source, but for ST_EXTENT_AGG we can load them from doc-values + * extracting the spatial Extent information. This class is used to convert the test loaded WKB into the int[6] used in the aggregators. 
+ */ + private abstract static class TestSpatialShapeExtentBlockCopier extends TestBlockCopier { + protected final SpatialEnvelopeVisitor.PointVisitor pointVisitor; + private final SpatialEnvelopeVisitor visitor; + + private TestSpatialShapeExtentBlockCopier(IntVector docIndices, SpatialEnvelopeVisitor.PointVisitor pointVisitor) { + super(docIndices); + this.pointVisitor = pointVisitor; + this.visitor = new SpatialEnvelopeVisitor(pointVisitor); + } + + @Override + protected Block copyBlock(Block originalData) { + BytesRef scratch = new BytesRef(100); + BytesRefBlock bytesRefBlock = (BytesRefBlock) originalData; + try (IntBlock.Builder builder = bytesRefBlock.blockFactory().newIntBlockBuilder(docIndices.getPositionCount())) { + for (int c = 0; c < docIndices.getPositionCount(); c++) { + int doc = docIndices.getInt(c); + int count = bytesRefBlock.getValueCount(doc); + if (count == 0) { + builder.appendNull(); + } else { + pointVisitor.reset(); + int firstValueIndex = bytesRefBlock.getFirstValueIndex(doc); + for (int i = firstValueIndex; i < firstValueIndex + count; i++) { + BytesRef wkb = bytesRefBlock.getBytesRef(i, scratch); + Geometry geometry = WellKnownBinary.fromWKB(GeometryValidator.NOOP, false, wkb.bytes, wkb.offset, wkb.length); + geometry.visit(visitor); + } + encodeExtent(builder); + } + } + return builder.build(); + } + } + + protected abstract void encodeExtent(IntBlock.Builder builder); + + private static TestSpatialShapeExtentBlockCopier create(IntVector docIndices, DataType dataType) { + return switch (dataType) { + case GEO_SHAPE -> new TestGeoCopier(docIndices); + case CARTESIAN_SHAPE -> new TestCartesianCopier(docIndices); + default -> throw new IllegalArgumentException("Unsupported spatial data type: " + dataType); + }; + } + + private static class TestGeoCopier extends TestSpatialShapeExtentBlockCopier { + private TestGeoCopier(IntVector docIndices) { + super(docIndices, new 
SpatialEnvelopeVisitor.GeoPointVisitor(SpatialEnvelopeVisitor.WrapLongitude.WRAP)); + } + + @Override + protected void encodeExtent(IntBlock.Builder builder) { + // We store the 6 values as a single multi-valued field, in the same order as the fields in the Extent class + // This requires that consumers also know the meaning of the values, which they can learn from the Extent class + SpatialEnvelopeVisitor.GeoPointVisitor visitor = (SpatialEnvelopeVisitor.GeoPointVisitor) pointVisitor; + builder.beginPositionEntry(); + builder.appendInt(CoordinateEncoder.GEO.encodeY(visitor.getTop())); + builder.appendInt(CoordinateEncoder.GEO.encodeY(visitor.getBottom())); + builder.appendInt(encodeLongitude(visitor.getNegLeft())); + builder.appendInt(encodeLongitude(visitor.getNegRight())); + builder.appendInt(encodeLongitude(visitor.getPosLeft())); + builder.appendInt(encodeLongitude(visitor.getPosRight())); + builder.endPositionEntry(); + } + } + + private static class TestCartesianCopier extends TestSpatialShapeExtentBlockCopier { + private TestCartesianCopier(IntVector docIndices) { + super(docIndices, new SpatialEnvelopeVisitor.CartesianPointVisitor()); + } + + @Override + protected void encodeExtent(IntBlock.Builder builder) { + // We store the 4 values as a single multi-valued field, in the same order as the fields in the Rectangle class + // This requires that consumers also know the meaning of the values, which they can learn from the Rectangle class + SpatialEnvelopeVisitor.CartesianPointVisitor visitor = (SpatialEnvelopeVisitor.CartesianPointVisitor) pointVisitor; + builder.beginPositionEntry(); + builder.appendInt(CoordinateEncoder.CARTESIAN.encodeX(visitor.getMinX())); + builder.appendInt(CoordinateEncoder.CARTESIAN.encodeX(visitor.getMaxX())); + builder.appendInt(CoordinateEncoder.CARTESIAN.encodeY(visitor.getMaxY())); + builder.appendInt(CoordinateEncoder.CARTESIAN.encodeY(visitor.getMinY())); + builder.endPositionEntry(); + } + } + } + + private static 
Block.Builder blockBuilder( + DataType dataType, + FieldExtractPreference extractPreference, + int estimatedSize, + BlockFactory blockFactory + ) { ElementType elementType = switch (dataType) { case SHORT -> ElementType.INT; case FLOAT, HALF_FLOAT, SCALED_FLOAT -> ElementType.DOUBLE; default -> PlannerUtils.toElementType(dataType); }; - return elementType.newBlockBuilder(estimatedSize, blockFactory); + if (extractPreference == DOC_VALUES && DataType.isSpatialPoint(dataType)) { + return blockFactory.newLongBlockBuilder(estimatedSize); + } else if (extractPreference == EXTRACT_SPATIAL_BOUNDS && DataType.isSpatial(dataType)) { + return blockFactory.newIntBlockBuilder(estimatedSize); + } else { + return elementType.newBlockBuilder(estimatedSize, blockFactory); + } + } + + private static TestBlockCopier blockCopier(DataType dataType, FieldExtractPreference extractPreference, IntVector docIndices) { + if (extractPreference == DOC_VALUES && DataType.isSpatialPoint(dataType)) { + return TestSpatialPointStatsBlockCopier.create(docIndices, dataType); + } else if (extractPreference == EXTRACT_SPATIAL_BOUNDS && DataType.isSpatial(dataType)) { + return TestSpatialShapeExtentBlockCopier.create(docIndices, dataType); + } else { + return new TestBlockCopier(docIndices); + } } } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapper.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapper.java index 67d25556a2aa..f7c5f1b8072f 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapper.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapper.java @@ -33,6 +33,7 @@ import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; import 
org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.mapper.AbstractShapeGeometryFieldMapper; +import org.elasticsearch.index.mapper.BlockLoader; import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.GeoShapeIndexer; @@ -300,14 +301,17 @@ public class GeoShapeWithDocValuesFieldMapper extends AbstractShapeGeometryField } @Override - protected boolean isBoundsExtractionSupported() { - // Extracting bounds for geo shapes is not implemented yet. - return false; + public BlockLoader blockLoader(BlockLoaderContext blContext) { + return blContext.fieldExtractPreference() == FieldExtractPreference.EXTRACT_SPATIAL_BOUNDS + ? new GeoBoundsBlockLoader(name()) + : blockLoaderFromSource(blContext); } - @Override - protected CoordinateEncoder coordinateEncoder() { - return CoordinateEncoder.GEO; + static class GeoBoundsBlockLoader extends AbstractShapeGeometryFieldMapper.AbstractShapeGeometryFieldType.BoundsBlockLoader { + + GeoBoundsBlockLoader(String fieldName) { + super(fieldName); + } } } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java index f1140093f236..198e0ba3011b 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java @@ -22,6 +22,7 @@ import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.mapper.AbstractShapeGeometryFieldMapper; +import org.elasticsearch.index.mapper.BlockLoader; import org.elasticsearch.index.mapper.DocumentParserContext; import 
org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; @@ -31,6 +32,7 @@ import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.lucene.spatial.BinaryShapeDocValuesField; import org.elasticsearch.lucene.spatial.CartesianShapeIndexer; import org.elasticsearch.lucene.spatial.CoordinateEncoder; +import org.elasticsearch.lucene.spatial.Extent; import org.elasticsearch.lucene.spatial.XYQueriesUtils; import org.elasticsearch.script.field.AbstractScriptFieldFactory; import org.elasticsearch.script.field.DocValuesScriptFieldFactory; @@ -186,13 +188,26 @@ public class ShapeFieldMapper extends AbstractShapeGeometryFieldMapper } @Override - protected boolean isBoundsExtractionSupported() { - return true; + public BlockLoader blockLoader(BlockLoaderContext blContext) { + return blContext.fieldExtractPreference() == FieldExtractPreference.EXTRACT_SPATIAL_BOUNDS + ? new CartesianBoundsBlockLoader(name()) + : blockLoaderFromSource(blContext); } - @Override - protected CoordinateEncoder coordinateEncoder() { - return CoordinateEncoder.CARTESIAN; + static class CartesianBoundsBlockLoader extends BoundsBlockLoader { + protected CartesianBoundsBlockLoader(String fieldName) { + super(fieldName); + } + + protected void writeExtent(BlockLoader.IntBuilder builder, Extent extent) { + // For cartesian_shape we store 4 values as a multi-valued field, in the same order as the fields in the Rectangle class + builder.beginPositionEntry(); + builder.appendInt(Math.min(extent.negLeft, extent.posLeft)); + builder.appendInt(Math.max(extent.negRight, extent.posRight)); + builder.appendInt(extent.top); + builder.appendInt(extent.bottom); + builder.endPositionEntry(); + } } } From 1515898e8cb3da62916627c6f2215a7d3d4fbed1 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Thu, 16 Jan 2025 11:12:43 -0800 Subject: [PATCH 29/30] Preparation for 9.0.0-alpha1 --- .buildkite/scripts/dra-workflow.sh | 16 ++++++++++++++++ 
.buildkite/scripts/dra-workflow.trigger.sh | 3 ++- 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/.buildkite/scripts/dra-workflow.sh b/.buildkite/scripts/dra-workflow.sh index f2dc40ca1927..b286ffe6d80b 100755 --- a/.buildkite/scripts/dra-workflow.sh +++ b/.buildkite/scripts/dra-workflow.sh @@ -29,6 +29,11 @@ if [[ "$WORKFLOW" == "snapshot" ]]; then VERSION_SUFFIX="-SNAPSHOT" fi +if [[ -n "$VERSION_QUALIFIER" ]]; then + ES_VERSION="${ES_VERSION}-${VERSION_QUALIFIER}" + echo "Version qualifier specified. ES_VERSION=${ES_VERSION}." +fi + BEATS_BUILD_ID="$(./.ci/scripts/resolve-dra-manifest.sh beats "$RM_BRANCH" "$ES_VERSION" "$WORKFLOW")" echo "BEATS_BUILD_ID=$BEATS_BUILD_ID" @@ -37,6 +42,7 @@ echo "ML_CPP_BUILD_ID=$ML_CPP_BUILD_ID" LICENSE_KEY_ARG="" BUILD_SNAPSHOT_ARG="" +VERSION_QUALIFIER_ARG="" if [[ "$WORKFLOW" == "staging" ]]; then LICENSE_KEY=$(mktemp -d)/license.key @@ -47,6 +53,10 @@ if [[ "$WORKFLOW" == "staging" ]]; then BUILD_SNAPSHOT_ARG="-Dbuild.snapshot=false" fi +if [[ -n "$VERSION_QUALIFIER" ]]; then + VERSION_QUALIFIER_ARG="-Dbuild.version_qualifier=$VERSION_QUALIFIER" +fi + echo --- Building release artifacts .ci/scripts/run-gradle.sh -Ddra.artifacts=true \ @@ -56,12 +66,17 @@ echo --- Building release artifacts -Dcsv="$WORKSPACE/build/distributions/dependencies-${ES_VERSION}${VERSION_SUFFIX}.csv" \ $LICENSE_KEY_ARG \ $BUILD_SNAPSHOT_ARG \ + $VERSION_QUALIFIER_ARG \ buildReleaseArtifacts \ exportCompressedDockerImages \ :distribution:generateDependenciesReport PATH="$PATH:${JAVA_HOME}/bin" # Required by the following script +if [[ -z "$VERSION_QUALIFIER" ]]; then x-pack/plugin/sql/connectors/tableau/package.sh asm qualifier="$VERSION_SUFFIX" +else +x-pack/plugin/sql/connectors/tableau/package.sh asm qualifier="$VERSION_QUALIFIER" +fi # we regenerate this file as part of the release manager invocation rm "build/distributions/elasticsearch-jdbc-${ES_VERSION}${VERSION_SUFFIX}.taco.sha512" @@ -88,6 +103,7 @@ docker run --rm \ --branch
"$RM_BRANCH" \ --commit "$BUILDKITE_COMMIT" \ --workflow "$WORKFLOW" \ + --qualifier "${VERSION_QUALIFER:-}" \ --version "$ES_VERSION" \ --artifact-set main \ --dependency "beats:https://artifacts-${WORKFLOW}.elastic.co/beats/${BEATS_BUILD_ID}/manifest-${ES_VERSION}${VERSION_SUFFIX}.json" \ diff --git a/.buildkite/scripts/dra-workflow.trigger.sh b/.buildkite/scripts/dra-workflow.trigger.sh index 5ef756c30bcc..43c4b42ecf00 100755 --- a/.buildkite/scripts/dra-workflow.trigger.sh +++ b/.buildkite/scripts/dra-workflow.trigger.sh @@ -8,7 +8,7 @@ source .buildkite/scripts/branches.sh for BRANCH in "${BRANCHES[@]}"; do if [[ "$BRANCH" == "main" ]]; then - continue + export VERSION_QUALIFIER="alpha1" fi INTAKE_PIPELINE_SLUG="elasticsearch-intake" @@ -24,5 +24,6 @@ for BRANCH in "${BRANCHES[@]}"; do commit: "$LAST_GOOD_COMMIT" env: DRA_WORKFLOW: staging + VERSION_QUALIFIER: ${VERSION_QUALIFIER:-} EOF done From 34059c9dbdd0a2689756ef78d1ebb3bc33c21d03 Mon Sep 17 00:00:00 2001 From: Patrick Doyle <810052+prdoyle@users.noreply.github.com> Date: Thu, 16 Jan 2025 14:30:23 -0500 Subject: [PATCH 30/30] Limit ByteSizeUnit to 2 decimals (#120142) * Exhaustive testParseFractionalNumber * Refactor: encapsulate ByteSizeUnit constructor * Refactor: store size in bytes * Support up to 2 decimals in parsed ByteSizeValue * Fix test for rounding up with no warnings * ByteSizeUnit transport changes * Update docs/changelog/120142.yaml * Changelog details and impact * Fix change log breaking.area * Address PR comments --- .../bytes/BytesArrayReadLongBenchmark.java | 2 +- .../PagedBytesReferenceReadLongBenchmark.java | 2 +- docs/changelog/120142.yaml | 13 + .../ingest/common/BytesProcessorTests.java | 5 +- .../RemoteScrollableHitSourceTests.java | 2 +- .../azure/AzureBlobStoreRepositoryTests.java | 2 +- .../AzureStorageCleanupThirdPartyTests.java | 2 +- .../repositories/azure/AzureBlobStore.java | 4 +- .../repositories/azure/AzureRepository.java | 2 +- .../azure/AzureStorageService.java | 2 +- 
.../azure/AbstractAzureServerTestCase.java | 2 +- .../azure/AzureRepositorySettingsTests.java | 2 +- ...eCloudStorageBlobStoreRepositoryTests.java | 2 +- .../gcs/GoogleCloudStorageBlobStore.java | 2 +- .../gcs/GoogleCloudStorageRepository.java | 2 +- .../repositories/s3/S3BlobContainer.java | 2 +- .../repositories/s3/S3Repository.java | 8 +- .../s3/S3BlobContainerRetriesTests.java | 4 +- .../repositories/s3/S3RepositoryTests.java | 4 +- .../common/blobstore/url/URLBlobStore.java | 2 +- .../netty4/Netty4HttpRequestSizeLimitIT.java | 2 +- .../Netty4IncrementalRequestHandlingIT.java | 2 +- .../transport/netty4/Netty4Plugin.java | 4 +- .../admin/indices/rollover/RolloverIT.java | 16 +- .../action/bulk/BulkProcessor2IT.java | 8 +- .../action/bulk/BulkProcessorIT.java | 12 +- .../allocation/DiskThresholdMonitorIT.java | 2 +- .../decider/DiskThresholdDeciderIT.java | 2 +- .../index/shard/IndexShardIT.java | 5 +- .../RemoveCorruptedShardDataCommandIT.java | 2 +- .../index/store/CorruptedFileIT.java | 8 +- .../index/store/CorruptedTranslogIT.java | 2 +- .../breaker/CircuitBreakerServiceIT.java | 2 +- .../indices/recovery/IndexRecoveryIT.java | 2 +- .../indices/settings/UpdateSettingsIT.java | 4 +- .../indices/state/CloseIndexIT.java | 2 +- .../recovery/TruncatedRecoveryIT.java | 5 +- .../fs/FsBlobStoreRepositoryIT.java | 2 +- .../snapshots/SnapshotShutdownIT.java | 2 +- .../org/elasticsearch/TransportVersions.java | 1 + .../action/bulk/BulkProcessor.java | 2 +- .../action/bulk/BulkProcessor2.java | 4 +- .../common/unit/ByteSizeValue.java | 317 ++++++++++++------ .../http/HttpTransportSettings.java | 8 +- .../elasticsearch/index/IndexSettings.java | 8 +- .../index/MergePolicyConfig.java | 6 +- .../index/engine/InternalEngine.java | 2 +- .../index/shard/PrimaryReplicaSyncer.java | 2 +- .../index/translog/TranslogConfig.java | 2 +- .../indices/IndexingMemoryController.java | 2 +- .../indices/recovery/RecoverySettings.java | 22 +- .../org/elasticsearch/monitor/fs/FsInfo.java | 
8 +- .../elasticsearch/search/SearchService.java | 2 +- .../elasticsearch/threadpool/ThreadPool.java | 2 +- .../transport/InboundDecoder.java | 4 +- .../transport/RemoteClusterPortSettings.java | 6 +- .../rollover/RolloverConditionsTests.java | 8 +- .../indices/shrink/ResizeRequestTests.java | 2 +- .../action/bulk/BulkProcessor2Tests.java | 4 +- .../action/bulk/BulkProcessorTests.java | 2 +- .../settings/MemorySizeSettingsTests.java | 2 +- .../common/settings/SettingTests.java | 8 +- .../common/settings/SettingsTests.java | 2 +- .../common/unit/ByteSizeValueTests.java | 271 ++++++++------- .../unit/RelativeByteSizeValueTests.java | 6 +- .../common/util/BitArrayTests.java | 2 +- .../index/MergePolicyConfigTests.java | 10 +- .../index/translog/TranslogTests.java | 8 +- .../IndexingMemoryControllerTests.java | 4 +- .../HierarchyCircuitBreakerServiceTests.java | 24 +- .../PeerRecoveryTargetServiceTests.java | 12 +- .../recovery/RecoverySettingsTests.java | 10 +- .../recovery/RecoverySourceHandlerTests.java | 2 +- .../elasticsearch/test/ESIntegTestCase.java | 8 +- .../test/InternalTestCluster.java | 4 +- .../topmetrics/TopMetricsAggregatorTests.java | 2 +- ...bstractFrozenAutoscalingIntegTestCase.java | 2 +- .../FrozenShardsDeciderServiceTests.java | 2 +- .../shared/SharedBlobCacheServiceTests.java | 4 +- .../xpack/ccr/CcrRetentionLeaseIT.java | 2 +- .../xpack/ccr/FollowerFailOverIT.java | 8 +- .../xpack/ccr/IndexFollowingIT.java | 4 +- .../elasticsearch/xpack/ccr/CcrSettings.java | 8 +- .../action/TransportResumeFollowAction.java | 4 +- .../ccr/rest/RestShardChangesAction.java | 2 +- .../ResumeFollowActionRequestTests.java | 2 +- .../ccr/action/ShardChangesActionTests.java | 14 +- .../ShardFollowNodeTaskRandomTests.java | 2 +- .../ccr/action/ShardFollowNodeTaskTests.java | 2 +- .../ShardFollowTaskReplicationTests.java | 12 +- ...tActivateAutoFollowPatternActionTests.java | 6 +- .../TransportFollowStatsActionTests.java | 2 +- .../action/TransportUnfollowActionTests.java 
| 2 +- .../core/ilm/PauseFollowerIndexStepTests.java | 6 +- .../ilm/WaitForRolloverReadyStepTests.java | 16 +- .../action/ForecastJobActionRequestTests.java | 2 +- .../DataFrameAnalyticsConfigTests.java | 18 +- .../explain/MemoryEstimationTests.java | 16 +- .../xpack/core/ml/job/config/JobTests.java | 6 +- .../core/ml/job/config/JobUpdateTests.java | 6 +- .../downsample/DownsampleShardIndexer.java | 4 +- .../inference/external/http/HttpSettings.java | 4 +- .../common/SizeLimitInputStreamTests.java | 2 +- .../action/ModelLoaderUtils.java | 2 +- .../ClusterStatsMonitoringDocTests.java | 4 +- ...rozenSearchableSnapshotsIntegTestCase.java | 6 +- .../BaseSearchableSnapshotsIntegTestCase.java | 16 +- ...chableSnapshotDiskThresholdIntegTests.java | 2 +- ...ableSnapshotsBlobStoreCacheIntegTests.java | 2 +- .../SearchableSnapshots.java | 4 +- .../cache/full/CacheService.java | 6 +- .../store/IndexInputStats.java | 2 +- .../AbstractSearchableSnapshotsTestCase.java | 2 +- ...SearchableSnapshotDirectoryStatsTests.java | 6 +- .../DirectBlobContainerIndexInputTests.java | 8 +- .../elasticsearch/xpack/watcher/Watcher.java | 6 +- .../watcher/common/http/HttpSettings.java | 4 +- .../watcher/common/http/HttpClientTests.java | 2 +- .../http/SizeLimitInputStreamTests.java | 4 +- 119 files changed, 663 insertions(+), 510 deletions(-) create mode 100644 docs/changelog/120142.yaml diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/bytes/BytesArrayReadLongBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/bytes/BytesArrayReadLongBenchmark.java index 35c15eca45a0..58de886a30d1 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/bytes/BytesArrayReadLongBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/bytes/BytesArrayReadLongBenchmark.java @@ -47,7 +47,7 @@ public class BytesArrayReadLongBenchmark { @Setup public void initResults() throws IOException { final BytesStreamOutput tmp = new BytesStreamOutput(); - final long 
bytes = new ByteSizeValue(dataMb, ByteSizeUnit.MB).getBytes(); + final long bytes = ByteSizeValue.of(dataMb, ByteSizeUnit.MB).getBytes(); for (int i = 0; i < bytes / 8; i++) { tmp.writeLong(i); } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/bytes/PagedBytesReferenceReadLongBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/bytes/PagedBytesReferenceReadLongBenchmark.java index 24ce3b6fe1c6..7ee1dec94c38 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/bytes/PagedBytesReferenceReadLongBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/bytes/PagedBytesReferenceReadLongBenchmark.java @@ -47,7 +47,7 @@ public class PagedBytesReferenceReadLongBenchmark { @Setup public void initResults() throws IOException { final BytesStreamOutput tmp = new BytesStreamOutput(); - final long bytes = new ByteSizeValue(dataMb, ByteSizeUnit.MB).getBytes(); + final long bytes = ByteSizeValue.of(dataMb, ByteSizeUnit.MB).getBytes(); for (int i = 0; i < bytes / 8; i++) { tmp.writeLong(i); } diff --git a/docs/changelog/120142.yaml b/docs/changelog/120142.yaml new file mode 100644 index 000000000000..febb0f90c075 --- /dev/null +++ b/docs/changelog/120142.yaml @@ -0,0 +1,13 @@ +pr: 120142 +summary: Limit `ByteSizeUnit` to 2 decimals +area: Infra/Core +type: breaking +issues: [] +breaking: + title: Limit `ByteSizeUnit` to 2 decimals + area: Cluster and node setting + details: In the past, byte values like `1.25 mb` were allowed but deprecated. Now, values with up to two decimal places are allowed, + unless the unit is bytes, in which case no decimals are allowed. Values with too many decimal places result in an error. + impact: Values with more than two decimal places, like `0.123 mb` will be rejected as an error, + where in the past, they'd be accepted with a deprecation warning. 
+ notable: false diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorTests.java index b88c5b0da293..6851eb5247c1 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorTests.java @@ -77,9 +77,6 @@ public class BytesProcessorTests extends AbstractStringProcessorTestCase { String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, "1.1kb"); Processor processor = newProcessor(fieldName, randomBoolean(), fieldName); processor.execute(ingestDocument); - assertThat(ingestDocument.getFieldValue(fieldName, expectedResultType()), equalTo(1126L)); - assertWarnings( - "Fractional bytes values are deprecated. Use non-fractional bytes values instead: [1.1kb] found for setting " + "[Ingest Field]" - ); + assertThat(ingestDocument.getFieldValue(fieldName, expectedResultType()), equalTo(1127L)); } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSourceTests.java index c91b2e448bf7..2bff467da58a 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSourceTests.java @@ -438,7 +438,7 @@ public class RemoteScrollableHitSourceTests extends ESTestCase { public Future answer(InvocationOnMock invocationOnMock) throws Throwable { HeapBufferedAsyncResponseConsumer consumer = (HeapBufferedAsyncResponseConsumer) invocationOnMock.getArguments()[1]; FutureCallback callback = (FutureCallback) invocationOnMock.getArguments()[3]; - assertEquals(new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt(), 
consumer.getBufferLimit()); + assertEquals(ByteSizeValue.of(100, ByteSizeUnit.MB).bytesAsInt(), consumer.getBufferLimit()); callback.failed(tooLong); return null; } diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java index f3101890d818..f0242b6c3ef3 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -90,7 +90,7 @@ public class AzureBlobStoreRepositoryTests extends ESMockAPIBasedRepositoryInteg protected Settings repositorySettings(String repoName) { Settings.Builder settingsBuilder = Settings.builder() .put(super.repositorySettings(repoName)) - .put(AzureRepository.Repository.MAX_SINGLE_PART_UPLOAD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.MB)) + .put(AzureRepository.Repository.MAX_SINGLE_PART_UPLOAD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.MB)) .put(AzureRepository.Repository.CONTAINER_SETTING.getKey(), "container") .put(AzureStorageSettings.ACCOUNT_SETTING.getKey(), "test") .put(AzureRepository.Repository.DELETION_BATCH_SIZE_SETTING.getKey(), randomIntBetween(5, 256)) diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java index 40be0f8ca78c..b3c12ce57257 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java +++ 
b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java @@ -115,7 +115,7 @@ public class AzureStorageCleanupThirdPartyTests extends AbstractThirdPartyReposi Settings.builder() .put("container", System.getProperty("test.azure.container")) .put("base_path", System.getProperty("test.azure.base") + randomAlphaOfLength(8)) - .put("max_single_part_upload_size", new ByteSizeValue(1, ByteSizeUnit.MB)) + .put("max_single_part_upload_size", ByteSizeValue.of(1, ByteSizeUnit.MB)) ) .get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index 3cac0dc4bb6d..34c13703521b 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -107,8 +107,8 @@ public class AzureBlobStore implements BlobStore { private static final Logger logger = LogManager.getLogger(AzureBlobStore.class); // See https://learn.microsoft.com/en-us/rest/api/storageservices/blob-batch#request-body public static final int MAX_ELEMENTS_PER_BATCH = 256; - private static final long DEFAULT_READ_CHUNK_SIZE = new ByteSizeValue(32, ByteSizeUnit.MB).getBytes(); - private static final int DEFAULT_UPLOAD_BUFFERS_SIZE = (int) new ByteSizeValue(64, ByteSizeUnit.KB).getBytes(); + private static final long DEFAULT_READ_CHUNK_SIZE = ByteSizeValue.of(32, ByteSizeUnit.MB).getBytes(); + private static final int DEFAULT_UPLOAD_BUFFERS_SIZE = (int) ByteSizeValue.of(64, ByteSizeUnit.KB).getBytes(); private final AzureStorageService service; private final BigArrays bigArrays; diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java 
b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 316db4844e59..ff6bdfaa0072 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -81,7 +81,7 @@ public class AzureRepository extends MeteredBlobStoreRepository { ); public static final Setting READONLY_SETTING = Setting.boolSetting(READONLY_SETTING_KEY, false, Property.NodeScope); // see ModelHelper.BLOB_DEFAULT_MAX_SINGLE_UPLOAD_SIZE - private static final ByteSizeValue DEFAULT_MAX_SINGLE_UPLOAD_SIZE = new ByteSizeValue(256, ByteSizeUnit.MB); + private static final ByteSizeValue DEFAULT_MAX_SINGLE_UPLOAD_SIZE = ByteSizeValue.of(256, ByteSizeUnit.MB); public static final Setting MAX_SINGLE_PART_UPLOAD_SIZE_SETTING = Setting.byteSizeSetting( "max_single_part_upload_size", DEFAULT_MAX_SINGLE_UPLOAD_SIZE, diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index 4c7d42e6080c..e26e98481093 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -37,7 +37,7 @@ public class AzureStorageService { * The maximum size of a BlockBlob block. * See https://docs.microsoft.com/en-us/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs */ - public static final ByteSizeValue MAX_BLOCK_SIZE = new ByteSizeValue(100, ByteSizeUnit.MB); + public static final ByteSizeValue MAX_BLOCK_SIZE = ByteSizeValue.of(100, ByteSizeUnit.MB); /** * The maximum number of blocks. 
diff --git a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureServerTestCase.java b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureServerTestCase.java index cb9facc061a2..902096fe027e 100644 --- a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureServerTestCase.java +++ b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AbstractAzureServerTestCase.java @@ -165,7 +165,7 @@ public abstract class AbstractAzureServerTestCase extends ESTestCase { .put(CONTAINER_SETTING.getKey(), CONTAINER) .put(ACCOUNT_SETTING.getKey(), clientName) .put(LOCATION_MODE_SETTING.getKey(), locationMode) - .put(MAX_SINGLE_PART_UPLOAD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.MB)) + .put(MAX_SINGLE_PART_UPLOAD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.MB)) .build() ); diff --git a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java index 3afacb5b7426..b6b6c96f7aff 100644 --- a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java +++ b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java @@ -133,7 +133,7 @@ public class AzureRepositorySettingsTests extends ESTestCase { // chunk size in settings int size = randomIntBetween(1, 256); azureRepository = azureRepository(Settings.builder().put("chunk_size", size + "mb").build()); - assertEquals(new ByteSizeValue(size, ByteSizeUnit.MB), azureRepository.chunkSize()); + assertEquals(ByteSizeValue.of(size, ByteSizeUnit.MB), azureRepository.chunkSize()); // zero bytes is not allowed IllegalArgumentException e = expectThrows( diff --git 
a/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index 6fa8cb9be562..6505b7234966 100644 --- a/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -143,7 +143,7 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESMockAPIBasedRe Settings.builder().put("chunk_size", size + "mb").build() ); chunkSize = GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repositoryMetadata); - assertEquals(new ByteSizeValue(size, ByteSizeUnit.MB), chunkSize); + assertEquals(ByteSizeValue.of(size, ByteSizeUnit.MB), chunkSize); // zero bytes is not allowed IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { diff --git a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index c68217a1a373..6284129c0825 100644 --- a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -83,7 +83,7 @@ class GoogleCloudStorageBlobStore implements BlobStore { final String key = "es.repository_gcs.large_blob_threshold_byte_size"; final String largeBlobThresholdByteSizeProperty = System.getProperty(key); if (largeBlobThresholdByteSizeProperty == null) { - LARGE_BLOB_THRESHOLD_BYTE_SIZE = Math.toIntExact(new ByteSizeValue(5, ByteSizeUnit.MB).getBytes()); + 
LARGE_BLOB_THRESHOLD_BYTE_SIZE = Math.toIntExact(ByteSizeValue.of(5, ByteSizeUnit.MB).getBytes()); } else { final int largeBlobThresholdByteSize; try { diff --git a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index b9de9132738e..36944e61d9c1 100644 --- a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -40,7 +40,7 @@ class GoogleCloudStorageRepository extends MeteredBlobStoreRepository { * Maximum allowed object size in GCS. * @see GCS documentation for details. */ - static final ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(5, ByteSizeUnit.TB); + static final ByteSizeValue MAX_CHUNK_SIZE = ByteSizeValue.of(5, ByteSizeUnit.TB); static final String TYPE = "gcs"; diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index bf693222a4b7..ea1365796401 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -129,7 +129,7 @@ class S3BlobContainer extends AbstractBlobContainer { @Override public long readBlobPreferredLength() { // This container returns streams that must be fully consumed, so we tell consumers to make bounded requests. 
- return new ByteSizeValue(32, ByteSizeUnit.MB).getBytes(); + return ByteSizeValue.of(32, ByteSizeUnit.MB).getBytes(); } /** diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 591350c34ab8..cd6ac4df4d39 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -99,13 +99,13 @@ class S3Repository extends MeteredBlobStoreRepository { /** * Maximum size of files that can be uploaded using a single upload request. */ - static final ByteSizeValue MAX_FILE_SIZE = new ByteSizeValue(5, ByteSizeUnit.GB); + static final ByteSizeValue MAX_FILE_SIZE = ByteSizeValue.of(5, ByteSizeUnit.GB); /** * Minimum size of parts that can be uploaded using the Multipart Upload API. * (see http://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html) */ - static final ByteSizeValue MIN_PART_SIZE_USING_MULTIPART = new ByteSizeValue(5, ByteSizeUnit.MB); + static final ByteSizeValue MIN_PART_SIZE_USING_MULTIPART = ByteSizeValue.of(5, ByteSizeUnit.MB); /** * Maximum size of parts that can be uploaded using the Multipart Upload API. @@ -116,7 +116,7 @@ class S3Repository extends MeteredBlobStoreRepository { /** * Maximum size of files that can be uploaded using the Multipart Upload API. */ - static final ByteSizeValue MAX_FILE_SIZE_USING_MULTIPART = new ByteSizeValue(5, ByteSizeUnit.TB); + static final ByteSizeValue MAX_FILE_SIZE_USING_MULTIPART = ByteSizeValue.of(5, ByteSizeUnit.TB); /** * Minimum threshold below which the chunk is uploaded using a single request. 
Beyond this threshold, @@ -137,7 +137,7 @@ class S3Repository extends MeteredBlobStoreRepository { static final Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting( "chunk_size", MAX_FILE_SIZE_USING_MULTIPART, - new ByteSizeValue(5, ByteSizeUnit.MB), + ByteSizeValue.of(5, ByteSizeUnit.MB), MAX_FILE_SIZE_USING_MULTIPART ); diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java index b292dc587299..fc9b2141a30a 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -335,7 +335,7 @@ public class S3BlobContainerRetriesTests extends AbstractBlobContainerRetriesTes public void testWriteLargeBlob() throws Exception { final boolean useTimeout = rarely(); final TimeValue readTimeout = useTimeout ? TimeValue.timeValueMillis(randomIntBetween(100, 500)) : null; - final ByteSizeValue bufferSize = new ByteSizeValue(5, ByteSizeUnit.MB); + final ByteSizeValue bufferSize = ByteSizeValue.of(5, ByteSizeUnit.MB); final BlobContainer blobContainer = createBlobContainer(null, readTimeout, true, bufferSize); final int parts = randomIntBetween(1, 5); @@ -436,7 +436,7 @@ public class S3BlobContainerRetriesTests extends AbstractBlobContainerRetriesTes public void testWriteLargeBlobStreaming() throws Exception { final boolean useTimeout = rarely(); final TimeValue readTimeout = useTimeout ? 
TimeValue.timeValueMillis(randomIntBetween(100, 500)) : null; - final ByteSizeValue bufferSize = new ByteSizeValue(5, ByteSizeUnit.MB); + final ByteSizeValue bufferSize = ByteSizeValue.of(5, ByteSizeUnit.MB); final BlobContainer blobContainer = createBlobContainer(null, readTimeout, true, bufferSize); final int parts = randomIntBetween(1, 5); diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index 3817af4def88..5ee8f4ba1850 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -93,8 +93,8 @@ public class S3RepositoryTests extends ESTestCase { private Settings bufferAndChunkSettings(long buffer, long chunk) { return Settings.builder() .put(S3Repository.BUCKET_SETTING.getKey(), "bucket") - .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(buffer, ByteSizeUnit.MB).getStringRep()) - .put(S3Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunk, ByteSizeUnit.MB).getStringRep()) + .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), ByteSizeValue.of(buffer, ByteSizeUnit.MB).getStringRep()) + .put(S3Repository.CHUNK_SIZE_SETTING.getKey(), ByteSizeValue.of(chunk, ByteSizeUnit.MB).getStringRep()) .build(); } diff --git a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java index 0e9c735b22fd..0e1c198e059a 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java @@ -33,7 +33,7 @@ public class URLBlobStore implements BlobStore { static final Setting BUFFER_SIZE_SETTING = 
Setting.byteSizeSetting( "repositories.uri.buffer_size", - new ByteSizeValue(100, ByteSizeUnit.KB), + ByteSizeValue.of(100, ByteSizeUnit.KB), Setting.Property.NodeScope ); diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java index fcd45e9f9f47..d409d6d4e250 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4HttpRequestSizeLimitIT.java @@ -42,7 +42,7 @@ import static org.hamcrest.Matchers.hasSize; @ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numClientNodes = 0, numDataNodes = 1) public class Netty4HttpRequestSizeLimitIT extends ESNetty4IntegTestCase { - private static final ByteSizeValue LIMIT = new ByteSizeValue(2, ByteSizeUnit.KB); + private static final ByteSizeValue LIMIT = ByteSizeValue.of(2, ByteSizeUnit.KB); @Override protected boolean addMockHttpTransport() { diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java index ab2fb41d5a22..d825ec0a83f5 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java @@ -96,7 +96,7 @@ public class Netty4IncrementalRequestHandlingIT extends ESNetty4IntegTestCase { @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { Settings.Builder builder = 
Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)); - builder.put(HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.getKey(), new ByteSizeValue(50, ByteSizeUnit.MB)); + builder.put(HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.getKey(), ByteSizeValue.of(50, ByteSizeUnit.MB)); return builder.build(); } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Plugin.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Plugin.java index 3feaa2874ebd..953337d17635 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Plugin.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Plugin.java @@ -57,7 +57,7 @@ public class Netty4Plugin extends Plugin implements NetworkPlugin { ); public static final Setting SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE = byteSizeSetting( "http.netty.receive_predictor_size", - new ByteSizeValue(64, ByteSizeUnit.KB), + ByteSizeValue.of(64, ByteSizeUnit.KB), Setting.Property.NodeScope ); public static final Setting WORKER_COUNT = new Setting<>( @@ -68,7 +68,7 @@ public class Netty4Plugin extends Plugin implements NetworkPlugin { ); private static final Setting NETTY_RECEIVE_PREDICTOR_SIZE = byteSizeSetting( "transport.netty.receive_predictor_size", - new ByteSizeValue(64, ByteSizeUnit.KB), + ByteSizeValue.of(64, ByteSizeUnit.KB), Setting.Property.NodeScope ); public static final Setting NETTY_RECEIVE_PREDICTOR_MAX = byteSizeSetting( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index a7cb39ed3df9..84f71864281e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -316,7 +316,7 @@ public class RolloverIT extends ESIntegTestCase { final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias") .setConditions( RolloverConditions.newBuilder() - .addMaxIndexSizeCondition(new ByteSizeValue(10, ByteSizeUnit.MB)) + .addMaxIndexSizeCondition(ByteSizeValue.of(10, ByteSizeUnit.MB)) .addMaxIndexAgeCondition(TimeValue.timeValueHours(4)) ) .get(); @@ -330,7 +330,7 @@ public class RolloverIT extends ESIntegTestCase { assertThat( conditions, containsInAnyOrder( - new MaxSizeCondition(new ByteSizeValue(10, ByteSizeUnit.MB)).toString(), + new MaxSizeCondition(ByteSizeValue.of(10, ByteSizeUnit.MB)).toString(), new MaxAgeCondition(TimeValue.timeValueHours(4)).toString() ) ); @@ -447,7 +447,7 @@ public class RolloverIT extends ESIntegTestCase { final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias") .setConditions( RolloverConditions.newBuilder() - .addMaxIndexSizeCondition(new ByteSizeValue(randomIntBetween(100, 50 * 1024), ByteSizeUnit.MB)) + .addMaxIndexSizeCondition(ByteSizeValue.of(randomIntBetween(100, 50 * 1024), ByteSizeUnit.MB)) ) .get(); assertThat(response.getOldIndex(), equalTo("test-1")); @@ -459,7 +459,7 @@ public class RolloverIT extends ESIntegTestCase { // A small max_size { - ByteSizeValue maxSizeValue = new ByteSizeValue(randomIntBetween(1, 20), ByteSizeUnit.BYTES); + ByteSizeValue maxSizeValue = ByteSizeValue.of(randomIntBetween(1, 20), ByteSizeUnit.BYTES); long beforeTime = client().threadPool().absoluteTimeInMillis() - 1000L; final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias") .setConditions(RolloverConditions.newBuilder().addMaxIndexSizeCondition(maxSizeValue)) @@ -482,7 +482,7 @@ public class RolloverIT extends ESIntegTestCase { final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias") .setConditions( 
RolloverConditions.newBuilder() - .addMaxIndexSizeCondition(new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES)) + .addMaxIndexSizeCondition(ByteSizeValue.of(randomNonNegativeLong(), ByteSizeUnit.BYTES)) .addMinIndexDocsCondition(1L) ) .get(); @@ -512,7 +512,7 @@ public class RolloverIT extends ESIntegTestCase { final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias") .setConditions( RolloverConditions.newBuilder() - .addMaxPrimaryShardSizeCondition(new ByteSizeValue(randomIntBetween(100, 50 * 1024), ByteSizeUnit.MB)) + .addMaxPrimaryShardSizeCondition(ByteSizeValue.of(randomIntBetween(100, 50 * 1024), ByteSizeUnit.MB)) ) .get(); assertThat(response.getOldIndex(), equalTo("test-1")); @@ -524,7 +524,7 @@ public class RolloverIT extends ESIntegTestCase { // A small max_primary_shard_size { - ByteSizeValue maxPrimaryShardSizeCondition = new ByteSizeValue(randomIntBetween(1, 20), ByteSizeUnit.BYTES); + ByteSizeValue maxPrimaryShardSizeCondition = ByteSizeValue.of(randomIntBetween(1, 20), ByteSizeUnit.BYTES); long beforeTime = client().threadPool().absoluteTimeInMillis() - 1000L; final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias") .setConditions(RolloverConditions.newBuilder().addMaxPrimaryShardSizeCondition(maxPrimaryShardSizeCondition)) @@ -547,7 +547,7 @@ public class RolloverIT extends ESIntegTestCase { final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias") .setConditions( RolloverConditions.newBuilder() - .addMaxPrimaryShardSizeCondition(new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES)) + .addMaxPrimaryShardSizeCondition(ByteSizeValue.of(randomNonNegativeLong(), ByteSizeUnit.BYTES)) .addMinIndexDocsCondition(1L) ) .get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessor2IT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessor2IT.java index 2ba969c57004..d7ffc4e5ea94 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessor2IT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessor2IT.java @@ -53,7 +53,7 @@ public class BulkProcessor2IT extends ESIntegTestCase { // let's make sure that the bulk action limit trips, one single execution will index all the documents .setBulkActions(numDocs) .setFlushInterval(TimeValue.timeValueHours(24)) - .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) + .setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB)) .build(); try { @@ -89,7 +89,7 @@ public class BulkProcessor2IT extends ESIntegTestCase { .setBulkActions(bulkActions) // set interval and size to high values .setFlushInterval(TimeValue.timeValueHours(24)) - .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) + .setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB)) .build(); try { @@ -134,7 +134,7 @@ public class BulkProcessor2IT extends ESIntegTestCase { // let's make sure that the bulk action limit trips, one single execution will index all the documents .setBulkActions(numDocs) .setFlushInterval(TimeValue.timeValueHours(24)) - .setBulkSize(new ByteSizeValue(randomIntBetween(1, 10), RandomPicks.randomFrom(random(), ByteSizeUnit.values()))) + .setBulkSize(ByteSizeValue.of(randomIntBetween(1, 10), RandomPicks.randomFrom(random(), ByteSizeUnit.values()))) .build(); MultiGetRequestBuilder multiGetRequestBuilder = indexDocs(client(), processor, numDocs); @@ -169,7 +169,7 @@ public class BulkProcessor2IT extends ESIntegTestCase { .setBulkActions(bulkActions) // set interval and size to high values .setFlushInterval(TimeValue.timeValueHours(24)) - .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) + .setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB)) .build(); try { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorIT.java index 2c5ee976e7c1..21e20226e657 100644 
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorIT.java @@ -55,7 +55,7 @@ public class BulkProcessorIT extends ESIntegTestCase { .setConcurrentRequests(randomIntBetween(0, 1)) .setBulkActions(numDocs) .setFlushInterval(TimeValue.timeValueHours(24)) - .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) + .setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB)) .build() ) { @@ -83,7 +83,7 @@ public class BulkProcessorIT extends ESIntegTestCase { .setConcurrentRequests(randomIntBetween(0, 10)) .setBulkActions(numDocs + randomIntBetween(1, 100)) .setFlushInterval(TimeValue.timeValueHours(24)) - .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) + .setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB)) .build() ) { @@ -115,7 +115,7 @@ public class BulkProcessorIT extends ESIntegTestCase { .setConcurrentRequests(randomIntBetween(0, 10)) .setBulkActions(numDocs + randomIntBetween(1, 100)) .setFlushInterval(TimeValue.timeValueHours(24)) - .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) + .setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB)) .setFlushCondition(flushEnabled::get) .build() ) { @@ -159,7 +159,7 @@ public class BulkProcessorIT extends ESIntegTestCase { .setBulkActions(bulkActions) // set interval and size to high values .setFlushInterval(TimeValue.timeValueHours(24)) - .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) + .setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB)) .build() ) { @@ -202,7 +202,7 @@ public class BulkProcessorIT extends ESIntegTestCase { .setConcurrentRequests(randomIntBetween(0, 1)) .setBulkActions(numDocs) .setFlushInterval(TimeValue.timeValueHours(24)) - .setBulkSize(new ByteSizeValue(randomIntBetween(1, 10), RandomPicks.randomFrom(random(), ByteSizeUnit.values()))) + .setBulkSize(ByteSizeValue.of(randomIntBetween(1, 10), RandomPicks.randomFrom(random(), ByteSizeUnit.values()))) .build(); MultiGetRequestBuilder 
multiGetRequestBuilder = indexDocs(client(), processor, numDocs); @@ -250,7 +250,7 @@ public class BulkProcessorIT extends ESIntegTestCase { .setBulkActions(bulkActions) // set interval and size to high values .setFlushInterval(TimeValue.timeValueHours(24)) - .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) + .setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB)) .build() ) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java index 10378b4d61d2..520d9f7d6072 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java @@ -43,7 +43,7 @@ import static org.hamcrest.Matchers.equalTo; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class DiskThresholdMonitorIT extends DiskUsageIntegTestCase { - private static final long FLOOD_STAGE_BYTES = new ByteSizeValue(10, ByteSizeUnit.KB).getBytes(); + private static final long FLOOD_STAGE_BYTES = ByteSizeValue.of(10, ByteSizeUnit.KB).getBytes(); @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index 19b0f0bd7323..8b8f6a358ad0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -60,7 +60,7 @@ import static org.hamcrest.Matchers.is; 
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class DiskThresholdDeciderIT extends DiskUsageIntegTestCase { - private static final long WATERMARK_BYTES = new ByteSizeValue(10, ByteSizeUnit.KB).getBytes(); + private static final long WATERMARK_BYTES = ByteSizeValue.of(10, ByteSizeUnit.KB).getBytes(); @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java index 870947db5bd8..a130a5b869ad 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -98,7 +98,6 @@ import static org.elasticsearch.cluster.routing.TestShardRouting.shardRoutingBui import static org.elasticsearch.index.shard.IndexShardTestCase.closeShardNoCheck; import static org.elasticsearch.index.shard.IndexShardTestCase.getTranslog; import static org.elasticsearch.index.shard.IndexShardTestCase.recoverFromStore; -import static org.elasticsearch.indices.cluster.AbstractIndicesClusterStateServiceTestCase.awaitIndexShardCloseAsyncTasks; import static org.elasticsearch.test.LambdaMatchers.falseWith; import static org.elasticsearch.test.LambdaMatchers.trueWith; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -332,7 +331,7 @@ public class IndexShardIT extends ESSingleNodeTestCase { Settings.builder() .put( IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), - new ByteSizeValue(135 /* size of the operation + one generation header&footer*/, ByteSizeUnit.BYTES) + ByteSizeValue.of(135 /* size of the operation + one generation header&footer*/, ByteSizeUnit.BYTES) ) .build() ) @@ -372,7 +371,7 @@ public class IndexShardIT extends ESSingleNodeTestCase { 
indicesAdmin().prepareUpdateSettings("test") .setSettings( Settings.builder() - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(size, ByteSizeUnit.BYTES)) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(size, ByteSizeUnit.BYTES)) .build() ) .get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java index bd58526c6143..4e9e4b4d641d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java @@ -604,7 +604,7 @@ public class RemoveCorruptedShardDataCommandIT extends ESIntegTestCase { private static void disableTranslogFlush(String index) { updateIndexSettings( Settings.builder() - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)), + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.PB)), index ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java index e6fc4c45219c..1594514d2f41 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -155,7 +155,7 @@ public class CorruptedFileIT extends ESIntegTestCase { // no checkindex - we corrupt shards on purpose .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no translog based flush - it might change the .liv / segments.N files - 
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.PB)) ) ); ensureGreen(); @@ -269,7 +269,7 @@ public class CorruptedFileIT extends ESIntegTestCase { .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on // purpose // no translog based flush - it might change the .liv / segments.N files - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.PB)) ) ); ensureGreen(); @@ -544,7 +544,7 @@ public class CorruptedFileIT extends ESIntegTestCase { // no checkindex - we corrupt shards on purpose .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no translog based flush - it might change the .liv / segments.N files - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.PB)) ) ); ensureGreen(); @@ -612,7 +612,7 @@ public class CorruptedFileIT extends ESIntegTestCase { // no checkindex - we corrupt shards on purpose .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no translog based flush - it might change the .liv / segments.N files - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.PB)) ) ); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java 
index 7e3fb4d8bf5e..887491755dcc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java @@ -54,7 +54,7 @@ public class CorruptedTranslogIT extends ESIntegTestCase { prepareCreate("test").setSettings( indexSettings(1, 0).put("index.refresh_interval", "-1") .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true) // never flush - always recover from translog - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.PB)) ) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java index 0da05f54bc5b..9beb3d0ef6c4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java @@ -332,7 +332,7 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase { } public void testLimitsRequestSize() { - ByteSizeValue inFlightRequestsLimit = new ByteSizeValue(8, ByteSizeUnit.KB); + ByteSizeValue inFlightRequestsLimit = ByteSizeValue.of(8, ByteSizeUnit.KB); if (noopBreakerUsed()) { logger.info("--> noop breakers used, skipping test"); return; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index fa1348c82d71..a8caca94289b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -256,7 +256,7 @@ public class IndexRecoveryIT extends AbstractIndexRecoveryIntegTestCase { public Settings.Builder createRecoverySettingsChunkPerSecond(long chunkSizeBytes) { return Settings.builder() // Set the chunk size in bytes - .put(RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.getKey(), new ByteSizeValue(chunkSizeBytes, ByteSizeUnit.BYTES)) + .put(RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.getKey(), ByteSizeValue.of(chunkSizeBytes, ByteSizeUnit.BYTES)) // Set one chunk of bytes per second. .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), chunkSizeBytes, ByteSizeUnit.BYTES); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java index 4dacc6fbb6bc..9a7a77bf77a8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java @@ -270,7 +270,7 @@ public class UpdateSettingsIT extends ESIntegTestCase { IndexService indexService = service.indexService(resolveIndex("test")); if (indexService != null) { assertEquals(indexService.getIndexSettings().getRefreshInterval().millis(), -1); - assertEquals(indexService.getIndexSettings().getFlushThresholdSize(new ByteSizeValue(1, ByteSizeUnit.TB)).getBytes(), 1024); + assertEquals(indexService.getIndexSettings().getFlushThresholdSize(ByteSizeValue.of(1, ByteSizeUnit.TB)).getBytes(), 1024); assertEquals(indexService.getIndexSettings().getGenerationThresholdSize().getBytes(), 4096); } } @@ -281,7 +281,7 @@ public class UpdateSettingsIT extends ESIntegTestCase { IndexService indexService = service.indexService(resolveIndex("test")); if (indexService != null) { 
assertEquals(indexService.getIndexSettings().getRefreshInterval().millis(), 1000); - assertEquals(indexService.getIndexSettings().getFlushThresholdSize(new ByteSizeValue(1, ByteSizeUnit.TB)).getBytes(), 1024); + assertEquals(indexService.getIndexSettings().getFlushThresholdSize(ByteSizeValue.of(1, ByteSizeUnit.TB)).getBytes(), 1024); assertEquals(indexService.getIndexSettings().getGenerationThresholdSize().getBytes(), 4096); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java index aaa78cc681a1..c25a2d634f3d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java @@ -74,7 +74,7 @@ public class CloseIndexIT extends ESIntegTestCase { .put(super.indexSettings()) .put( IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), - new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB) + ByteSizeValue.of(randomIntBetween(1, 4096), ByteSizeUnit.KB) ) .build(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java index ca2ff69ac9b1..58e2bcd66313 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java @@ -63,10 +63,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase { public void testCancelRecoveryAndResume() throws Exception { updateClusterSettings( Settings.builder() - .put( - RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.getKey(), - new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES) - ) + .put(RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.getKey(), ByteSizeValue.of(randomIntBetween(50, 300), 
ByteSizeUnit.BYTES)) ); NodesStatsResponse nodeStats = clusterAdmin().prepareNodesStats().get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java index d536632b8539..fefd18fa9369 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java @@ -21,7 +21,7 @@ public class FsBlobStoreRepositoryIT extends ESFsBasedRepositoryIntegTestCase { final Settings.Builder settings = Settings.builder().put("compress", randomBoolean()).put("location", randomRepoPath()); if (randomBoolean()) { long size = 1 << randomInt(10); - settings.put("chunk_size", new ByteSizeValue(size, ByteSizeUnit.KB)); + settings.put("chunk_size", ByteSizeValue.of(size, ByteSizeUnit.KB)); } return settings.build(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java index aeac8959df61..72317a7220ec 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java @@ -497,7 +497,7 @@ public class SnapshotShutdownIT extends AbstractSnapshotIntegTestCase { final String nodeForRemovalId = internalCluster().getInstance(NodeEnvironment.class, nodeForRemoval).nodeId(); final var indexName = randomIdentifier(); createIndexWithContent(indexName, indexSettings(numShards, 0).put(REQUIRE_NODE_NAME_SETTING, nodeForRemoval).build()); - indexAllShardsToAnEqualOrGreaterMinimumSize(indexName, new ByteSizeValue(2, ByteSizeUnit.KB).getBytes()); + indexAllShardsToAnEqualOrGreaterMinimumSize(indexName, ByteSizeValue.of(2, 
ByteSizeUnit.KB).getBytes()); // Start the snapshot with blocking in place on the data node not to allow shard snapshots to finish yet. final var clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 1ab8cdfc2af7..e66cb31d6f9a 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -156,6 +156,7 @@ public class TransportVersions { public static final TransportVersion ELASTIC_INFERENCE_SERVICE_UNIFIED_CHAT_COMPLETIONS_INTEGRATION = def(8_822_00_0); public static final TransportVersion KQL_QUERY_TECH_PREVIEW = def(8_823_00_0); public static final TransportVersion ESQL_PROFILE_ROWS_PROCESSED = def(8_824_00_0); + public static final TransportVersion BYTE_SIZE_VALUE_ALWAYS_USES_BYTES = def(8_825_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index 8d39644bbf5b..a8bd6ac35103 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -84,7 +84,7 @@ public class BulkProcessor implements Closeable { private final Runnable onClose; private int concurrentRequests = 1; private int bulkActions = 1000; - private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB); + private ByteSizeValue bulkSize = ByteSizeValue.of(5, ByteSizeUnit.MB); private TimeValue flushInterval = null; private BackoffPolicy backoffPolicy = BackoffPolicy.exponentialBackoff(); private String globalIndex; diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor2.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor2.java index 916c37f93e35..3487c6089bfb 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor2.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor2.java @@ -76,8 +76,8 @@ public class BulkProcessor2 implements Closeable { private final Listener listener; private final ThreadPool threadPool; private int maxRequestsInBulk = 1000; - private ByteSizeValue maxBulkSizeInBytes = new ByteSizeValue(5, ByteSizeUnit.MB); - private ByteSizeValue maxBytesInFlight = new ByteSizeValue(50, ByteSizeUnit.MB); + private ByteSizeValue maxBulkSizeInBytes = ByteSizeValue.of(5, ByteSizeUnit.MB); + private ByteSizeValue maxBytesInFlight = ByteSizeValue.of(50, ByteSizeUnit.MB); private TimeValue flushInterval = null; private int maxNumberOfRetries = 3; diff --git a/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java b/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java index 589ea1a2ac56..3a4f832d6adc 100644 --- 
a/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java +++ b/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java @@ -14,84 +14,44 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.LogConfigurator; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.math.BigDecimal; +import java.math.RoundingMode; import java.util.Locale; import java.util.Objects; +import static org.elasticsearch.TransportVersions.BYTE_SIZE_VALUE_ALWAYS_USES_BYTES; +import static org.elasticsearch.common.unit.ByteSizeUnit.BYTES; +import static org.elasticsearch.common.unit.ByteSizeUnit.GB; +import static org.elasticsearch.common.unit.ByteSizeUnit.KB; +import static org.elasticsearch.common.unit.ByteSizeUnit.MB; +import static org.elasticsearch.common.unit.ByteSizeUnit.PB; +import static org.elasticsearch.common.unit.ByteSizeUnit.TB; + public class ByteSizeValue implements Writeable, Comparable, ToXContentFragment { /** * We have to lazy initialize the deprecation logger as otherwise a static logger here would be constructed before logging is configured - * leading to a runtime failure (see {@link LogConfigurator#checkErrorListener()} ). The premature construction would come from any + * leading to a runtime failure (see {@code LogConfigurator.checkErrorListener()} ). The premature construction would come from any * {@link ByteSizeValue} object constructed in, for example, settings in {@link org.elasticsearch.common.network.NetworkService}. 
*/ static class DeprecationLoggerHolder { static DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ByteSizeValue.class); } - public static final ByteSizeValue ZERO = new ByteSizeValue(0, ByteSizeUnit.BYTES); - public static final ByteSizeValue ONE = new ByteSizeValue(1, ByteSizeUnit.BYTES); - public static final ByteSizeValue MINUS_ONE = new ByteSizeValue(-1, ByteSizeUnit.BYTES); + public static final ByteSizeValue ZERO = new ByteSizeValue(0, BYTES); + public static final ByteSizeValue ONE = new ByteSizeValue(1, BYTES); + public static final ByteSizeValue MINUS_ONE = new ByteSizeValue(-1, BYTES); - public static ByteSizeValue ofBytes(long size) { - if (size == 0) { - return ZERO; - } - if (size == 1) { - return ONE; - } - if (size == -1) { - return MINUS_ONE; - } - return new ByteSizeValue(size, ByteSizeUnit.BYTES); - } - - public static ByteSizeValue ofKb(long size) { - return new ByteSizeValue(size, ByteSizeUnit.KB); - } - - public static ByteSizeValue ofMb(long size) { - return new ByteSizeValue(size, ByteSizeUnit.MB); - } - - public static ByteSizeValue ofGb(long size) { - return new ByteSizeValue(size, ByteSizeUnit.GB); - } - - public static ByteSizeValue ofTb(long size) { - return new ByteSizeValue(size, ByteSizeUnit.TB); - } - - public static ByteSizeValue ofPb(long size) { - return new ByteSizeValue(size, ByteSizeUnit.PB); - } - - private final long size; - private final ByteSizeUnit unit; - - public static ByteSizeValue readFrom(StreamInput in) throws IOException { - long size = in.readZLong(); - ByteSizeUnit unit = ByteSizeUnit.readFrom(in); - if (unit == ByteSizeUnit.BYTES) { - return ofBytes(size); - } - return new ByteSizeValue(size, unit); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeZLong(size); - unit.writeTo(out); - } - - public ByteSizeValue(long size, ByteSizeUnit unit) { - if (size < -1 || (size == -1 && unit != ByteSizeUnit.BYTES)) { + /** + * @param size the number of {@code unit}s + 
*/ + public static ByteSizeValue of(long size, ByteSizeUnit unit) { + if (size < -1 || (size == -1 && unit != BYTES)) { throw new IllegalArgumentException("Values less than -1 bytes are not supported: " + size + unit.getSuffix()); } if (size > Long.MAX_VALUE / unit.toBytes(1)) { @@ -99,18 +59,88 @@ public class ByteSizeValue implements Writeable, Comparable, ToXC "Values greater than " + Long.MAX_VALUE + " bytes are not supported: " + size + unit.getSuffix() ); } - this.size = size; - this.unit = unit; + return newByteSizeValue(size * unit.toBytes(1), unit); + } + + public static ByteSizeValue ofBytes(long size) { + return of(size, BYTES); + } + + public static ByteSizeValue ofKb(long size) { + return of(size, KB); + } + + public static ByteSizeValue ofMb(long size) { + return of(size, MB); + } + + public static ByteSizeValue ofGb(long size) { + return of(size, GB); + } + + public static ByteSizeValue ofTb(long size) { + return of(size, TB); + } + + public static ByteSizeValue ofPb(long size) { + return of(size, PB); + } + + static ByteSizeValue newByteSizeValue(long sizeInBytes, ByteSizeUnit desiredUnit) { + // Peel off some common cases to avoid allocations + if (desiredUnit == BYTES) { + if (sizeInBytes == 0) { + return ZERO; + } + if (sizeInBytes == 1) { + return ONE; + } + if (sizeInBytes == -1) { + return MINUS_ONE; + } + } + if (sizeInBytes < 0) { + throw new IllegalArgumentException("Values less than -1 bytes are not supported: " + sizeInBytes); + } + return new ByteSizeValue(sizeInBytes, desiredUnit); + } + + private final long sizeInBytes; + private final ByteSizeUnit desiredUnit; + + public static ByteSizeValue readFrom(StreamInput in) throws IOException { + long size = in.readZLong(); + ByteSizeUnit unit = ByteSizeUnit.readFrom(in); + if (in.getTransportVersion().onOrAfter(BYTE_SIZE_VALUE_ALWAYS_USES_BYTES)) { + return newByteSizeValue(size, unit); + } else { + return of(size, unit); + } + } + + @Override + public void writeTo(StreamOutput out) throws 
IOException { + if (out.getTransportVersion().onOrAfter(BYTE_SIZE_VALUE_ALWAYS_USES_BYTES)) { + out.writeZLong(sizeInBytes); + } else { + out.writeZLong(Math.divideExact(sizeInBytes, desiredUnit.toBytes(1))); + } + desiredUnit.writeTo(out); + } + + ByteSizeValue(long sizeInBytes, ByteSizeUnit desiredUnit) { + this.sizeInBytes = sizeInBytes; + this.desiredUnit = desiredUnit; } // For testing - long getSize() { - return size; + long getSizeInBytes() { + return sizeInBytes; } // For testing - ByteSizeUnit getUnit() { - return unit; + ByteSizeUnit getDesiredUnit() { + return desiredUnit; } @Deprecated @@ -123,27 +153,27 @@ public class ByteSizeValue implements Writeable, Comparable, ToXC } public long getBytes() { - return unit.toBytes(size); + return sizeInBytes; } public long getKb() { - return unit.toKB(size); + return getBytes() / KB.toBytes(1); } public long getMb() { - return unit.toMB(size); + return getBytes() / MB.toBytes(1); } public long getGb() { - return unit.toGB(size); + return getBytes() / GB.toBytes(1); } public long getTb() { - return unit.toTB(size); + return getBytes() / TB.toBytes(1); } public long getPb() { - return unit.toPB(size); + return getBytes() / PB.toBytes(1); } public double getKbFrac() { @@ -175,32 +205,41 @@ public class ByteSizeValue implements Writeable, Comparable, ToXC * serialising the value to JSON. */ public String getStringRep() { - if (size <= 0) { - return String.valueOf(size); + if (sizeInBytes <= 0) { + return String.valueOf(sizeInBytes); + } + long numUnits = sizeInBytes / desiredUnit.toBytes(1); + long residue = sizeInBytes % desiredUnit.toBytes(1); + if (residue == 0) { + return numUnits + desiredUnit.getSuffix(); + } else { + return sizeInBytes + BYTES.getSuffix(); } - return size + unit.getSuffix(); } + /** + * @return a string with at most one decimal point whose magnitude is close to {@code this}. 
+ */ @Override public String toString() { long bytes = getBytes(); double value = bytes; - String suffix = ByteSizeUnit.BYTES.getSuffix(); + String suffix = BYTES.getSuffix(); if (bytes >= ByteSizeUnit.C5) { value = getPbFrac(); - suffix = ByteSizeUnit.PB.getSuffix(); + suffix = PB.getSuffix(); } else if (bytes >= ByteSizeUnit.C4) { value = getTbFrac(); - suffix = ByteSizeUnit.TB.getSuffix(); + suffix = TB.getSuffix(); } else if (bytes >= ByteSizeUnit.C3) { value = getGbFrac(); - suffix = ByteSizeUnit.GB.getSuffix(); + suffix = GB.getSuffix(); } else if (bytes >= ByteSizeUnit.C2) { value = getMbFrac(); - suffix = ByteSizeUnit.MB.getSuffix(); + suffix = MB.getSuffix(); } else if (bytes >= ByteSizeUnit.C1) { value = getKbFrac(); - suffix = ByteSizeUnit.KB.getSuffix(); + suffix = KB.getSuffix(); } return Strings.format1Decimals(value, suffix); } @@ -231,25 +270,25 @@ public class ByteSizeValue implements Writeable, Comparable, ToXC } String lowerSValue = sValue.toLowerCase(Locale.ROOT).trim(); if (lowerSValue.endsWith("k")) { - return parse(sValue, lowerSValue, "k", ByteSizeUnit.KB, settingName); + return parse(sValue, lowerSValue, "k", KB, settingName); } else if (lowerSValue.endsWith("kb")) { - return parse(sValue, lowerSValue, "kb", ByteSizeUnit.KB, settingName); + return parse(sValue, lowerSValue, "kb", KB, settingName); } else if (lowerSValue.endsWith("m")) { - return parse(sValue, lowerSValue, "m", ByteSizeUnit.MB, settingName); + return parse(sValue, lowerSValue, "m", MB, settingName); } else if (lowerSValue.endsWith("mb")) { - return parse(sValue, lowerSValue, "mb", ByteSizeUnit.MB, settingName); + return parse(sValue, lowerSValue, "mb", MB, settingName); } else if (lowerSValue.endsWith("g")) { - return parse(sValue, lowerSValue, "g", ByteSizeUnit.GB, settingName); + return parse(sValue, lowerSValue, "g", GB, settingName); } else if (lowerSValue.endsWith("gb")) { - return parse(sValue, lowerSValue, "gb", ByteSizeUnit.GB, settingName); + return parse(sValue, 
lowerSValue, "gb", GB, settingName); } else if (lowerSValue.endsWith("t")) { - return parse(sValue, lowerSValue, "t", ByteSizeUnit.TB, settingName); + return parse(sValue, lowerSValue, "t", TB, settingName); } else if (lowerSValue.endsWith("tb")) { - return parse(sValue, lowerSValue, "tb", ByteSizeUnit.TB, settingName); + return parse(sValue, lowerSValue, "tb", TB, settingName); } else if (lowerSValue.endsWith("p")) { - return parse(sValue, lowerSValue, "p", ByteSizeUnit.PB, settingName); + return parse(sValue, lowerSValue, "p", PB, settingName); } else if (lowerSValue.endsWith("pb")) { - return parse(sValue, lowerSValue, "pb", ByteSizeUnit.PB, settingName); + return parse(sValue, lowerSValue, "pb", PB, settingName); } else if (lowerSValue.endsWith("b")) { return parseBytes(lowerSValue, settingName, sValue); } else { @@ -285,24 +324,16 @@ public class ByteSizeValue implements Writeable, Comparable, ToXC ByteSizeUnit unit, final String settingName ) { + assert unit != BYTES : "Use parseBytes"; final String s = normalized.substring(0, normalized.length() - suffix.length()).trim(); try { try { - return new ByteSizeValue(Long.parseLong(s), unit); + return of(Long.parseLong(s), unit); } catch (final NumberFormatException e) { - try { - final double doubleValue = Double.parseDouble(s); - DeprecationLoggerHolder.deprecationLogger.warn( - DeprecationCategory.PARSING, - "fractional_byte_values", - "Fractional bytes values are deprecated. 
Use non-fractional bytes values instead: [{}] found for setting [{}]", - initialInput, - settingName - ); - return ByteSizeValue.ofBytes((long) (doubleValue * unit.toBytes(1))); - } catch (final NumberFormatException ignored) { - throw new ElasticsearchParseException("failed to parse setting [{}] with value [{}]", e, settingName, initialInput); - } + // If it's not an integer, it could be a valid number with a decimal + BigDecimal decimalValue = parseDecimal(s, settingName, initialInput, e); + long sizeInBytes = convertToBytes(decimalValue, unit, settingName, initialInput, e); + return new ByteSizeValue(sizeInBytes, unit); } } catch (IllegalArgumentException e) { throw new ElasticsearchParseException( @@ -314,6 +345,82 @@ public class ByteSizeValue implements Writeable, Comparable, ToXC } } + /** + * @param numericPortion the number to parse + * @param settingName for error reporting - the name of the setting we're parsing + * @param settingValue for error reporting - the whole string value of the setting + * @param originalException for error reporting - the exception that occurred when we tried to parse the setting as an integer + */ + private static BigDecimal parseDecimal( + String numericPortion, + String settingName, + String settingValue, + NumberFormatException originalException + ) { + BigDecimal decimalValue; + try { + decimalValue = new BigDecimal(numericPortion); + } catch (NumberFormatException e) { + // Here, we choose to use originalException as the cause, because a NumberFormatException here + // indicates the string wasn't actually a valid BigDecimal after all, so there's no reason + // to confuse matters by reporting BigDecimal in the stack trace. 
+ ElasticsearchParseException toThrow = new ElasticsearchParseException( + "failed to parse setting [{}] with value [{}]", + originalException, + settingName, + settingValue + ); + toThrow.addSuppressed(e); + throw toThrow; + } + if (decimalValue.signum() < 0) { + throw new ElasticsearchParseException("failed to parse setting [{}] with value [{}]", settingName, settingValue); + } else if (decimalValue.scale() > 2) { + throw new ElasticsearchParseException( + "failed to parse setting [{}] with more than two decimals in value [{}]", + settingName, + settingValue + ); + } + return decimalValue; + } + + /** + * @param decimalValue the number of {@code unit}s + * @param unit the specified {@link ByteSizeUnit} + * @param settingName for error reporting - the name of the setting we're parsing + * @param settingValue for error reporting - the whole string value of the setting + * @param originalException for error reporting - the exception that occurred when we tried to parse the setting as an integer + */ + private static long convertToBytes( + BigDecimal decimalValue, + ByteSizeUnit unit, + String settingName, + String settingValue, + NumberFormatException originalException + ) { + BigDecimal sizeInBytes = decimalValue.multiply(new BigDecimal(unit.toBytes(1))); + try { + // Note we always round up here for two reasons: + // 1. Practically: toString truncates, so if we ever round down, we'll lose a tenth + // 2. In principle: if the user asks for 1.1kb, which is 1126.4 bytes, and we only give then 1126, then + // we have not given them what they asked for. + return sizeInBytes.setScale(0, RoundingMode.UP).longValueExact(); + } catch (ArithmeticException e) { + // Here, we choose to use the ArithmeticException as the cause, because we already know the + // number is a valid BigDecimal, so it makes sense to supply that context in the stack trace. 
+ ElasticsearchParseException toThrow = new ElasticsearchParseException( + "failed to parse setting [{}] with value beyond {}: [{}]", + e, + settingName, + Long.MAX_VALUE, + settingValue + ); + toThrow.addSuppressed(originalException); + throw toThrow; + } + } + @Override public boolean equals(Object o) { if (this == o) { @@ -328,7 +435,7 @@ public class ByteSizeValue implements Writeable, Comparable, ToXC @Override public int hashCode() { - return Long.hashCode(size * unit.toBytes(1)); + return Long.hashCode(getBytes()); } @Override diff --git a/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java b/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java index 07122dce8cb6..f55087522b20 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java +++ b/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java @@ -87,19 +87,19 @@ public final class HttpTransportSettings { ); public static final Setting SETTING_HTTP_MAX_CONTENT_LENGTH = Setting.byteSizeSetting( "http.max_content_length", - new ByteSizeValue(100, ByteSizeUnit.MB), + ByteSizeValue.of(100, ByteSizeUnit.MB), ByteSizeValue.ZERO, ByteSizeValue.ofBytes(Integer.MAX_VALUE), Property.NodeScope ); public static final Setting SETTING_HTTP_MAX_CHUNK_SIZE = Setting.byteSizeSetting( "http.max_chunk_size", - new ByteSizeValue(8, ByteSizeUnit.KB), + ByteSizeValue.of(8, ByteSizeUnit.KB), Property.NodeScope ); public static final Setting SETTING_HTTP_MAX_HEADER_SIZE = Setting.byteSizeSetting( "http.max_header_size", - new ByteSizeValue(16, ByteSizeUnit.KB), + ByteSizeValue.of(16, ByteSizeUnit.KB), Property.NodeScope ); public static final Setting SETTING_HTTP_MAX_WARNING_HEADER_COUNT = intSetting( @@ -115,7 +115,7 @@ public final class HttpTransportSettings { ); public static final Setting SETTING_HTTP_MAX_INITIAL_LINE_LENGTH = Setting.byteSizeSetting( "http.max_initial_line_length", - new ByteSizeValue(4, ByteSizeUnit.KB), + ByteSizeValue.of(4, 
ByteSizeUnit.KB), Property.NodeScope ); diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 284140460a43..cd0d16cb3e89 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -353,7 +353,7 @@ public final class IndexSettings { * Prevent the translog from growing over 10GB or 20% of the recommended shard size of 50GB. This helps bound the maximum disk usage * overhead of translogs. */ - new ByteSizeValue(10, ByteSizeUnit.GB), + ByteSizeValue.of(10, ByteSizeUnit.GB), /* * An empty translog occupies 55 bytes on disk. If the flush threshold is below this, the flush thread * can get stuck in an infinite loop as the shouldPeriodicallyFlush can still be true after flushing. @@ -385,7 +385,7 @@ public final class IndexSettings { */ public static final Setting INDEX_FLUSH_AFTER_MERGE_THRESHOLD_SIZE_SETTING = Setting.byteSizeSetting( "index.flush_after_merge", - new ByteSizeValue(512, ByteSizeUnit.MB), + ByteSizeValue.of(512, ByteSizeUnit.MB), ByteSizeValue.ZERO, // always flush after merge ByteSizeValue.ofBytes(Long.MAX_VALUE), // never flush after merge Property.Dynamic, @@ -398,7 +398,7 @@ public final class IndexSettings { */ public static final Setting INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING = Setting.byteSizeSetting( "index.translog.generation_threshold_size", - new ByteSizeValue(64, ByteSizeUnit.MB), + ByteSizeValue.of(64, ByteSizeUnit.MB), /* * An empty translog occupies 55 bytes on disk. 
If the generation threshold is * below this, the flush thread can get stuck in an infinite loop repeatedly @@ -1431,7 +1431,7 @@ public final class IndexSettings { } assert onePercentOfTotalDiskSpace > Translog.DEFAULT_HEADER_SIZE_IN_BYTES; if (onePercentOfTotalDiskSpace < flushThresholdSize.getBytes()) { - return new ByteSizeValue(onePercentOfTotalDiskSpace, ByteSizeUnit.BYTES); + return ByteSizeValue.of(onePercentOfTotalDiskSpace, ByteSizeUnit.BYTES); } else { return flushThresholdSize; } diff --git a/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java b/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java index 2532a6311cdc..a7a004ac60b5 100644 --- a/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java +++ b/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java @@ -116,9 +116,9 @@ public final class MergePolicyConfig { private final ByteSizeValue defaultMaxTimeBasedMergedSegment; public static final double DEFAULT_EXPUNGE_DELETES_ALLOWED = 10d; - public static final ByteSizeValue DEFAULT_FLOOR_SEGMENT = new ByteSizeValue(2, ByteSizeUnit.MB); + public static final ByteSizeValue DEFAULT_FLOOR_SEGMENT = ByteSizeValue.of(2, ByteSizeUnit.MB); public static final int DEFAULT_MAX_MERGE_AT_ONCE = 10; - public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB); + public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = ByteSizeValue.of(5, ByteSizeUnit.GB); public static final Setting DEFAULT_MAX_MERGED_SEGMENT_SETTING = Setting.byteSizeSetting( "indices.merge.policy.max_merged_segment", DEFAULT_MAX_MERGED_SEGMENT, @@ -131,7 +131,7 @@ public final class MergePolicyConfig { * of merging fewer segments together than the merge factor, which in-turn increases write amplification. So we set an arbitrarily high * roof that serves as a protection that we expect to never hit. 
*/ - public static final ByteSizeValue DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT = new ByteSizeValue(100, ByteSizeUnit.GB); + public static final ByteSizeValue DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT = ByteSizeValue.of(100, ByteSizeUnit.GB); public static final Setting DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT_SETTING = Setting.byteSizeSetting( "indices.merge.policy.max_time_based_merged_segment", DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT, diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 40839d8e1878..d3d7dcd8e930 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -266,7 +266,7 @@ public class InternalEngine extends Engine { ); assert translog.getGeneration() != null; this.translog = translog; - this.totalDiskSpace = new ByteSizeValue(Environment.getFileStore(translog.location()).getTotalSpace(), ByteSizeUnit.BYTES); + this.totalDiskSpace = ByteSizeValue.of(Environment.getFileStore(translog.location()).getTotalSpace(), ByteSizeUnit.BYTES); this.lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); this.softDeletesPolicy = newSoftDeletesPolicy(); this.combinedDeletionPolicy = new CombinedDeletionPolicy( diff --git a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java index 1143da30c295..4786cfdaddd3 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java +++ b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java @@ -49,7 +49,7 @@ public class PrimaryReplicaSyncer { private final TransportService transportService; private final SyncAction syncAction; - public static final ByteSizeValue DEFAULT_CHUNK_SIZE = new ByteSizeValue(512, ByteSizeUnit.KB); + public static final ByteSizeValue 
DEFAULT_CHUNK_SIZE = ByteSizeValue.of(512, ByteSizeUnit.KB); private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE; diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java index 8e26443044ec..4af0c0ad58ab 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogConfig.java @@ -26,7 +26,7 @@ import java.nio.file.Path; */ public final class TranslogConfig { - public static final ByteSizeValue DEFAULT_BUFFER_SIZE = new ByteSizeValue(1, ByteSizeUnit.MB); + public static final ByteSizeValue DEFAULT_BUFFER_SIZE = ByteSizeValue.of(1, ByteSizeUnit.MB); public static final ByteSizeValue EMPTY_TRANSLOG_BUFFER_SIZE = ByteSizeValue.ofBytes(10); public static final OperationListener NOOP_OPERATION_LISTENER = (d, s, l) -> {}; diff --git a/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java b/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java index 70d8c9da3b86..71c05be1f25a 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java +++ b/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java @@ -56,7 +56,7 @@ public class IndexingMemoryController implements IndexingOperationListener, Clos * to set a floor on the actual size in bytes (default: 48 MB). 
*/ public static final Setting MIN_INDEX_BUFFER_SIZE_SETTING = Setting.byteSizeSetting( "indices.memory.min_index_buffer_size", - new ByteSizeValue(48, ByteSizeUnit.MB), + ByteSizeValue.of(48, ByteSizeUnit.MB), ByteSizeValue.ZERO, ByteSizeValue.ofBytes(Long.MAX_VALUE), Property.NodeScope diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java index 475f83de9cae..2e6cdf0e9358 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java @@ -201,7 +201,7 @@ public class RecoverySettings { return s -> Setting.parseDouble(s, 0d, 1d, key, false); } - static final ByteSizeValue DEFAULT_MAX_BYTES_PER_SEC = new ByteSizeValue(40L, ByteSizeUnit.MB); + static final ByteSizeValue DEFAULT_MAX_BYTES_PER_SEC = ByteSizeValue.of(40L, ByteSizeUnit.MB); public static final Setting INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting( "indices.recovery.max_bytes_per_sec", @@ -227,16 +227,16 @@ public class RecoverySettings { */ final ByteSizeValue totalPhysicalMemory = TOTAL_PHYSICAL_MEMORY_OVERRIDING_TEST_SETTING.get(s); final ByteSizeValue maxBytesPerSec; - if (totalPhysicalMemory.compareTo(new ByteSizeValue(4, ByteSizeUnit.GB)) <= 0) { - maxBytesPerSec = new ByteSizeValue(40, ByteSizeUnit.MB); - } else if (totalPhysicalMemory.compareTo(new ByteSizeValue(8, ByteSizeUnit.GB)) <= 0) { - maxBytesPerSec = new ByteSizeValue(60, ByteSizeUnit.MB); - } else if (totalPhysicalMemory.compareTo(new ByteSizeValue(16, ByteSizeUnit.GB)) <= 0) { - maxBytesPerSec = new ByteSizeValue(90, ByteSizeUnit.MB); - } else if (totalPhysicalMemory.compareTo(new ByteSizeValue(32, ByteSizeUnit.GB)) <= 0) { - maxBytesPerSec = new ByteSizeValue(125, ByteSizeUnit.MB); + if (totalPhysicalMemory.compareTo(ByteSizeValue.of(4, ByteSizeUnit.GB)) <= 0) { + maxBytesPerSec = 
ByteSizeValue.of(40, ByteSizeUnit.MB); + } else if (totalPhysicalMemory.compareTo(ByteSizeValue.of(8, ByteSizeUnit.GB)) <= 0) { + maxBytesPerSec = ByteSizeValue.of(60, ByteSizeUnit.MB); + } else if (totalPhysicalMemory.compareTo(ByteSizeValue.of(16, ByteSizeUnit.GB)) <= 0) { + maxBytesPerSec = ByteSizeValue.of(90, ByteSizeUnit.MB); + } else if (totalPhysicalMemory.compareTo(ByteSizeValue.of(32, ByteSizeUnit.GB)) <= 0) { + maxBytesPerSec = ByteSizeValue.of(125, ByteSizeUnit.MB); } else { - maxBytesPerSec = new ByteSizeValue(250, ByteSizeUnit.MB); + maxBytesPerSec = ByteSizeValue.of(250, ByteSizeUnit.MB); } return maxBytesPerSec.getStringRep(); }, @@ -397,7 +397,7 @@ public class RecoverySettings { Property.NodeScope ); - public static final ByteSizeValue DEFAULT_CHUNK_SIZE = new ByteSizeValue(512, ByteSizeUnit.KB); + public static final ByteSizeValue DEFAULT_CHUNK_SIZE = ByteSizeValue.of(512, ByteSizeUnit.KB); /** * The maximum allowable size, in bytes, for buffering source documents during recovery. 
diff --git a/server/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java b/server/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java index 07c2a802ed21..99162a4068c6 100644 --- a/server/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java +++ b/server/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java @@ -102,14 +102,14 @@ public class FsInfo implements Iterable, Writeable, ToXContentFragm } public void setEffectiveWatermarks(final DiskThresholdSettings masterThresholdSettings, boolean isDedicatedFrozenNode) { - lowWatermarkFreeSpace = masterThresholdSettings.getFreeBytesThresholdLowStage(new ByteSizeValue(total, ByteSizeUnit.BYTES)); - highWatermarkFreeSpace = masterThresholdSettings.getFreeBytesThresholdHighStage(new ByteSizeValue(total, ByteSizeUnit.BYTES)); + lowWatermarkFreeSpace = masterThresholdSettings.getFreeBytesThresholdLowStage(ByteSizeValue.of(total, ByteSizeUnit.BYTES)); + highWatermarkFreeSpace = masterThresholdSettings.getFreeBytesThresholdHighStage(ByteSizeValue.of(total, ByteSizeUnit.BYTES)); floodStageWatermarkFreeSpace = masterThresholdSettings.getFreeBytesThresholdFloodStage( - new ByteSizeValue(total, ByteSizeUnit.BYTES) + ByteSizeValue.of(total, ByteSizeUnit.BYTES) ); if (isDedicatedFrozenNode) { frozenFloodStageWatermarkFreeSpace = masterThresholdSettings.getFreeBytesThresholdFrozenFloodStage( - new ByteSizeValue(total, ByteSizeUnit.BYTES) + ByteSizeValue.of(total, ByteSizeUnit.BYTES) ); } } diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index d228d93f897f..9284bc594a26 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -261,7 +261,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv public static final Setting MAX_ASYNC_SEARCH_RESPONSE_SIZE_SETTING = Setting.byteSizeSetting( 
"search.max_async_search_response_size", - new ByteSizeValue(10, ByteSizeUnit.MB), + ByteSizeValue.of(10, ByteSizeUnit.MB), Property.Dynamic, Property.NodeScope ); diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index 37a3ec586d10..cf549f7f4b0b 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -649,7 +649,7 @@ public class ThreadPool implements ReportingService, Scheduler, static int getMaxSnapshotThreadPoolSize(int allocatedProcessors, final ByteSizeValue maxHeapSize) { // While on larger data nodes, larger snapshot threadpool size improves snapshotting on high latency blob stores, // smaller instances can run into OOM issues and need a smaller snapshot threadpool size. - if (maxHeapSize.compareTo(new ByteSizeValue(750, ByteSizeUnit.MB)) < 0) { + if (maxHeapSize.compareTo(ByteSizeValue.of(750, ByteSizeUnit.MB)) < 0) { return halfAllocatedProcessorsMaxFive(allocatedProcessors); } return 10; diff --git a/server/src/main/java/org/elasticsearch/transport/InboundDecoder.java b/server/src/main/java/org/elasticsearch/transport/InboundDecoder.java index e2a1b010bad0..eed3cbd7e824 100644 --- a/server/src/main/java/org/elasticsearch/transport/InboundDecoder.java +++ b/server/src/main/java/org/elasticsearch/transport/InboundDecoder.java @@ -40,11 +40,11 @@ public class InboundDecoder implements Releasable { private final ChannelType channelType; public InboundDecoder(Recycler recycler) { - this(recycler, new ByteSizeValue(2, ByteSizeUnit.GB), ChannelType.MIX); + this(recycler, ByteSizeValue.of(2, ByteSizeUnit.GB), ChannelType.MIX); } public InboundDecoder(Recycler recycler, ChannelType channelType) { - this(recycler, new ByteSizeValue(2, ByteSizeUnit.GB), channelType); + this(recycler, ByteSizeValue.of(2, ByteSizeUnit.GB), channelType); } public InboundDecoder(Recycler 
recycler, ByteSizeValue maxHeaderSize, ChannelType channelType) { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterPortSettings.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterPortSettings.java index f9eded1b9ad0..ee32ec756aea 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterPortSettings.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterPortSettings.java @@ -135,9 +135,9 @@ public class RemoteClusterPortSettings { public static final Setting MAX_REQUEST_HEADER_SIZE = Setting.byteSizeSetting( REMOTE_CLUSTER_PREFIX + "max_request_header_size", - new ByteSizeValue(64, ByteSizeUnit.KB), // should cover typical querying user/key authn serialized to the fulfilling cluster - new ByteSizeValue(64, ByteSizeUnit.BYTES), // toBytes must be higher than fixed header length - new ByteSizeValue(2, ByteSizeUnit.GB), // toBytes must be lower than INT_MAX (>2 GB) + ByteSizeValue.of(64, ByteSizeUnit.KB), // should cover typical querying user/key authn serialized to the fulfilling cluster + ByteSizeValue.of(64, ByteSizeUnit.BYTES), // toBytes must be higher than fixed header length + ByteSizeValue.of(2, ByteSizeUnit.GB), // toBytes must be lower than INT_MAX (>2 GB) Setting.Property.NodeScope ); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverConditionsTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverConditionsTests.java index 341290ba3542..d85e10f13e3b 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverConditionsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverConditionsTests.java @@ -84,22 +84,22 @@ public class RolloverConditionsTests extends AbstractXContentSerializingTestCase switch (between(0, 9)) { case 0 -> maxSize = randomValueOtherThan(maxSize, () -> { ByteSizeUnit maxSizeUnit = randomFrom(ByteSizeUnit.values()); - 
return new ByteSizeValue(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit); + return ByteSizeValue.of(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit); }); case 1 -> maxPrimaryShardSize = randomValueOtherThan(maxPrimaryShardSize, () -> { ByteSizeUnit maxPrimaryShardSizeUnit = randomFrom(ByteSizeUnit.values()); - return new ByteSizeValue(randomNonNegativeLong() / maxPrimaryShardSizeUnit.toBytes(1), maxPrimaryShardSizeUnit); + return ByteSizeValue.of(randomNonNegativeLong() / maxPrimaryShardSizeUnit.toBytes(1), maxPrimaryShardSizeUnit); }); case 2 -> maxAge = randomValueOtherThan(maxAge, () -> randomPositiveTimeValue()); case 3 -> maxDocs = maxDocs == null ? randomNonNegativeLong() : maxDocs + 1; case 4 -> maxPrimaryShardDocs = maxPrimaryShardDocs == null ? randomNonNegativeLong() : maxPrimaryShardDocs + 1; case 5 -> minSize = randomValueOtherThan(minSize, () -> { ByteSizeUnit minSizeUnit = randomFrom(ByteSizeUnit.values()); - return new ByteSizeValue(randomNonNegativeLong() / minSizeUnit.toBytes(1), minSizeUnit); + return ByteSizeValue.of(randomNonNegativeLong() / minSizeUnit.toBytes(1), minSizeUnit); }); case 6 -> minPrimaryShardSize = randomValueOtherThan(minPrimaryShardSize, () -> { ByteSizeUnit minPrimaryShardSizeUnit = randomFrom(ByteSizeUnit.values()); - return new ByteSizeValue(randomNonNegativeLong() / minPrimaryShardSizeUnit.toBytes(1), minPrimaryShardSizeUnit); + return ByteSizeValue.of(randomNonNegativeLong() / minPrimaryShardSizeUnit.toBytes(1), minPrimaryShardSizeUnit); }); case 7 -> minAge = randomValueOtherThan(minAge, () -> randomPositiveTimeValue()); case 8 -> minDocs = minDocs == null ? 
randomNonNegativeLong() : minDocs + 1; diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestTests.java index fc0107caddf5..e1316b40e7ce 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestTests.java @@ -67,7 +67,7 @@ public class ResizeRequestTests extends AbstractWireSerializingTestCase {}, diff --git a/server/src/test/java/org/elasticsearch/common/settings/MemorySizeSettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/MemorySizeSettingsTests.java index ef395e8b4d2d..1f956422dda4 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/MemorySizeSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/MemorySizeSettingsTests.java @@ -62,7 +62,7 @@ public class MemorySizeSettingsTests extends ESTestCase { public void testCircuitBreakerSettings() { // default is chosen based on actual heap size double defaultTotalPercentage; - if (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() < new ByteSizeValue(1, ByteSizeUnit.GB).getBytes()) { + if (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() < ByteSizeValue.of(1, ByteSizeUnit.GB).getBytes()) { defaultTotalPercentage = 0.95d; } else { defaultTotalPercentage = 0.7d; diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 75f5045c5fbb..0fbe36ec9c2d 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -70,8 +70,8 @@ public class SettingTests extends ESTestCase { public void testByteSizeSettingMinValue() { final Setting byteSizeValueSetting = Setting.byteSizeSetting( 
"a.byte.size", - new ByteSizeValue(100, ByteSizeUnit.MB), - new ByteSizeValue(20_000_000, ByteSizeUnit.BYTES), + ByteSizeValue.of(100, ByteSizeUnit.MB), + ByteSizeValue.of(20_000_000, ByteSizeUnit.BYTES), ByteSizeValue.ofBytes(Integer.MAX_VALUE) ); final long value = 20_000_000 - randomIntBetween(1, 1024); @@ -84,8 +84,8 @@ public class SettingTests extends ESTestCase { public void testByteSizeSettingMaxValue() { final Setting byteSizeValueSetting = Setting.byteSizeSetting( "a.byte.size", - new ByteSizeValue(100, ByteSizeUnit.MB), - new ByteSizeValue(16, ByteSizeUnit.MB), + ByteSizeValue.of(100, ByteSizeUnit.MB), + ByteSizeValue.of(16, ByteSizeUnit.MB), ByteSizeValue.ofBytes(Integer.MAX_VALUE) ); final long value = (1L << 31) - 1 + randomIntBetween(1, 1024); diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java index b6e21e7bb911..d0f17f6a495d 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java @@ -659,7 +659,7 @@ public class SettingsTests extends ESTestCase { "key", ByteSizeValue.parseBytesSizeValue(randomIntBetween(1, 16) + "k", "key") ); - final ByteSizeValue expected = new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES); + final ByteSizeValue expected = ByteSizeValue.of(randomNonNegativeLong(), ByteSizeUnit.BYTES); final Settings settings = Settings.builder().put("key", expected).build(); /* * Previously we would internally convert the byte size value to a string using a method that tries to be smart about the units diff --git a/server/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java b/server/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java index 63b9f56051cf..2dabbcb40d01 100644 --- a/server/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java +++ 
b/server/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java @@ -10,11 +10,16 @@ package org.elasticsearch.common.unit; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; import org.hamcrest.MatcherAssert; import java.io.IOException; +import java.util.List; import java.util.function.Function; import static org.hamcrest.Matchers.containsString; @@ -23,24 +28,24 @@ import static org.hamcrest.Matchers.is; public class ByteSizeValueTests extends AbstractWireSerializingTestCase { public void testActualPeta() { - MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.PB).getBytes(), equalTo(4503599627370496L)); + MatcherAssert.assertThat(ByteSizeValue.of(4, ByteSizeUnit.PB).getBytes(), equalTo(4503599627370496L)); } public void testActualTera() { - MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.TB).getBytes(), equalTo(4398046511104L)); + MatcherAssert.assertThat(ByteSizeValue.of(4, ByteSizeUnit.TB).getBytes(), equalTo(4398046511104L)); } public void testActual() { - MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.GB).getBytes(), equalTo(4294967296L)); + MatcherAssert.assertThat(ByteSizeValue.of(4, ByteSizeUnit.GB).getBytes(), equalTo(4294967296L)); } public void testSimple() { - assertThat(ByteSizeUnit.BYTES.toBytes(10), is(new ByteSizeValue(10, ByteSizeUnit.BYTES).getBytes())); - assertThat(ByteSizeUnit.KB.toKB(10), is(new ByteSizeValue(10, ByteSizeUnit.KB).getKb())); - assertThat(ByteSizeUnit.MB.toMB(10), is(new ByteSizeValue(10, ByteSizeUnit.MB).getMb())); - assertThat(ByteSizeUnit.GB.toGB(10), is(new ByteSizeValue(10, ByteSizeUnit.GB).getGb())); - assertThat(ByteSizeUnit.TB.toTB(10), is(new ByteSizeValue(10, 
ByteSizeUnit.TB).getTb())); - assertThat(ByteSizeUnit.PB.toPB(10), is(new ByteSizeValue(10, ByteSizeUnit.PB).getPb())); + assertThat(ByteSizeUnit.BYTES.toBytes(10), is(ByteSizeValue.of(10, ByteSizeUnit.BYTES).getBytes())); + assertThat(ByteSizeUnit.KB.toKB(10), is(ByteSizeValue.of(10, ByteSizeUnit.KB).getKb())); + assertThat(ByteSizeUnit.MB.toMB(10), is(ByteSizeValue.of(10, ByteSizeUnit.MB).getMb())); + assertThat(ByteSizeUnit.GB.toGB(10), is(ByteSizeValue.of(10, ByteSizeUnit.GB).getGb())); + assertThat(ByteSizeUnit.TB.toTB(10), is(ByteSizeValue.of(10, ByteSizeUnit.TB).getTb())); + assertThat(ByteSizeUnit.PB.toPB(10), is(ByteSizeValue.of(10, ByteSizeUnit.PB).getPb())); } public void testToIntBytes() { @@ -60,13 +65,13 @@ public class ByteSizeValueTests extends AbstractWireSerializingTestCase randomNonNegativeLong() / unit.toBytes(1)); - ByteSizeValue firstByteValue = new ByteSizeValue(firstRandom, unit); - ByteSizeValue secondByteValue = new ByteSizeValue(secondRandom, unit); + ByteSizeValue firstByteValue = ByteSizeValue.of(firstRandom, unit); + ByteSizeValue secondByteValue = ByteSizeValue.of(secondRandom, unit); assertEquals(firstRandom > secondRandom, firstByteValue.compareTo(secondByteValue) > 0); assertEquals(secondRandom > firstRandom, secondByteValue.compareTo(firstByteValue) > 0); } @@ -173,38 +178,42 @@ public class ByteSizeValueTests extends AbstractWireSerializingTestCase randomFrom(ByteSizeUnit.values())); - ByteSizeValue firstByteValue = new ByteSizeValue(number, randomUnit); - ByteSizeValue secondByteValue = new ByteSizeValue(number, ByteSizeUnit.PB); + ByteSizeValue firstByteValue = ByteSizeValue.of(number, randomUnit); + ByteSizeValue secondByteValue = ByteSizeValue.of(number, ByteSizeUnit.PB); assertTrue(firstByteValue.compareTo(secondByteValue) < 0); assertTrue(secondByteValue.compareTo(firstByteValue) > 0); } public void testOutOfRange() { // Make sure a value of > Long.MAX_VALUE bytes throws an exception - ByteSizeUnit unit = 
randomValueOtherThan(ByteSizeUnit.BYTES, () -> randomFrom(ByteSizeUnit.values())); - long size = (long) randomDouble() * unit.toBytes(1) + (Long.MAX_VALUE - unit.toBytes(1)); - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> new ByteSizeValue(size, unit)); - assertEquals( - "Values greater than " + Long.MAX_VALUE + " bytes are not supported: " + size + unit.getSuffix(), - exception.getMessage() - ); + for (ByteSizeUnit unit : ByteSizeUnit.values()) { + if (unit == ByteSizeUnit.BYTES) { + continue; + } + long size = (long) randomDouble() * unit.toBytes(1) + (Long.MAX_VALUE - unit.toBytes(1)); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> ByteSizeValue.of(size, unit)); + assertEquals( + "Values greater than " + Long.MAX_VALUE + " bytes are not supported: " + size + unit.getSuffix(), + exception.getMessage() + ); - // Make sure for units other than BYTES a size of -1 throws an exception - ByteSizeUnit unit2 = randomValueOtherThan(ByteSizeUnit.BYTES, () -> randomFrom(ByteSizeUnit.values())); - long size2 = -1L; - exception = expectThrows(IllegalArgumentException.class, () -> new ByteSizeValue(size2, unit2)); - assertEquals("Values less than -1 bytes are not supported: " + size2 + unit2.getSuffix(), exception.getMessage()); + // Make sure for units other than BYTES a size of -1 throws an exception + ByteSizeUnit unit2 = randomValueOtherThan(ByteSizeUnit.BYTES, () -> randomFrom(ByteSizeUnit.values())); + long size2 = -1L; + exception = expectThrows(IllegalArgumentException.class, () -> ByteSizeValue.of(size2, unit2)); + assertEquals("Values less than -1 bytes are not supported: " + size2 + unit2.getSuffix(), exception.getMessage()); - // Make sure for any unit a size < -1 throws an exception - ByteSizeUnit unit3 = randomFrom(ByteSizeUnit.values()); - long size3 = -1L * randomNonNegativeLong() - 1L; - exception = expectThrows(IllegalArgumentException.class, () -> new ByteSizeValue(size3, 
unit3)); - assertEquals("Values less than -1 bytes are not supported: " + size3 + unit3.getSuffix(), exception.getMessage()); + // Make sure for any unit a size < -1 throws an exception + ByteSizeUnit unit3 = randomFrom(ByteSizeUnit.values()); + long size3 = -1L * randomNonNegativeLong() - 1L; + exception = expectThrows(IllegalArgumentException.class, () -> ByteSizeValue.of(size3, unit3)); + assertEquals("Values less than -1 bytes are not supported: " + size3 + unit3.getSuffix(), exception.getMessage()); + } } public void testConversionHashCode() { - ByteSizeValue firstValue = new ByteSizeValue(randomIntBetween(0, Integer.MAX_VALUE), ByteSizeUnit.GB); - ByteSizeValue secondValue = new ByteSizeValue(firstValue.getBytes(), ByteSizeUnit.BYTES); + ByteSizeValue firstValue = ByteSizeValue.of(randomIntBetween(0, Integer.MAX_VALUE), ByteSizeUnit.GB); + ByteSizeValue secondValue = ByteSizeValue.of(firstValue.getBytes(), ByteSizeUnit.BYTES); assertEquals(firstValue.hashCode(), secondValue.hashCode()); } @@ -216,7 +225,7 @@ public class ByteSizeValueTests extends AbstractWireSerializingTestCase Long.MAX_VALUE / unit.toBytes(1)) { throw new AssertionError(); } - return new ByteSizeValue(size, unit); + return ByteSizeValue.of(size, unit); } else { return ByteSizeValue.ofBytes(randomNonNegativeLong()); } @@ -228,38 +237,11 @@ public class ByteSizeValueTests extends AbstractWireSerializingTestCase { - final long unitBytes = instanceUnit.toBytes(1); - mutateSize = randomValueOtherThan(instanceSize, () -> randomNonNegativeLong() / unitBytes); - mutateUnit = instanceUnit; - } - case 1 -> { - mutateUnit = randomValueOtherThan(instanceUnit, () -> randomFrom(ByteSizeUnit.values())); - final long newUnitBytes = mutateUnit.toBytes(1); - /* - * If size is zero we can not reuse zero because zero with any unit will be equal to zero with any other - * unit so in this case we need to randomize a new size. 
Additionally, if the size unit pair is such that - * the representation would be such that the number of represented bytes would exceed Long.Max_VALUE, we - * have to randomize a new size too. - */ - if (instanceSize == 0 || instanceSize >= Long.MAX_VALUE / newUnitBytes) { - mutateSize = randomValueOtherThanMany( - v -> v == instanceSize && v >= Long.MAX_VALUE / newUnitBytes, - () -> randomNonNegativeLong() / newUnitBytes - ); - } else { - mutateSize = instanceSize; - } - } - default -> throw new AssertionError("Invalid randomisation branch"); - } - return new ByteSizeValue(mutateSize, mutateUnit); + protected ByteSizeValue mutateInstance(final ByteSizeValue original) { + return new ByteSizeValue( + randomValueOtherThan(original.getSizeInBytes(), ESTestCase::randomNonNegativeLong), + randomFrom(ByteSizeUnit.values()) + ); } public void testParse() { @@ -316,21 +298,24 @@ public class ByteSizeValueTests extends AbstractWireSerializingTestCase randomFrom(ByteSizeUnit.values())); - String fractionalValue = "23.5" + unit.getSuffix(); - ByteSizeValue instance = ByteSizeValue.parseBytesSizeValue(fractionalValue, "test"); - assertEquals(fractionalValue, instance.toString()); - assertWarnings( - "Fractional bytes values are deprecated. Use non-fractional bytes values instead: [" - + fractionalValue - + "] found for setting [test]" - ); + public void testParseFractionalNumber() { + for (var unit : ByteSizeUnit.values()) { + if (unit == ByteSizeUnit.BYTES) { + continue; + } + for (int tenths = 1; tenths <= 9; tenths++) { + checkFractionRoundTrip("23." 
+ tenths + unit.getSuffix()); + } + } + } + + private void checkFractionRoundTrip(String fractionalValue) { + assertEquals(fractionalValue, ByteSizeValue.parseBytesSizeValue(fractionalValue, "test").toString()); } public void testGetBytesAsInt() { for (int i = 0; i < NUMBER_OF_TEST_RUNS; i++) { - ByteSizeValue instance = new ByteSizeValue(randomIntBetween(1, 1000), randomFrom(ByteSizeUnit.values())); + ByteSizeValue instance = ByteSizeValue.of(randomIntBetween(1, 1000), randomFrom(ByteSizeUnit.values())); long bytesValue = instance.getBytes(); if (bytesValue > Integer.MAX_VALUE) { IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> instance.bytesAsInt()); @@ -368,7 +353,7 @@ public class ByteSizeValueTests extends AbstractWireSerializingTestCase byteSizeValueFunction) { for (int i = 0; i < NUMBER_OF_TEST_RUNS; i++) { long size = randomIntBetween(1, 1000); - ByteSizeValue expected = new ByteSizeValue(size, unit); + ByteSizeValue expected = ByteSizeValue.of(size, unit); ByteSizeValue actual = byteSizeValueFunction.apply(size); assertThat(actual, equalTo(expected)); } @@ -381,27 +366,27 @@ public class ByteSizeValueTests extends AbstractWireSerializingTestCase ByteSizeValue.min(ByteSizeValue.MINUS_ONE, ByteSizeValue.ONE)); @@ -532,4 +517,58 @@ public class ByteSizeValueTests extends AbstractWireSerializingTestCase 0; bytes *= 10) { + checkTransportRoundTrip(new ByteSizeValue(bytes, unit), tv); + } + } + } + } + + private void checkTransportRoundTrip(ByteSizeValue original, TransportVersion transportVersion) throws IOException { + var deserialized = copyWriteable(original, writableRegistry(), ByteSizeValue::readFrom, transportVersion); + assertEquals(original.getSizeInBytes(), deserialized.getSizeInBytes()); + assertEquals(original.getDesiredUnit(), deserialized.getDesiredUnit()); + } } diff --git a/server/src/test/java/org/elasticsearch/common/unit/RelativeByteSizeValueTests.java 
b/server/src/test/java/org/elasticsearch/common/unit/RelativeByteSizeValueTests.java index 041e261ae52f..bb362ac5fc2b 100644 --- a/server/src/test/java/org/elasticsearch/common/unit/RelativeByteSizeValueTests.java +++ b/server/src/test/java/org/elasticsearch/common/unit/RelativeByteSizeValueTests.java @@ -21,7 +21,7 @@ import static org.hamcrest.Matchers.is; public class RelativeByteSizeValueTests extends ESTestCase { public void testDeserialization() throws IOException { - final var origin1 = new RelativeByteSizeValue(new ByteSizeValue(between(0, 2048), randomFrom(ByteSizeUnit.values()))); + final var origin1 = new RelativeByteSizeValue(ByteSizeValue.of(between(0, 2048), randomFrom(ByteSizeUnit.values()))); final var origin2 = new RelativeByteSizeValue(new RatioValue(randomDoubleBetween(0.0, 100.0, true))); final RelativeByteSizeValue target1, target2; @@ -39,7 +39,7 @@ public class RelativeByteSizeValueTests extends ESTestCase { assertNull(origin1.getRatio()); assertNull(target1.getRatio()); assertEquals(origin1.getAbsolute(), target1.getAbsolute()); - assertEquals(origin1.getAbsolute().getUnit(), target1.getAbsolute().getUnit()); + assertEquals(origin1.getAbsolute().getDesiredUnit(), target1.getAbsolute().getDesiredUnit()); assertFalse(origin2.isAbsolute()); assertFalse(target2.isAbsolute()); @@ -63,7 +63,7 @@ public class RelativeByteSizeValueTests extends ESTestCase { } public void testAbsolute() { - ByteSizeValue value = new ByteSizeValue(between(0, 100), randomFrom(ByteSizeUnit.values())); + ByteSizeValue value = ByteSizeValue.of(between(0, 100), randomFrom(ByteSizeUnit.values())); RelativeByteSizeValue parsed = RelativeByteSizeValue.parseRelativeByteSizeValue(value.getStringRep(), "test"); assertThat(parsed.getAbsolute(), equalTo(value)); assertThat(parsed.isAbsolute(), is(true)); diff --git a/server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java b/server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java index 
06b5b87698c2..7a21ddbe75aa 100644 --- a/server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java @@ -106,7 +106,7 @@ public class BitArrayTests extends ESTestCase { } public void testClearingDoesntAllocate() { - ByteSizeValue max = new ByteSizeValue(1, ByteSizeUnit.KB); + ByteSizeValue max = ByteSizeValue.of(1, ByteSizeUnit.KB); MockBigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), max); try (BitArray bitArray = new BitArray(1, bigArrays)) { bitArray.clear(100000000); diff --git a/server/src/test/java/org/elasticsearch/index/MergePolicyConfigTests.java b/server/src/test/java/org/elasticsearch/index/MergePolicyConfigTests.java index 0bc9d055c649..8ee7cc2b5111 100644 --- a/server/src/test/java/org/elasticsearch/index/MergePolicyConfigTests.java +++ b/server/src/test/java/org/elasticsearch/index/MergePolicyConfigTests.java @@ -164,19 +164,19 @@ public class MergePolicyConfigTests extends ESTestCase { Settings.builder() .put( MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB) + ByteSizeValue.of(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB) ) .build() ) ); assertEquals( ((TieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), + ByteSizeValue.of(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), 0.001 ); assertEquals( ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMinMergeMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), + ByteSizeValue.of(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), 0.001 ); @@ -303,12 +303,12 @@ public class MergePolicyConfigTests 
extends ESTestCase { ); assertEquals( ((TieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), + ByteSizeValue.of(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), 0.00 ); assertEquals( ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMinMergeMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), + ByteSizeValue.of(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), 0.00 ); assertEquals( diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 99f2e2a562ee..69cf9d856ed4 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -290,8 +290,8 @@ public class TranslogTests extends ESTestCase { private TranslogConfig getTranslogConfig(final Path path, final Settings settings, OperationListener listener) { final ByteSizeValue bufferSize = randomFrom( TranslogConfig.DEFAULT_BUFFER_SIZE, - new ByteSizeValue(8, ByteSizeUnit.KB), - new ByteSizeValue(10 + randomInt(128 * 1024), ByteSizeUnit.BYTES) + ByteSizeValue.of(8, ByteSizeUnit.KB), + ByteSizeValue.of(10 + randomInt(128 * 1024), ByteSizeUnit.BYTES) ); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings); @@ -1395,7 +1395,7 @@ public class TranslogTests extends ESTestCase { temp.getTranslogPath(), temp.getIndexSettings(), temp.getBigArrays(), - new ByteSizeValue(1, ByteSizeUnit.KB), + ByteSizeValue.of(1, ByteSizeUnit.KB), randomBoolean() ? 
DiskIoBufferPool.INSTANCE : RANDOMIZING_IO_BUFFERS, TranslogConfig.NOOP_OPERATION_LISTENER, true @@ -4080,7 +4080,7 @@ public class TranslogTests extends ESTestCase { translogDir, IndexSettingsModule.newIndexSettings(shardId.getIndex(), Settings.EMPTY), NON_RECYCLING_INSTANCE, - new ByteSizeValue(1, ByteSizeUnit.KB), + ByteSizeValue.of(1, ByteSizeUnit.KB), randomBoolean() ? DiskIoBufferPool.INSTANCE : RANDOMIZING_IO_BUFFERS, TranslogConfig.NOOP_OPERATION_LISTENER, false diff --git a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java index 24c7585fee88..b44286ccd4e1 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java @@ -237,7 +237,7 @@ public class IndexingMemoryControllerTests extends IndexShardTestCase { Settings.builder().put("indices.memory.index_buffer_size", "0.001%").put("indices.memory.min_index_buffer_size", "6mb").build() ); - assertThat(controller.indexingBufferSize(), equalTo(new ByteSizeValue(6, ByteSizeUnit.MB).getBytes())); + assertThat(controller.indexingBufferSize(), equalTo(ByteSizeValue.of(6, ByteSizeUnit.MB).getBytes())); } public void testNegativeMinIndexBufferSize() { @@ -289,7 +289,7 @@ public class IndexingMemoryControllerTests extends IndexShardTestCase { Settings.builder().put("indices.memory.index_buffer_size", "90%").put("indices.memory.max_index_buffer_size", "6mb").build() ); - assertThat(controller.indexingBufferSize(), equalTo(new ByteSizeValue(6, ByteSizeUnit.MB).getBytes())); + assertThat(controller.indexingBufferSize(), equalTo(ByteSizeValue.of(6, ByteSizeUnit.MB).getBytes())); } public void testThrottling() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java 
b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java index 4cb6e1febd5e..dd6fbb753974 100644 --- a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java @@ -211,19 +211,19 @@ public class HierarchyCircuitBreakerServiceTests extends ESTestCase { CircuitBreaker requestCircuitBreaker = service.getBreaker(CircuitBreaker.REQUEST); CircuitBreaker fieldDataCircuitBreaker = service.getBreaker(CircuitBreaker.FIELDDATA); - assertEquals(new ByteSizeValue(200, ByteSizeUnit.MB).getBytes(), service.stats().getStats(CircuitBreaker.PARENT).getLimit()); - assertEquals(new ByteSizeValue(150, ByteSizeUnit.MB).getBytes(), requestCircuitBreaker.getLimit()); - assertEquals(new ByteSizeValue(150, ByteSizeUnit.MB).getBytes(), fieldDataCircuitBreaker.getLimit()); + assertEquals(ByteSizeValue.of(200, ByteSizeUnit.MB).getBytes(), service.stats().getStats(CircuitBreaker.PARENT).getLimit()); + assertEquals(ByteSizeValue.of(150, ByteSizeUnit.MB).getBytes(), requestCircuitBreaker.getLimit()); + assertEquals(ByteSizeValue.of(150, ByteSizeUnit.MB).getBytes(), fieldDataCircuitBreaker.getLimit()); - fieldDataCircuitBreaker.addEstimateBytesAndMaybeBreak(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), "should not break"); - assertEquals(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), fieldDataCircuitBreaker.getUsed(), 0.0); - requestCircuitBreaker.addEstimateBytesAndMaybeBreak(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), "should not break"); - assertEquals(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), requestCircuitBreaker.getUsed(), 0.0); - requestCircuitBreaker.addEstimateBytesAndMaybeBreak(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), "should not break"); - assertEquals(new ByteSizeValue(100, ByteSizeUnit.MB).getBytes(), requestCircuitBreaker.getUsed(), 0.0); + 
fieldDataCircuitBreaker.addEstimateBytesAndMaybeBreak(ByteSizeValue.of(50, ByteSizeUnit.MB).getBytes(), "should not break"); + assertEquals(ByteSizeValue.of(50, ByteSizeUnit.MB).getBytes(), fieldDataCircuitBreaker.getUsed(), 0.0); + requestCircuitBreaker.addEstimateBytesAndMaybeBreak(ByteSizeValue.of(50, ByteSizeUnit.MB).getBytes(), "should not break"); + assertEquals(ByteSizeValue.of(50, ByteSizeUnit.MB).getBytes(), requestCircuitBreaker.getUsed(), 0.0); + requestCircuitBreaker.addEstimateBytesAndMaybeBreak(ByteSizeValue.of(50, ByteSizeUnit.MB).getBytes(), "should not break"); + assertEquals(ByteSizeValue.of(100, ByteSizeUnit.MB).getBytes(), requestCircuitBreaker.getUsed(), 0.0); CircuitBreakingException exception = expectThrows( CircuitBreakingException.class, - () -> requestCircuitBreaker.addEstimateBytesAndMaybeBreak(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), "should break") + () -> requestCircuitBreaker.addEstimateBytesAndMaybeBreak(ByteSizeValue.of(50, ByteSizeUnit.MB).getBytes(), "should break") ); assertThat(exception.getMessage(), containsString("[parent] Data too large, data for [should break] would be")); assertThat(exception.getMessage(), containsString("which is larger than the limit of [209715200/200mb]")); @@ -733,7 +733,7 @@ public class HierarchyCircuitBreakerServiceTests extends ESTestCase { ); long parentLimitBytes = service.getParentLimit(); - assertEquals(new ByteSizeValue(100, ByteSizeUnit.BYTES).getBytes(), parentLimitBytes); + assertEquals(ByteSizeValue.of(100, ByteSizeUnit.BYTES).getBytes(), parentLimitBytes); CircuitBreaker breaker = service.getBreaker(CircuitBreaker.REQUEST); MultiBucketConsumerService.MultiBucketConsumer multiBucketConsumer = new MultiBucketConsumerService.MultiBucketConsumer( @@ -800,7 +800,7 @@ public class HierarchyCircuitBreakerServiceTests extends ESTestCase { } private static long mb(long size) { - return new ByteSizeValue(size, ByteSizeUnit.MB).getBytes(); + return ByteSizeValue.of(size, 
ByteSizeUnit.MB).getBytes(); } public void testUpdatingUseRealMemory() { diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java index bc2dc99f5b60..b7226ad27745 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -83,7 +83,7 @@ import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; public class PeerRecoveryTargetServiceTests extends IndexShardTestCase { - private static final ByteSizeValue SNAPSHOT_FILE_PART_SIZE = new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES); + private static final ByteSizeValue SNAPSHOT_FILE_PART_SIZE = ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES); public void testWriteFileChunksConcurrently() throws Exception { IndexShard sourceShard = newStartedShard(true); @@ -454,7 +454,7 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase { @Override public int getReadSnapshotFileBufferSizeForRepo(String repository) { - return (int) new ByteSizeValue(128, ByteSizeUnit.KB).getBytes(); + return (int) ByteSizeValue.of(128, ByteSizeUnit.KB).getBytes(); } }; @@ -526,7 +526,7 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase { @Override public int getReadSnapshotFileBufferSizeForRepo(String repository) { - return (int) new ByteSizeValue(128, ByteSizeUnit.KB).getBytes(); + return (int) ByteSizeValue.of(128, ByteSizeUnit.KB).getBytes(); } }; @@ -636,7 +636,7 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase { @Override public int getReadSnapshotFileBufferSizeForRepo(String repository) { - return (int) new ByteSizeValue(128, ByteSizeUnit.KB).getBytes(); + return (int) ByteSizeValue.of(128, ByteSizeUnit.KB).getBytes(); } }; @@ -699,7 +699,7 @@ public class 
PeerRecoveryTargetServiceTests extends IndexShardTestCase { @Override public int getReadSnapshotFileBufferSizeForRepo(String repository) { - return (int) new ByteSizeValue(128, ByteSizeUnit.KB).getBytes(); + return (int) ByteSizeValue.of(128, ByteSizeUnit.KB).getBytes(); } }; @@ -713,7 +713,7 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase { BlobStoreIndexShardSnapshot.FileInfo fileInfo = new BlobStoreIndexShardSnapshot.FileInfo( "name", storeFileMetadata, - new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES) + ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES) ); recoveryTarget.incRef(); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySettingsTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySettingsTests.java index 457754e28e6e..f31a354dc3dd 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySettingsTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySettingsTests.java @@ -423,7 +423,7 @@ public class RecoverySettingsTests extends ESTestCase { .withMemory(ByteSizeValue.ofBytes(randomLongBetween(1L, ByteSizeUnit.GB.toBytes(4L)))) .build() .getMaxBytesPerSec(), - equalTo(new ByteSizeValue(40, ByteSizeUnit.MB)) + equalTo(ByteSizeValue.of(40, ByteSizeUnit.MB)) ); } { @@ -433,7 +433,7 @@ public class RecoverySettingsTests extends ESTestCase { .withMemory(ByteSizeValue.ofBytes(randomLongBetween(ByteSizeUnit.GB.toBytes(4L) + 1L, ByteSizeUnit.GB.toBytes(8L)))) .build() .getMaxBytesPerSec(), - equalTo(new ByteSizeValue(60, ByteSizeUnit.MB)) + equalTo(ByteSizeValue.of(60, ByteSizeUnit.MB)) ); } { @@ -443,7 +443,7 @@ public class RecoverySettingsTests extends ESTestCase { .withMemory(ByteSizeValue.ofBytes(randomLongBetween(ByteSizeUnit.GB.toBytes(8L) + 1L, ByteSizeUnit.GB.toBytes(16L)))) .build() .getMaxBytesPerSec(), - equalTo(new ByteSizeValue(90, ByteSizeUnit.MB)) + equalTo(ByteSizeValue.of(90, ByteSizeUnit.MB)) ); } { @@ -453,7 
+453,7 @@ public class RecoverySettingsTests extends ESTestCase { .withMemory(ByteSizeValue.ofBytes(randomLongBetween(ByteSizeUnit.GB.toBytes(16L) + 1L, ByteSizeUnit.GB.toBytes(32L)))) .build() .getMaxBytesPerSec(), - equalTo(new ByteSizeValue(125, ByteSizeUnit.MB)) + equalTo(ByteSizeValue.of(125, ByteSizeUnit.MB)) ); } { @@ -463,7 +463,7 @@ public class RecoverySettingsTests extends ESTestCase { .withMemory(ByteSizeValue.ofBytes(randomLongBetween(ByteSizeUnit.GB.toBytes(32L) + 1L, ByteSizeUnit.TB.toBytes(4L)))) .build() .getMaxBytesPerSec(), - equalTo(new ByteSizeValue(250, ByteSizeUnit.MB)) + equalTo(ByteSizeValue.of(250, ByteSizeUnit.MB)) ); } } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index d9b2936dc30c..13d3e8360e43 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -1686,7 +1686,7 @@ public class RecoverySourceHandlerTests extends MapperServiceTestCase { 0 ); - ByteSizeValue partSize = new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES); + ByteSizeValue partSize = ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES); List filesToRecoverFromSource = sourceFiles.subList(0, sourceFileCount); List filesToRecoverFromSnapshot = sourceFiles.subList(sourceFileCount, sourceFiles.size()); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 5793e8613bbd..bdfc0a693f7f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -486,13 +486,13 @@ public abstract class ESIntegTestCase extends ESTestCase { if (random.nextBoolean()) { builder.put( 
IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), - new ByteSizeValue(RandomNumbers.randomIntBetween(random, 1, 300), ByteSizeUnit.MB) + ByteSizeValue.of(RandomNumbers.randomIntBetween(random, 1, 300), ByteSizeUnit.MB) ); } if (random.nextBoolean()) { - builder.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)); // just - // don't - // flush + builder.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.PB)); // just + // don't + // flush } if (random.nextBoolean()) { builder.put( diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 1f7a17e43c21..b11d96cb3fa2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -574,12 +574,12 @@ public final class InternalTestCluster extends TestCluster { if (random.nextInt(10) == 0) { // do something crazy slow here builder.put( RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), - new ByteSizeValue(RandomNumbers.randomIntBetween(random, 1, 10), ByteSizeUnit.MB) + ByteSizeValue.of(RandomNumbers.randomIntBetween(random, 1, 10), ByteSizeUnit.MB) ); } else { builder.put( RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), - new ByteSizeValue(RandomNumbers.randomIntBetween(random, 10, 200), ByteSizeUnit.MB) + ByteSizeValue.of(RandomNumbers.randomIntBetween(random, 10, 200), ByteSizeUnit.MB) ); } } diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregatorTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregatorTests.java index cd36af992b0a..96a373fc2e15 100644 --- 
a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregatorTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregatorTests.java @@ -359,7 +359,7 @@ public class TopMetricsAggregatorTests extends AggregatorTestCase { public void testTonsOfBucketsTriggersBreaker() throws IOException { // Build a "simple" circuit breaker that trips at 20k CircuitBreakerService breaker = mock(CircuitBreakerService.class); - ByteSizeValue max = new ByteSizeValue(20, ByteSizeUnit.KB); + ByteSizeValue max = ByteSizeValue.of(20, ByteSizeUnit.KB); when(breaker.getBreaker(CircuitBreaker.REQUEST)).thenReturn(new MockBigArrays.LimitedBreaker(CircuitBreaker.REQUEST, max)); // Collect some buckets with it diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/AbstractFrozenAutoscalingIntegTestCase.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/AbstractFrozenAutoscalingIntegTestCase.java index d907a9825518..ddad823a8ae3 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/AbstractFrozenAutoscalingIntegTestCase.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/AbstractFrozenAutoscalingIntegTestCase.java @@ -73,7 +73,7 @@ public abstract class AbstractFrozenAutoscalingIntegTestCase extends AbstractSna .put(super.nodeSettings(nodeOrdinal, otherSettings)) .put(SELF_GENERATED_LICENSE_TYPE.getKey(), "trial"); if (DiscoveryNode.hasRole(otherSettings, DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE)) { - builder.put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), new ByteSizeValue(10, ByteSizeUnit.MB)); + builder.put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.of(10, ByteSizeUnit.MB)); } return builder.build(); } diff --git 
a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/shards/FrozenShardsDeciderServiceTests.java b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/shards/FrozenShardsDeciderServiceTests.java index 34f4ec007bbd..ef0bb343efae 100644 --- a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/shards/FrozenShardsDeciderServiceTests.java +++ b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/shards/FrozenShardsDeciderServiceTests.java @@ -68,7 +68,7 @@ public class FrozenShardsDeciderServiceTests extends AutoscalingTestCase { ); assertThat(defaultSettingsResult.reason().summary(), equalTo("shard count [" + (shards * (replicas + 1) + "]"))); - ByteSizeValue memoryPerShard = new ByteSizeValue( + ByteSizeValue memoryPerShard = ByteSizeValue.of( randomLongBetween(0, 1000), randomFrom(ByteSizeUnit.BYTES, ByteSizeUnit.KB, ByteSizeUnit.MB) ); diff --git a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java index b6f5b550aea9..8364cb307846 100644 --- a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java +++ b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java @@ -813,7 +813,7 @@ public class SharedBlobCacheServiceTests extends ESTestCase { } public void testCacheSizeChanges() throws IOException { - ByteSizeValue val1 = new ByteSizeValue(randomIntBetween(1, 5), ByteSizeUnit.MB); + ByteSizeValue val1 = ByteSizeValue.of(randomIntBetween(1, 5), ByteSizeUnit.MB); Settings settings = Settings.builder() .put(NODE_NAME_SETTING.getKey(), "node") .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), val1.getStringRep()) @@ -834,7 +834,7 @@ public class SharedBlobCacheServiceTests extends ESTestCase { 
assertEquals(val1.getBytes(), cacheService.getStats().size()); } - ByteSizeValue val2 = new ByteSizeValue(randomIntBetween(1, 5), ByteSizeUnit.MB); + ByteSizeValue val2 = ByteSizeValue.of(randomIntBetween(1, 5), ByteSizeUnit.MB); settings = Settings.builder() .put(settings) .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), val2.getStringRep()) diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java index 696c16df31a2..7d6def11df2c 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java @@ -120,7 +120,7 @@ public class CcrRetentionLeaseIT extends CcrIntegTestCase { ) throws IOException { final ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .masterNodeTimeout(TimeValue.MAX_VALUE); - final String chunkSize = new ByteSizeValue(randomFrom(4, 128, 1024), ByteSizeUnit.KB).getStringRep(); + final String chunkSize = ByteSizeValue.of(randomFrom(4, 128, 1024), ByteSizeUnit.KB).getStringRep(); settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.RECOVERY_CHUNK_SIZE.getKey(), chunkSize)); assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet()); diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java index 274d723a3757..7d2f74e0c92f 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java @@ -92,10 +92,10 @@ 
public class FollowerFailOverIT extends CcrIntegTestCase { availableDocs.release(between(100, 200)); PutFollowAction.Request follow = putFollow(leaderIndex, followerIndex); follow.getParameters().setMaxReadRequestOperationCount(randomIntBetween(32, 2048)); - follow.getParameters().setMaxReadRequestSize(new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB)); + follow.getParameters().setMaxReadRequestSize(ByteSizeValue.of(randomIntBetween(1, 4096), ByteSizeUnit.KB)); follow.getParameters().setMaxOutstandingReadRequests(randomIntBetween(1, 10)); follow.getParameters().setMaxWriteRequestOperationCount(randomIntBetween(32, 2048)); - follow.getParameters().setMaxWriteRequestSize(new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB)); + follow.getParameters().setMaxWriteRequestSize(ByteSizeValue.of(randomIntBetween(1, 4096), ByteSizeUnit.KB)); follow.getParameters().setMaxOutstandingWriteRequests(randomIntBetween(1, 10)); logger.info("--> follow request {}", Strings.toString(follow)); followerClient().execute(PutFollowAction.INSTANCE, follow).get(); @@ -153,10 +153,10 @@ public class FollowerFailOverIT extends CcrIntegTestCase { PutFollowAction.Request followRequest = putFollow("index1", "index2"); followRequest.getParameters().setMaxReadRequestOperationCount(randomIntBetween(32, 2048)); - followRequest.getParameters().setMaxReadRequestSize(new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB)); + followRequest.getParameters().setMaxReadRequestSize(ByteSizeValue.of(randomIntBetween(1, 4096), ByteSizeUnit.KB)); followRequest.getParameters().setMaxOutstandingReadRequests(randomIntBetween(1, 10)); followRequest.getParameters().setMaxWriteRequestOperationCount(randomIntBetween(32, 2048)); - followRequest.getParameters().setMaxWriteRequestSize(new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB)); + followRequest.getParameters().setMaxWriteRequestSize(ByteSizeValue.of(randomIntBetween(1, 4096), ByteSizeUnit.KB)); 
followRequest.getParameters().setMaxOutstandingWriteRequests(randomIntBetween(1, 10)); followRequest.waitForActiveShards(ActiveShardCount.ALL); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index ba2c1bd18b43..4872862bd82b 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -150,7 +150,7 @@ public class IndexFollowingIT extends CcrIntegTestCase { .setMasterNodeTimeout(TimeValue.MAX_VALUE) .setPersistentSettings( Settings.builder() - .put(CcrSettings.RECOVERY_CHUNK_SIZE.getKey(), new ByteSizeValue(randomIntBetween(1, 1000), ByteSizeUnit.KB)) + .put(CcrSettings.RECOVERY_CHUNK_SIZE.getKey(), ByteSizeValue.of(randomIntBetween(1, 1000), ByteSizeUnit.KB)) ) .get(); @@ -667,7 +667,7 @@ public class IndexFollowingIT extends CcrIntegTestCase { } PutFollowAction.Request followRequest = putFollow("index1", "index2"); - followRequest.getParameters().setMaxReadRequestSize(new ByteSizeValue(randomIntBetween(1, 1024), ByteSizeUnit.BYTES)); + followRequest.getParameters().setMaxReadRequestSize(ByteSizeValue.of(randomIntBetween(1, 1024), ByteSizeUnit.BYTES)); followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); final Map firstBatchNumDocsPerShard = new HashMap<>(); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java index 268b2ab47554..ab2d64acd308 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java @@ -60,7 +60,7 @@ public final class 
CcrSettings { */ public static final Setting RECOVERY_MAX_BYTES_PER_SECOND = Setting.byteSizeSetting( "ccr.indices.recovery.max_bytes_per_sec", - new ByteSizeValue(40, ByteSizeUnit.MB), + ByteSizeValue.of(40, ByteSizeUnit.MB), Setting.Property.Dynamic, Setting.Property.NodeScope ); @@ -70,9 +70,9 @@ public final class CcrSettings { */ public static final Setting RECOVERY_CHUNK_SIZE = Setting.byteSizeSetting( "ccr.indices.recovery.chunk_size", - new ByteSizeValue(1, ByteSizeUnit.MB), - new ByteSizeValue(1, ByteSizeUnit.KB), - new ByteSizeValue(1, ByteSizeUnit.GB), + ByteSizeValue.of(1, ByteSizeUnit.MB), + ByteSizeValue.of(1, ByteSizeUnit.KB), + ByteSizeValue.of(1, ByteSizeUnit.GB), Setting.Property.Dynamic, Setting.Property.NodeScope ); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java index 5749bf762e2e..edce8c098e76 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowAction.java @@ -69,12 +69,12 @@ import static org.elasticsearch.xpack.ccr.Ccr.CCR_THREAD_POOL_NAME; public class TransportResumeFollowAction extends AcknowledgedTransportMasterNodeAction { - static final ByteSizeValue DEFAULT_MAX_READ_REQUEST_SIZE = new ByteSizeValue(32, ByteSizeUnit.MB); + static final ByteSizeValue DEFAULT_MAX_READ_REQUEST_SIZE = ByteSizeValue.of(32, ByteSizeUnit.MB); static final ByteSizeValue DEFAULT_MAX_WRITE_REQUEST_SIZE = ByteSizeValue.ofBytes(Long.MAX_VALUE); private static final TimeValue DEFAULT_MAX_RETRY_DELAY = new TimeValue(500); private static final int DEFAULT_MAX_OUTSTANDING_WRITE_REQUESTS = 9; private static final int DEFAULT_MAX_WRITE_BUFFER_COUNT = Integer.MAX_VALUE; - private static final ByteSizeValue DEFAULT_MAX_WRITE_BUFFER_SIZE = new 
ByteSizeValue(512, ByteSizeUnit.MB); + private static final ByteSizeValue DEFAULT_MAX_WRITE_BUFFER_SIZE = ByteSizeValue.of(512, ByteSizeUnit.MB); private static final int DEFAULT_MAX_READ_REQUEST_OPERATION_COUNT = 5120; private static final int DEFAULT_MAX_WRITE_REQUEST_OPERATION_COUNT = 5120; private static final int DEFAULT_MAX_OUTSTANDING_READ_REQUESTS = 12; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestShardChangesAction.java index cb2aafd2300c..0bbcfab97eb3 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestShardChangesAction.java @@ -60,7 +60,7 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; public class RestShardChangesAction extends BaseRestHandler { private static final long DEFAULT_FROM_SEQ_NO = 0L; - private static final ByteSizeValue DEFAULT_MAX_BATCH_SIZE = new ByteSizeValue(32, ByteSizeUnit.MB); + private static final ByteSizeValue DEFAULT_MAX_BATCH_SIZE = ByteSizeValue.of(32, ByteSizeUnit.MB); private static final TimeValue DEFAULT_POLL_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES); private static final int DEFAULT_MAX_OPERATIONS_COUNT = 1024; private static final int DEFAULT_TIMEOUT_SECONDS = 60; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ResumeFollowActionRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ResumeFollowActionRequestTests.java index 475c9135d494..9abbf8a7f5fb 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ResumeFollowActionRequestTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ResumeFollowActionRequestTests.java @@ -69,7 +69,7 @@ public class ResumeFollowActionRequestTests extends AbstractXContentSerializingT 
followParameters.setMaxOutstandingWriteRequests(randomIntBetween(1, Integer.MAX_VALUE)); } if (randomBoolean()) { - followParameters.setMaxReadRequestSize(new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES)); + followParameters.setMaxReadRequestSize(ByteSizeValue.of(randomNonNegativeLong(), ByteSizeUnit.BYTES)); } if (randomBoolean()) { followParameters.setMaxWriteBufferCount(randomIntBetween(1, Integer.MAX_VALUE)); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java index 6f6131c8ea4e..5f6db10a1aaf 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java @@ -67,7 +67,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase { min, size, indexShard.getHistoryUUID(), - new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES) + ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES) ); final List seenSeqNos = Arrays.stream(operations).map(Translog.Operation::seqNo).collect(Collectors.toList()); final List expectedSeqNos = LongStream.rangeClosed(min, max).boxed().collect(Collectors.toList()); @@ -84,7 +84,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase { numWrites, numWrites + 1, indexShard.getHistoryUUID(), - new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES) + ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES) ) ); final String message = String.format( @@ -103,7 +103,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase { numWrites - 10, numWrites + 10, indexShard.getHistoryUUID(), - new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES) + ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES) ); assertThat(operations.length, equalTo(10)); @@ -116,7 +116,7 @@ public class ShardChangesActionTests extends 
ESSingleNodeTestCase { 0, 10, "different-history-uuid", - new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES) + ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES) ) ); assertThat( @@ -136,7 +136,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase { fromSeqNo, batchSize, indexShard.getHistoryUUID(), - new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES) + ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES) ) ); assertThat( @@ -159,7 +159,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase { 0, 1, indexShard.getHistoryUUID(), - new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES) + ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES) ) ); } @@ -179,7 +179,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase { 0, randomIntBetween(100, 500), indexShard.getHistoryUUID(), - new ByteSizeValue(256, ByteSizeUnit.BYTES) + ByteSizeValue.of(256, ByteSizeUnit.BYTES) ); assertThat(operations.length, equalTo(8)); assertThat(operations[0].seqNo(), equalTo(0L)); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java index ff3c029cd997..1e0387c2f716 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java @@ -105,7 +105,7 @@ public class ShardFollowNodeTaskRandomTests extends ESTestCase { TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, 10240, - new ByteSizeValue(512, ByteSizeUnit.MB), + ByteSizeValue.of(512, ByteSizeUnit.MB), TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10), Collections.emptyMap() diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java 
b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java index 23c8d5c0ed4c..c2c8e4ed5d40 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java @@ -139,7 +139,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { params.maxReadRequestOperationCount = 64; params.maxOutstandingReadRequests = 1; params.maxOutstandingWriteRequests = 0; // need to set outstandingWrites to 0, other the write buffer gets flushed immediately - params.maxWriteBufferSize = new ByteSizeValue(1, ByteSizeUnit.KB); + params.maxWriteBufferSize = ByteSizeValue.of(1, ByteSizeUnit.KB); ShardFollowNodeTask task = createShardFollowTask(params); startTask(task, 63, -1); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java index 573c66cbb614..34ce43e58308 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -340,7 +340,7 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest fromSeqNo, numOps, leadingPrimary.getHistoryUUID(), - new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES) + ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES) ); IndexShard followingPrimary = followerGroup.getPrimary(); @@ -405,7 +405,7 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest Future recoveryFuture = null; Settings settings = Settings.builder() .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true) - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(between(1, 
1000), ByteSizeUnit.KB)) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(between(1, 1000), ByteSizeUnit.KB)) .build(); IndexMetadata indexMetadata = buildIndexMetadata(between(0, 1), settings, indexMapping); try (ReplicationGroup group = new ReplicationGroup(indexMetadata) { @@ -505,7 +505,7 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest private ReplicationGroup createFollowGroup(ReplicationGroup leaderGroup, int replicas) throws IOException { final Settings settings = Settings.builder() .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true) - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(between(1, 1000), ByteSizeUnit.KB)) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(between(1, 1000), ByteSizeUnit.KB)) .build(); IndexMetadata indexMetadata = buildIndexMetadata(replicas, settings, indexMapping); return new ReplicationGroup(indexMetadata) { @@ -573,10 +573,10 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest between(1, 64), between(1, 8), between(1, 4), - new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES), - new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES), + ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES), + ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES), 10240, - new ByteSizeValue(512, ByteSizeUnit.MB), + ByteSizeValue.of(512, ByteSizeUnit.MB), TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10), Collections.emptyMap() diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportActivateAutoFollowPatternActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportActivateAutoFollowPatternActionTests.java index 3a6a0d90f60b..d56214373582 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportActivateAutoFollowPatternActionTests.java 
+++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportActivateAutoFollowPatternActionTests.java @@ -120,10 +120,10 @@ public class TransportActivateAutoFollowPatternActionTests extends ESTestCase { randomIntBetween(1, 100), randomIntBetween(1, 100), randomIntBetween(1, 100), - new ByteSizeValue(randomIntBetween(1, 100), randomFrom(ByteSizeUnit.values())), - new ByteSizeValue(randomIntBetween(1, 100), randomFrom(ByteSizeUnit.values())), + ByteSizeValue.of(randomIntBetween(1, 100), randomFrom(ByteSizeUnit.values())), + ByteSizeValue.of(randomIntBetween(1, 100), randomFrom(ByteSizeUnit.values())), randomIntBetween(1, 100), - new ByteSizeValue(randomIntBetween(1, 100), randomFrom(ByteSizeUnit.values())), + ByteSizeValue.of(randomIntBetween(1, 100), randomFrom(ByteSizeUnit.values())), TimeValue.timeValueSeconds(randomIntBetween(30, 600)), TimeValue.timeValueSeconds(randomIntBetween(30, 600)) ); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java index 9a5afd65952a..698dc9be2906 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java @@ -86,7 +86,7 @@ public class TransportFollowStatsActionTests extends ESTestCase { TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, 10240, - new ByteSizeValue(512, ByteSizeUnit.MB), + ByteSizeValue.of(512, ByteSizeUnit.MB), TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10), Collections.emptyMap() diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java 
b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java index bfaac52f9285..474ad65e05da 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java @@ -88,7 +88,7 @@ public class TransportUnfollowActionTests extends ESTestCase { TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, 10240, - new ByteSizeValue(512, ByteSizeUnit.MB), + ByteSizeValue.of(512, ByteSizeUnit.MB), TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10), Collections.emptyMap() diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PauseFollowerIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PauseFollowerIndexStepTests.java index da5d6eddfc72..efdd01b44d2f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PauseFollowerIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PauseFollowerIndexStepTests.java @@ -165,10 +165,10 @@ public class PauseFollowerIndexStepTests extends AbstractUnfollowIndexStepTestCa 1024, 1, 1, - new ByteSizeValue(32, ByteSizeUnit.MB), - new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES), + ByteSizeValue.of(32, ByteSizeUnit.MB), + ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES), 10240, - new ByteSizeValue(512, ByteSizeUnit.MB), + ByteSizeValue.of(512, ByteSizeUnit.MB), TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10), Map.of() diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java index 453404d096c3..8ad841664b87 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java @@ -60,20 +60,20 @@ public class WaitForRolloverReadyStepTests extends AbstractStepTestCase nextKey = new Step.StepKey(nextKey.phase(), nextKey.action(), nextKey.name() + randomAlphaOfLength(5)); case 2 -> maxSize = randomValueOtherThan(maxSize, () -> { ByteSizeUnit maxSizeUnit = randomFrom(ByteSizeUnit.values()); - return new ByteSizeValue(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit); + return ByteSizeValue.of(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit); }); case 3 -> maxPrimaryShardSize = randomValueOtherThan(maxPrimaryShardSize, () -> { ByteSizeUnit maxPrimaryShardSizeUnit = randomFrom(ByteSizeUnit.values()); - return new ByteSizeValue(randomNonNegativeLong() / maxPrimaryShardSizeUnit.toBytes(1), maxPrimaryShardSizeUnit); + return ByteSizeValue.of(randomNonNegativeLong() / maxPrimaryShardSizeUnit.toBytes(1), maxPrimaryShardSizeUnit); }); case 4 -> maxAge = randomValueOtherThan(maxAge, () -> randomPositiveTimeValue()); case 5 -> maxDocs = randomValueOtherThan(maxDocs, ESTestCase::randomNonNegativeLong); case 6 -> maxPrimaryShardDocs = randomValueOtherThan(maxPrimaryShardDocs, ESTestCase::randomNonNegativeLong); case 7 -> minSize = randomValueOtherThan(minSize, () -> { ByteSizeUnit minSizeUnit = randomFrom(ByteSizeUnit.values()); - return new ByteSizeValue(randomNonNegativeLong() / minSizeUnit.toBytes(1), minSizeUnit); + return ByteSizeValue.of(randomNonNegativeLong() / minSizeUnit.toBytes(1), minSizeUnit); }); case 8 -> minPrimaryShardSize = randomValueOtherThan(minPrimaryShardSize, () -> { ByteSizeUnit minPrimaryShardSizeUnit = randomFrom(ByteSizeUnit.values()); - return new ByteSizeValue(randomNonNegativeLong() / minPrimaryShardSizeUnit.toBytes(1), minPrimaryShardSizeUnit); + return 
ByteSizeValue.of(randomNonNegativeLong() / minPrimaryShardSizeUnit.toBytes(1), minPrimaryShardSizeUnit); }); case 9 -> minAge = randomValueOtherThan(minAge, () -> randomPositiveTimeValue()); case 10 -> minDocs = randomValueOtherThan(minDocs, ESTestCase::randomNonNegativeLong); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ForecastJobActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ForecastJobActionRequestTests.java index efb165757c5d..ec7de261e9fa 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ForecastJobActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ForecastJobActionRequestTests.java @@ -34,7 +34,7 @@ public class ForecastJobActionRequestTests extends AbstractXContentSerializingTe } if (randomBoolean()) { request.setMaxModelMemory( - randomLongBetween(new ByteSizeValue(1, ByteSizeUnit.MB).getBytes(), new ByteSizeValue(499, ByteSizeUnit.MB).getBytes()) + randomLongBetween(ByteSizeValue.of(1, ByteSizeUnit.MB).getBytes(), ByteSizeValue.of(499, ByteSizeUnit.MB).getBytes()) ); } return request; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigTests.java index 02924b6d1501..933aaeadcb0c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigTests.java @@ -165,7 +165,7 @@ public class DataFrameAnalyticsConfigTests extends AbstractBWCSerializationTestC ); } if (randomBoolean()) { - builder.setModelMemoryLimit(new ByteSizeValue(randomIntBetween(1, 16), randomFrom(ByteSizeUnit.MB, ByteSizeUnit.GB))); + 
builder.setModelMemoryLimit(ByteSizeValue.of(randomIntBetween(1, 16), randomFrom(ByteSizeUnit.MB, ByteSizeUnit.GB))); } if (randomBoolean()) { builder.setDescription(randomAlphaOfLength(20)); @@ -285,31 +285,31 @@ public class DataFrameAnalyticsConfigTests extends AbstractBWCSerializationTestC assertTooSmall( expectThrows( ElasticsearchStatusException.class, - () -> builder.setModelMemoryLimit(new ByteSizeValue(-1, ByteSizeUnit.BYTES)).build() + () -> builder.setModelMemoryLimit(ByteSizeValue.of(-1, ByteSizeUnit.BYTES)).build() ) ); assertTooSmall( expectThrows( ElasticsearchStatusException.class, - () -> builder.setModelMemoryLimit(new ByteSizeValue(0, ByteSizeUnit.BYTES)).build() + () -> builder.setModelMemoryLimit(ByteSizeValue.of(0, ByteSizeUnit.BYTES)).build() ) ); assertTooSmall( expectThrows( ElasticsearchStatusException.class, - () -> builder.setModelMemoryLimit(new ByteSizeValue(0, ByteSizeUnit.KB)).build() + () -> builder.setModelMemoryLimit(ByteSizeValue.of(0, ByteSizeUnit.KB)).build() ) ); assertTooSmall( expectThrows( ElasticsearchStatusException.class, - () -> builder.setModelMemoryLimit(new ByteSizeValue(0, ByteSizeUnit.MB)).build() + () -> builder.setModelMemoryLimit(ByteSizeValue.of(0, ByteSizeUnit.MB)).build() ) ); assertTooSmall( expectThrows( ElasticsearchStatusException.class, - () -> builder.setModelMemoryLimit(new ByteSizeValue(1023, ByteSizeUnit.BYTES)).build() + () -> builder.setModelMemoryLimit(ByteSizeValue.of(1023, ByteSizeUnit.BYTES)).build() ) ); } @@ -329,7 +329,7 @@ public class DataFrameAnalyticsConfigTests extends AbstractBWCSerializationTestC DataFrameAnalyticsConfig defaultLimitConfig = createRandomBuilder("foo").setModelMemoryLimit(null).build(); - ByteSizeValue maxLimit = new ByteSizeValue(randomIntBetween(500, 1000), ByteSizeUnit.MB); + ByteSizeValue maxLimit = ByteSizeValue.of(randomIntBetween(500, 1000), ByteSizeUnit.MB); if (maxLimit.compareTo(defaultLimitConfig.getModelMemoryLimit()) < 0) { assertThat(maxLimit, equalTo(new 
DataFrameAnalyticsConfig.Builder(defaultLimitConfig, maxLimit).build().getModelMemoryLimit())); } else { @@ -342,10 +342,10 @@ public class DataFrameAnalyticsConfigTests extends AbstractBWCSerializationTestC public void testExplicitModelMemoryLimitTooHigh() { - ByteSizeValue configuredLimit = new ByteSizeValue(randomIntBetween(5, 10), ByteSizeUnit.GB); + ByteSizeValue configuredLimit = ByteSizeValue.of(randomIntBetween(5, 10), ByteSizeUnit.GB); DataFrameAnalyticsConfig explicitLimitConfig = createRandomBuilder("foo").setModelMemoryLimit(configuredLimit).build(); - ByteSizeValue maxLimit = new ByteSizeValue(randomIntBetween(500, 1000), ByteSizeUnit.MB); + ByteSizeValue maxLimit = ByteSizeValue.of(randomIntBetween(500, 1000), ByteSizeUnit.MB); ElasticsearchStatusException e = expectThrows( ElasticsearchStatusException.class, () -> new DataFrameAnalyticsConfig.Builder(explicitLimitConfig, maxLimit).build() diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/explain/MemoryEstimationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/explain/MemoryEstimationTests.java index ad26ae421aca..cf803f7c3f95 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/explain/MemoryEstimationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/explain/MemoryEstimationTests.java @@ -54,19 +54,19 @@ public class MemoryEstimationTests extends AbstractXContentSerializingTestCase { public void testValidateAnalysisLimitsAndSetDefaults_whenMaxIsLessThanTheDefault() { Job.Builder builder = buildJobBuilder("foo"); - builder.validateAnalysisLimitsAndSetDefaults(new ByteSizeValue(512L, ByteSizeUnit.MB)); + builder.validateAnalysisLimitsAndSetDefaults(ByteSizeValue.of(512L, ByteSizeUnit.MB)); Job job = builder.build(); assertNotNull(job.getAnalysisLimits()); @@ -189,7 +189,7 @@ public class JobTests extends AbstractXContentSerializingTestCase { 
builder.setAnalysisLimits(new AnalysisLimits(4096L, null)); ElasticsearchStatusException e = expectThrows( ElasticsearchStatusException.class, - () -> builder.validateAnalysisLimitsAndSetDefaults(new ByteSizeValue(1000L, ByteSizeUnit.MB)) + () -> builder.validateAnalysisLimitsAndSetDefaults(ByteSizeValue.of(1000L, ByteSizeUnit.MB)) ); assertEquals( "model_memory_limit [4gb] must be less than the value of the " @@ -198,7 +198,7 @@ public class JobTests extends AbstractXContentSerializingTestCase { e.getMessage() ); - builder.validateAnalysisLimitsAndSetDefaults(new ByteSizeValue(8192L, ByteSizeUnit.MB)); + builder.validateAnalysisLimitsAndSetDefaults(ByteSizeValue.of(8192L, ByteSizeUnit.MB)); } public void testEquals_GivenDifferentClass() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java index 24a3a097e9e2..6d81793117ae 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java @@ -360,7 +360,7 @@ public class JobUpdateTests extends AbstractXContentSerializingTestCase update.mergeWithJob(jobBuilder.build(), new ByteSizeValue(512L, ByteSizeUnit.MB)) + () -> update.mergeWithJob(jobBuilder.build(), ByteSizeValue.of(512L, ByteSizeUnit.MB)) ); assertEquals( "model_memory_limit [1gb] must be less than the value of the xpack.ml.max_model_memory_limit setting [512mb]", @@ -386,14 +386,14 @@ public class JobUpdateTests extends AbstractXContentSerializingTestCase updateAboveMaxLimit.mergeWithJob(jobBuilder.build(), new ByteSizeValue(5000L, ByteSizeUnit.MB)) + () -> updateAboveMaxLimit.mergeWithJob(jobBuilder.build(), ByteSizeValue.of(5000L, ByteSizeUnit.MB)) ); assertEquals( "model_memory_limit [7.8gb] must be less than the value of the xpack.ml.max_model_memory_limit 
setting [4.8gb]", e.getMessage() ); - updateAboveMaxLimit.mergeWithJob(jobBuilder.build(), new ByteSizeValue(10000L, ByteSizeUnit.MB)); + updateAboveMaxLimit.mergeWithJob(jobBuilder.build(), ByteSizeValue.of(10000L, ByteSizeUnit.MB)); } public void testUpdate_givenEmptySnapshot() { diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardIndexer.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardIndexer.java index 50149ec2cbe5..bb3230ef0652 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardIndexer.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardIndexer.java @@ -81,8 +81,8 @@ class DownsampleShardIndexer { private static final Logger logger = LogManager.getLogger(DownsampleShardIndexer.class); public static final int DOWNSAMPLE_BULK_ACTIONS = 10000; - public static final ByteSizeValue DOWNSAMPLE_BULK_SIZE = new ByteSizeValue(1, ByteSizeUnit.MB); - public static final ByteSizeValue DOWNSAMPLE_MAX_BYTES_IN_FLIGHT = new ByteSizeValue(50, ByteSizeUnit.MB); + public static final ByteSizeValue DOWNSAMPLE_BULK_SIZE = ByteSizeValue.of(1, ByteSizeUnit.MB); + public static final ByteSizeValue DOWNSAMPLE_MAX_BYTES_IN_FLIGHT = ByteSizeValue.of(50, ByteSizeUnit.MB); private final IndexShard indexShard; private final Client client; private final DownsampleMetrics downsampleMetrics; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpSettings.java index b2825d1b79cb..270e62765ba7 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpSettings.java @@ -20,9 +20,9 @@ public class 
HttpSettings { // These settings are default scope for testing static final Setting MAX_HTTP_RESPONSE_SIZE = Setting.byteSizeSetting( "xpack.inference.http.max_response_size", - new ByteSizeValue(50, ByteSizeUnit.MB), // default + ByteSizeValue.of(50, ByteSizeUnit.MB), // default ByteSizeValue.ONE, // min - new ByteSizeValue(100, ByteSizeUnit.MB), // max + ByteSizeValue.of(100, ByteSizeUnit.MB), // max Setting.Property.NodeScope, Setting.Property.Dynamic ); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/SizeLimitInputStreamTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/SizeLimitInputStreamTests.java index 638c1858df45..dd13747f7a20 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/SizeLimitInputStreamTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/SizeLimitInputStreamTests.java @@ -86,7 +86,7 @@ public class SizeLimitInputStreamTests extends ESTestCase { private static SizeLimitInputStream createRandomLimitedStream(int dataSize, int maxAllowedSize) { String data = randomAlphaOfLength(dataSize); - ByteSizeValue byteSizeValue = new ByteSizeValue(maxAllowedSize, ByteSizeUnit.BYTES); + ByteSizeValue byteSizeValue = ByteSizeValue.of(maxAllowedSize, ByteSizeUnit.BYTES); return new SizeLimitInputStream(byteSizeValue, new ByteArrayInputStream(data.getBytes(UTF_8))); } } diff --git a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelLoaderUtils.java b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelLoaderUtils.java index 2e08b845f659..32a5a8127a42 100644 --- a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelLoaderUtils.java +++ b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelLoaderUtils.java @@ 
-63,7 +63,7 @@ final class ModelLoaderUtils { public static String METADATA_FILE_EXTENSION = ".metadata.json"; public static String MODEL_FILE_EXTENSION = ".pt"; - private static final ByteSizeValue VOCABULARY_SIZE_LIMIT = new ByteSizeValue(20, ByteSizeUnit.MB); + private static final ByteSizeValue VOCABULARY_SIZE_LIMIT = ByteSizeValue.of(20, ByteSizeUnit.MB); private static final String VOCABULARY = "vocabulary"; private static final String MERGES = "merges"; private static final String SCORES = "scores"; diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java index f4d50df4ff61..2793fbfd08e1 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java @@ -390,8 +390,8 @@ public class ClusterStatsMonitoringDocTests extends BaseMonitoringDocTestCase SNAPSHOT_BLOB_CACHE_METADATA_FILES_MAX_LENGTH_SETTING = new Setting<>( SNAPSHOT_BLOB_CACHE_METADATA_FILES_MAX_LENGTH, - new ByteSizeValue(64L, ByteSizeUnit.KB).getStringRep(), + ByteSizeValue.of(64L, ByteSizeUnit.KB).getStringRep(), s -> Setting.parseByteSize( s, - new ByteSizeValue(1L, ByteSizeUnit.KB), + ByteSizeValue.of(1L, ByteSizeUnit.KB), ByteSizeValue.ofBytes(Long.MAX_VALUE), SNAPSHOT_BLOB_CACHE_METADATA_FILES_MAX_LENGTH ), diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/CacheService.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/CacheService.java index 636d138c8a3e..2d8d78473501 100644 --- 
a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/CacheService.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/CacheService.java @@ -71,7 +71,7 @@ public class CacheService extends AbstractLifecycleComponent { private static final String SETTINGS_PREFIX = "xpack.searchable.snapshot.cache."; - public static final ByteSizeValue MIN_SNAPSHOT_CACHE_RANGE_SIZE = new ByteSizeValue(4, ByteSizeUnit.KB); + public static final ByteSizeValue MIN_SNAPSHOT_CACHE_RANGE_SIZE = ByteSizeValue.of(4, ByteSizeUnit.KB); public static final ByteSizeValue MAX_SNAPSHOT_CACHE_RANGE_SIZE = ByteSizeValue.ofBytes(Integer.MAX_VALUE); /** @@ -82,7 +82,7 @@ public class CacheService extends AbstractLifecycleComponent { */ public static final Setting SNAPSHOT_CACHE_RANGE_SIZE_SETTING = Setting.byteSizeSetting( SETTINGS_PREFIX + "range_size", - new ByteSizeValue(32, ByteSizeUnit.MB), // default + ByteSizeValue.of(32, ByteSizeUnit.MB), // default MIN_SNAPSHOT_CACHE_RANGE_SIZE, // min MAX_SNAPSHOT_CACHE_RANGE_SIZE, // max Setting.Property.NodeScope @@ -96,7 +96,7 @@ public class CacheService extends AbstractLifecycleComponent { */ public static final Setting SNAPSHOT_CACHE_RECOVERY_RANGE_SIZE_SETTING = Setting.byteSizeSetting( SETTINGS_PREFIX + "recovery_range_size", - new ByteSizeValue(128, ByteSizeUnit.KB), // default + ByteSizeValue.of(128, ByteSizeUnit.KB), // default MIN_SNAPSHOT_CACHE_RANGE_SIZE, // min MAX_SNAPSHOT_CACHE_RANGE_SIZE, // max Setting.Property.NodeScope diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/IndexInputStats.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/IndexInputStats.java index 2dc1aaa59238..9abfda1fa936 100644 --- 
a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/IndexInputStats.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/IndexInputStats.java @@ -23,7 +23,7 @@ import java.util.function.LongSupplier; public class IndexInputStats { /* A threshold beyond which an index input seeking is counted as "large" */ - static final ByteSizeValue SEEKING_THRESHOLD = new ByteSizeValue(8, ByteSizeUnit.MB); + static final ByteSizeValue SEEKING_THRESHOLD = ByteSizeValue.of(8, ByteSizeUnit.MB); private final long numFiles; private final long totalSize; diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java index 4ee2bf7e6563..238e0d234ca7 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java @@ -224,7 +224,7 @@ public abstract class AbstractSearchableSnapshotsTestCase extends ESIndexInputTe } protected static ByteSizeValue randomFrozenCacheRangeSize() { - return pageAlignedBetween(new ByteSizeValue(4, ByteSizeUnit.KB), ByteSizeValue.ofBytes(Integer.MAX_VALUE)); + return pageAlignedBetween(ByteSizeValue.of(4, ByteSizeUnit.KB), ByteSizeValue.ofBytes(Integer.MAX_VALUE)); } private static ByteSizeValue pageAlignedBetween(ByteSizeValue min, ByteSizeValue max) { diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryStatsTests.java 
b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryStatsTests.java index a39031037eac..2f8fe4df336c 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryStatsTests.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryStatsTests.java @@ -116,7 +116,7 @@ public class SearchableSnapshotDirectoryStatsTests extends AbstractSearchableSna public void testCachedBytesReadsAndWrites() throws Exception { // a cache service with a low range size but enough space to not evict the cache file final ByteSizeValue rangeSize = ByteSizeValue.ofBytes(SharedBytes.PAGE_SIZE * randomLongBetween(3, 6)); - final ByteSizeValue cacheSize = new ByteSizeValue(10, ByteSizeUnit.MB); + final ByteSizeValue cacheSize = ByteSizeValue.of(10, ByteSizeUnit.MB); executeTestCaseWithCache(cacheSize, rangeSize, (fileName, fileContent, directory) -> { try (IndexInput input = directory.openInput(fileName, randomIOContext())) { @@ -254,7 +254,7 @@ public class SearchableSnapshotDirectoryStatsTests extends AbstractSearchableSna } public void testDirectBytesReadsWithoutCache() throws Exception { - final ByteSizeValue uncachedChunkSize = new ByteSizeValue(randomIntBetween(512, MAX_FILE_LENGTH), ByteSizeUnit.BYTES); + final ByteSizeValue uncachedChunkSize = ByteSizeValue.of(randomIntBetween(512, MAX_FILE_LENGTH), ByteSizeUnit.BYTES); executeTestCaseWithoutCache(uncachedChunkSize, (fileName, fileContent, directory) -> { assertThat(directory.getStats(fileName), nullValue()); @@ -291,7 +291,7 @@ public class SearchableSnapshotDirectoryStatsTests extends AbstractSearchableSna public void testOptimizedBytesReads() throws Exception { // use a large uncached chunk size that allows to read the file in a single operation - final ByteSizeValue uncachedChunkSize = new 
ByteSizeValue(1, ByteSizeUnit.GB); + final ByteSizeValue uncachedChunkSize = ByteSizeValue.of(1, ByteSizeUnit.GB); executeTestCaseWithoutCache(uncachedChunkSize, (fileName, fileContent, directory) -> { final IOContext context = randomIOContext(); try (IndexInput input = directory.openInput(fileName, context)) { diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/DirectBlobContainerIndexInputTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/DirectBlobContainerIndexInputTests.java index 63345d7447c4..c32a064a7887 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/DirectBlobContainerIndexInputTests.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/DirectBlobContainerIndexInputTests.java @@ -66,13 +66,13 @@ public class DirectBlobContainerIndexInputTests extends ESIndexInputTestCase { new StoreFileMetadata(fileName, input.length, checksum, Version.LATEST.toString()), partSize == input.length ? 
randomFrom( - new ByteSizeValue(partSize, ByteSizeUnit.BYTES), - new ByteSizeValue(randomLongBetween(partSize, Long.MAX_VALUE), ByteSizeUnit.BYTES), + ByteSizeValue.of(partSize, ByteSizeUnit.BYTES), + ByteSizeValue.of(randomLongBetween(partSize, Long.MAX_VALUE), ByteSizeUnit.BYTES), ByteSizeValue.ZERO, - new ByteSizeValue(-1, ByteSizeUnit.BYTES), + ByteSizeValue.of(-1, ByteSizeUnit.BYTES), null ) - : new ByteSizeValue(partSize, ByteSizeUnit.BYTES) + : ByteSizeValue.of(partSize, ByteSizeUnit.BYTES) ); final BlobContainer blobContainer = mock(BlobContainer.class); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index cd965bb67752..448b675aa0e5 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -256,9 +256,9 @@ public class Watcher extends Plugin implements SystemIndexPlugin, ScriptPlugin, ); private static final Setting SETTING_BULK_SIZE = Setting.byteSizeSetting( "xpack.watcher.bulk.size", - new ByteSizeValue(1, ByteSizeUnit.MB), - new ByteSizeValue(1, ByteSizeUnit.MB), - new ByteSizeValue(10, ByteSizeUnit.MB), + ByteSizeValue.of(1, ByteSizeUnit.MB), + ByteSizeValue.of(1, ByteSizeUnit.MB), + ByteSizeValue.of(10, ByteSizeUnit.MB), NodeScope ); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpSettings.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpSettings.java index 553f1d40cd64..f16f739c7ee4 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpSettings.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpSettings.java @@ -58,9 +58,9 @@ public class HttpSettings { static final Setting MAX_HTTP_RESPONSE_SIZE = Setting.byteSizeSetting( 
"xpack.http.max_response_size", - new ByteSizeValue(10, ByteSizeUnit.MB), // default + ByteSizeValue.of(10, ByteSizeUnit.MB), // default ByteSizeValue.ONE, // min - new ByteSizeValue(50, ByteSizeUnit.MB), // max + ByteSizeValue.of(50, ByteSizeUnit.MB), // max Property.NodeScope ); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java index 0aac3cb4463e..7e59d9aa29dd 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java @@ -566,7 +566,7 @@ public class HttpClientTests extends ESTestCase { webServer.enqueue(new MockResponse().setResponseCode(200).setBody(data)); Settings settings = Settings.builder() - .put(HttpSettings.MAX_HTTP_RESPONSE_SIZE.getKey(), new ByteSizeValue(randomBytesLength - 1, ByteSizeUnit.BYTES)) + .put(HttpSettings.MAX_HTTP_RESPONSE_SIZE.getKey(), ByteSizeValue.of(randomBytesLength - 1, ByteSizeUnit.BYTES)) .build(); HttpRequest.Builder requestBuilder = HttpRequest.builder("localhost", webServer.getPort()).method(HttpMethod.GET).path("/"); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/SizeLimitInputStreamTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/SizeLimitInputStreamTests.java index b804ca97a546..4cd0f486a244 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/SizeLimitInputStreamTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/SizeLimitInputStreamTests.java @@ -30,7 +30,7 @@ public class SizeLimitInputStreamTests extends ESTestCase { } public void testMarking() { - ByteSizeValue byteSizeValue = new ByteSizeValue(1, ByteSizeUnit.BYTES); + 
ByteSizeValue byteSizeValue = ByteSizeValue.of(1, ByteSizeUnit.BYTES); SizeLimitInputStream is = new SizeLimitInputStream(byteSizeValue, new ByteArrayInputStream("empty".getBytes(UTF_8))); assertThat(is.markSupported(), is(false)); expectThrows(UnsupportedOperationException.class, () -> is.mark(10)); @@ -40,7 +40,7 @@ public class SizeLimitInputStreamTests extends ESTestCase { private void test(int inputStreamLength, int maxAllowedSize) throws IOException { String data = randomAlphaOfLength(inputStreamLength); - ByteSizeValue byteSizeValue = new ByteSizeValue(maxAllowedSize, ByteSizeUnit.BYTES); + ByteSizeValue byteSizeValue = ByteSizeValue.of(maxAllowedSize, ByteSizeUnit.BYTES); SizeLimitInputStream is = new SizeLimitInputStream(byteSizeValue, new ByteArrayInputStream(data.getBytes(UTF_8))); if (randomBoolean()) {