Merge remote-tracking branch 'upstream-main/main' into merge-main-9-1-25

Simon Cooper 2025-01-09 16:09:21 +00:00
commit 2e04ed09a9
239 changed files with 3069 additions and 1469 deletions

View file

@ -58,6 +58,7 @@ import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
public class KeyStoreWrapperTests extends ESTestCase {
@ -436,17 +437,8 @@ public class KeyStoreWrapperTests extends ESTestCase {
public void testLegacyV3() throws GeneralSecurityException, IOException {
assumeFalse("Cannot open unprotected keystore on FIPS JVM", inFipsJvm());
final Path configDir = createTempDir();
final Path keystore = configDir.resolve("elasticsearch.keystore");
try (
InputStream is = KeyStoreWrapperTests.class.getResourceAsStream("/format-v3-elasticsearch.keystore");
OutputStream os = Files.newOutputStream(keystore)
) {
final byte[] buffer = new byte[4096];
int readBytes;
while ((readBytes = is.read(buffer)) > 0) {
os.write(buffer, 0, readBytes);
}
}
copyKeyStoreFromResourceToConfigDir(configDir, "/format-v3-elasticsearch.keystore");
final KeyStoreWrapper wrapper = KeyStoreWrapper.load(configDir);
assertNotNull(wrapper);
wrapper.decrypt(new char[0]);
@ -460,9 +452,31 @@ public class KeyStoreWrapperTests extends ESTestCase {
public void testLegacyV5() throws GeneralSecurityException, IOException {
final Path configDir = createTempDir();
copyKeyStoreFromResourceToConfigDir(configDir, "/format-v5-with-password-elasticsearch.keystore");
final KeyStoreWrapper wrapper = KeyStoreWrapper.load(configDir);
assertNotNull(wrapper);
wrapper.decrypt("keystorepassword".toCharArray());
assertThat(wrapper.getFormatVersion(), equalTo(5));
assertThat(wrapper.getSettingNames(), equalTo(Set.of("keystore.seed")));
}
public void testLegacyV6() throws GeneralSecurityException, IOException {
final Path configDir = createTempDir();
copyKeyStoreFromResourceToConfigDir(configDir, "/format-v6-elasticsearch.keystore");
final KeyStoreWrapper wrapper = KeyStoreWrapper.load(configDir);
assertNotNull(wrapper);
wrapper.decrypt("keystorepassword".toCharArray());
assertThat(wrapper.getFormatVersion(), equalTo(6));
assertThat(wrapper.getSettingNames(), equalTo(Set.of("keystore.seed", "string")));
assertThat(wrapper.getString("string"), equalTo("value"));
}
private void copyKeyStoreFromResourceToConfigDir(Path configDir, String name) throws IOException {
final Path keystore = configDir.resolve("elasticsearch.keystore");
try (
InputStream is = KeyStoreWrapperTests.class.getResourceAsStream("/format-v5-with-password-elasticsearch.keystore");
InputStream is = KeyStoreWrapperTests.class.getResourceAsStream(name); //
OutputStream os = Files.newOutputStream(keystore)
) {
final byte[] buffer = new byte[4096];
@ -471,11 +485,6 @@ public class KeyStoreWrapperTests extends ESTestCase {
os.write(buffer, 0, readBytes);
}
}
final KeyStoreWrapper wrapper = KeyStoreWrapper.load(configDir);
assertNotNull(wrapper);
wrapper.decrypt("keystorepassword".toCharArray());
assertThat(wrapper.getFormatVersion(), equalTo(5));
assertThat(wrapper.getSettingNames(), equalTo(Set.of("keystore.seed")));
}
public void testSerializationNewlyCreated() throws Exception {
@ -487,6 +496,7 @@ public class KeyStoreWrapperTests extends ESTestCase {
wrapper.writeTo(out);
final KeyStoreWrapper fromStream = new KeyStoreWrapper(out.bytes().streamInput());
assertThat(fromStream.getFormatVersion(), is(KeyStoreWrapper.CURRENT_VERSION));
assertThat(fromStream.getSettingNames(), hasSize(2));
assertThat(fromStream.getSettingNames(), containsInAnyOrder("string_setting", "keystore.seed"));

View file

@ -0,0 +1,5 @@
pr: 118669
summary: "[Connector API] Support soft-deletes of connectors"
area: Extract&Transform
type: feature
issues: []

View file

@ -1,5 +0,0 @@
pr: 119389
summary: Restrict Connector APIs to manage/monitor_connector privileges
area: Extract&Transform
type: feature
issues: []

View file

@ -0,0 +1,6 @@
pr: 119748
summary: Issue S3 web identity token refresh call with sufficient permissions
area: Snapshot/Restore
type: bug
issues:
- 119747

View file

@ -0,0 +1,5 @@
pr: 119749
summary: Strengthen encryption for elasticsearch-keystore tool to AES 256
area: Infra/CLI
type: enhancement
issues: []

View file

@ -0,0 +1,6 @@
pr: 119750
summary: "ESQL: `connect_transport_exception` should be thrown instead of `verification_exception`\
\ when ENRICH-ing if remote is disconnected"
area: Search
type: bug
issues: []

View file

@ -0,0 +1,6 @@
pr: 119793
summary: Resolve/cluster should mark remotes as not connected when a security exception
is thrown
area: CCS
type: bug
issues: []

View file

@ -0,0 +1,5 @@
pr: 119797
summary: "[Inference API] Fix bug checking for e5 or reranker default IDs"
area: Machine Learning
type: bug
issues: []

View file

@ -6,14 +6,14 @@
beta::[]
.New API reference
[sidebar]
--
For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs].
--
Removes a connector and associated sync jobs.
This is a destructive action that is not recoverable.
Soft-deletes a connector and removes associated sync jobs.
Note: this action doesn't delete any API key, ingest pipeline or data index associated with the connector. These need to be removed manually.
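For illustration, a minimal request sketch; the connector ID `my-connector` is hypothetical. After this call the connector is soft-deleted and its record can still be retrieved with `include_deleted=true`:

[source,console]
----
DELETE _connector/my-connector
----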

View file

@ -33,6 +33,9 @@ To get started with Connector APIs, check out <<es-connectors-tutorial-api, our
`<connector_id>`::
(Required, string)
`include_deleted`::
(Optional, boolean) A flag indicating whether to also return connectors that have been soft-deleted. Defaults to `false`.
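A sketch of retrieving a connector even after it has been soft-deleted, assuming the hypothetical ID `my-connector`:

[source,console]
----
GET _connector/my-connector?include_deleted=true
----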
[[get-connector-api-response-codes]]
==== {api-response-codes-title}

View file

@ -47,6 +47,9 @@ To get started with Connector APIs, check out <<es-connectors-tutorial-api, our
`service_type`::
(Optional, string) A comma-separated list of connector service types, used to filter search results.
`include_deleted`::
(Optional, boolean) A flag indicating whether to also return connectors that have been soft-deleted. Defaults to `false`.
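Similarly, a sketch of listing connectors while including soft-deleted ones:

[source,console]
----
GET _connector?include_deleted=true
----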
[[list-connector-api-example]]
==== {api-examples-title}

View file

@ -28,8 +28,19 @@ For each cluster in the index expression, information is returned about:
3. whether there are any indices, aliases or data streams on that cluster that match
the index expression
4. whether the search is likely to have errors returned when you do the {ccs} (including any
authorization errors if your user does not have permission to query the index)
5. cluster version information, including the Elasticsearch server version
authorization errors if your user does not have permission to query a remote cluster or
the indices on that cluster)
5. (in some cases) cluster version information, including the Elasticsearch server version
[TIP]
====
Whenever a security exception is returned for a remote cluster, that remote
is always marked as `connected=false` in the response, because your user does not have
permission to access that cluster (or possibly the remote index) you are querying.
Once the proper security permissions are in place, you can rely on the `connected` field
in the response to determine whether the remote cluster is available and ready for querying.
====
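As a sketch of how this surfaces in a response (the cluster alias and field values below are illustrative, not taken from a real deployment):

[source,console]
----
GET _resolve/cluster/my_remote:my-index*
----

A remote that returned a security exception would be reported along these lines:

[source,console-result]
----
{
  "my_remote": {
    "connected": false,
    "skip_unavailable": true,
    "error": "<security exception message>"
  }
}
----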
////
[source,console]

View file

@ -1167,6 +1167,12 @@ tag::inference-config-text-embedding-size[]
The number of dimensions in the embedding vector produced by the model.
end::inference-config-text-embedding-size[]
tag::inference-config-text-expansion[]
The text expansion task works with sparse embedding models to transform an input sequence
into a vector of weighted tokens. These embeddings capture semantic meanings and
context and can be used in a <<sparse-vector,sparse vector>> field for powerful insights.
end::inference-config-text-expansion[]
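As a rough sketch of the weighted-token output this task produces (the model ID, input field name, and weights below are hypothetical):

[source,console]
----
POST _ml/trained_models/my-text-expansion-model/_infer
{
  "docs": [ { "text_field": "How do snow tires improve traction?" } ]
}
----

The result is a map of tokens to weights, roughly of the form `{"inference_results": [{"predicted_value": {"snow": 1.7, "tire": 1.4, "traction": 1.2}}]}`, which can be indexed into a <<sparse-vector,sparse vector>> field.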
tag::inference-config-text-similarity[]
Text similarity takes an input sequence and compares it with another input sequence. This is commonly referred to
as cross-encoding. This task is useful for ranking document text when comparing it to another provided text input.

View file

@ -395,10 +395,10 @@ the model definition is not supplied.
(Required, object)
The default configuration for inference. This can be: `regression`,
`classification`, `fill_mask`, `ner`, `question_answering`,
`text_classification`, `text_embedding` or `zero_shot_classification`.
`text_classification`, `text_embedding`, `text_expansion` or `zero_shot_classification`.
If `regression` or `classification`, it must match the `target_type` of the
underlying `definition.trained_model`. If `fill_mask`, `ner`,
`question_answering`, `text_classification`, or `text_embedding`; the
`question_answering`, `text_classification`, `text_embedding` or `text_expansion`; the
`model_type` must be `pytorch`.
+
.Properties of `inference_config`
@ -592,6 +592,25 @@ Refer to <<tokenization-properties>> to review the properties of the
`tokenization` object.
=====
`text_expansion`:::
(Object, optional)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-expansion]
+
.Properties of text_expansion inference
[%collapsible%open]
=====
`results_field`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]
`tokenization`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization]
+
Refer to <<tokenization-properties>> to review the properties of the
`tokenization` object.
=====
`text_similarity`:::
(Object, optional)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarity]

View file

@ -77,7 +77,7 @@ and latency of the network interconnections between all nodes in the cluster.
You must therefore ensure that the storage and networking available to the
nodes in your cluster are good enough to meet your performance goals.
[[dangling-indices]]
[[dangling-index]]
==== Dangling indices
When a node joins the cluster, if it finds any shards stored in its local

View file

@ -1,6 +1,7 @@
[[modules-gateway]]
=== Local gateway settings
[[dangling-indices]]
The local gateway stores the cluster state and shard data across full
cluster restarts.

View file

@ -287,7 +287,7 @@ include::network/tracers.asciidoc[]
include::network/threading.asciidoc[]
[[readiness-tcp-port]]
[[tcp-readiness-port]]
==== TCP readiness port
preview::[]

View file

@ -46,6 +46,8 @@ The following additional roles are available:
* `voting_only`
[[coordinating-only-node]]If you leave `node.roles` unset, then the node is considered to be a <<coordinating-only-node-role,coordinating only node>>.
[IMPORTANT]
====
If you set `node.roles`, ensure you specify every node role your cluster needs.

View file

@ -13,6 +13,10 @@ All of the monitoring metrics are stored in {es}, which enables you to easily
visualize the data in {kib}. By default, the monitoring metrics are stored in
local indices.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
TIP: In production, we strongly recommend using a separate monitoring cluster.
Using a separate monitoring cluster prevents production cluster outages from
impacting your ability to access your monitoring data. It also prevents

View file

@ -359,7 +359,7 @@ node.roles: [ ingest ]
----
[discrete]
[[coordinating-only-node]]
[[coordinating-only-node-role]]
==== Coordinating only node
If you take away the ability to be able to handle master duties, to hold data,

View file

@ -19,6 +19,7 @@ CAUTION: Setting your own JVM options is generally not recommended and could neg
impact performance and stability. Using the {es}-provided defaults
is recommended in most circumstances.
[[readiness-tcp-port]]
NOTE: Do not modify the root `jvm.options` file. Use files in `jvm.options.d/` instead.
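For example, a minimal sketch of a custom options file (the file name is illustrative; any file with an `.options` extension under `jvm.options.d/` is read):

----
# jvm.options.d/heap.options
-Xms4g
-Xmx4g
----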
[[jvm-options-syntax]]
@ -153,7 +154,7 @@ options. We do not recommend using `ES_JAVA_OPTS` in production.
NOTE: If you are running {es} as a Windows service, you can change the heap size
using the service manager. See <<windows-service>>.
[[heap-dump-path]]
[[heap-dump-path-setting]]
include::important-settings/heap-dump-path.asciidoc[leveloffset=-1]
[[gc-logging]]

View file

@ -41,6 +41,7 @@ include::important-settings/discovery-settings.asciidoc[]
include::important-settings/heap-size.asciidoc[]
[[heap-dump-path]]
include::important-settings/heap-dump-path.asciidoc[]
include::important-settings/gc-logging.asciidoc[]

View file

@ -6,6 +6,10 @@
This section provides a series of troubleshooting solutions aimed at helping users
fix problems that an {es} deployment might encounter.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
[discrete]
[[troubleshooting-general]]
=== General

View file

@ -12,6 +12,10 @@ memory pressure if usage consistently exceeds 85%.
See https://www.youtube.com/watch?v=k3wYlRVbMSw[this video] for a walkthrough
of diagnosing circuit breaker errors.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
[discrete]
[[diagnose-circuit-breaker-errors]]
==== Diagnose circuit breaker errors

View file

@ -11,3 +11,7 @@ include::{es-ref-dir}/tab-widgets/troubleshooting/data/diagnose-unassigned-shard
See https://www.youtube.com/watch?v=v2mbeSd1vTQ[this video]
for a walkthrough of monitoring allocation health.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****

View file

@ -21,6 +21,10 @@ usage falls below the <<cluster-routing-watermark-high,high disk watermark>>.
To achieve this, {es} attempts to rebalance some of the affected node's shards
to other nodes in the same data tier.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
[[fix-watermark-errors-rebalance]]
==== Monitor rebalancing

View file

@ -11,6 +11,10 @@ depleted, {es} will reject search requests until more threads are available.
You might experience high CPU usage if a <<data-tiers,data tier>>, and therefore the nodes assigned to that tier, is experiencing more traffic than other tiers. This imbalance in resource utilization is also known as <<hotspotting,hot spotting>>.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
[discrete]
[[diagnose-high-cpu-usage]]
==== Diagnose high CPU usage

View file

@ -6,6 +6,10 @@ High JVM memory usage can degrade cluster performance and trigger
taking steps to reduce memory pressure if a node's JVM memory usage consistently
exceeds 85%.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
[discrete]
[[diagnose-high-jvm-memory-pressure]]
==== Diagnose high JVM memory pressure

View file

@ -11,6 +11,10 @@ may occur in {es} when resource utilizations are unevenly distributed across
ongoing, significantly uneven utilization may lead to cluster bottlenecks
and should be reviewed.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
See link:https://www.youtube.com/watch?v=Q5ODJ5nIKAM[this video] for a walkthrough of troubleshooting a hot spotting issue.
[discrete]

View file

@ -22,6 +22,10 @@ the remaining problems so management and cleanup activities can proceed.
See https://www.youtube.com/watch?v=v2mbeSd1vTQ[this video]
for a walkthrough of monitoring allocation health.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
[discrete]
[[diagnose-cluster-status]]
==== Diagnose your cluster status

View file

@ -12,6 +12,10 @@ thread pool returns a `TOO_MANY_REQUESTS` error message.
* High <<index-modules-indexing-pressure,indexing pressure>> that exceeds the
<<memory-limits,`indexing_pressure.memory.limit`>>.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
[discrete]
[[check-rejected-tasks]]
==== Check rejected tasks
@ -69,7 +73,7 @@ These stats are cumulative from node startup.
Indexing pressure rejections appear as an
`EsRejectedExecutionException`, and indicate that they were rejected due
to `coordinating_and_primary_bytes`, `coordinating`, `primary`, or `replica`.
to `combined_coordinating_and_primary`, `coordinating`, `primary`, or `replica`.
These errors are often related to <<task-queue-backlog,backlogged tasks>>,
<<docs-bulk,bulk index>> sizing, or the ingest target's

View file

@ -16,5 +16,8 @@ In order to fix this follow the next steps:
include::{es-ref-dir}/tab-widgets/troubleshooting/data/increase-cluster-shard-limit-widget.asciidoc[]
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****

View file

@ -14,5 +14,9 @@ In order to fix this follow the next steps:
include::{es-ref-dir}/tab-widgets/troubleshooting/data/total-shards-per-node-widget.asciidoc[]
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****

View file

@ -17,5 +17,8 @@ In order to fix this follow the next steps:
include::{es-ref-dir}/tab-widgets/troubleshooting/data/increase-tier-capacity-widget.asciidoc[]
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****

View file

@ -15,6 +15,10 @@ https://discuss.elastic.co[Elastic Discuss] to minimize turnaround time.
See https://www.youtube.com/watch?v=Bb6SaqhqYHw[this video] for a walkthrough of capturing an {es} diagnostic.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
[discrete]
[[diagnostic-tool-requirements]]
=== Requirements

View file

@ -3,6 +3,10 @@
This guide describes how to fix common errors and problems with {es} clusters.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
<<fix-watermark-errors,Watermark errors>>::
Fix watermark errors that occur when a data node is critically low on disk space
and has reached the flood-stage disk usage watermark.

View file

@ -14,5 +14,8 @@ information about the problem:
include::{es-ref-dir}/tab-widgets/troubleshooting/snapshot/repeated-snapshot-failures-widget.asciidoc[]
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****

View file

@ -8,3 +8,7 @@ The current shards capacity of the cluster is available in the
<<health-api-response-details-shards-capacity, health API shards capacity section>>.
include::{es-ref-dir}/tab-widgets/troubleshooting/troubleshooting-shards-capacity-widget.asciidoc[]
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****

View file

@ -7,6 +7,10 @@ Elasticsearch balances shards across data tiers to achieve a good compromise bet
* disk usage
* write load (for indices in data streams)
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
Elasticsearch does not take into account the amount or complexity of search queries when rebalancing shards.
This is indirectly achieved by balancing shard count and disk usage.

View file

@ -17,6 +17,10 @@ logs.
* The master may appear busy due to frequent cluster state updates.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
To troubleshoot a cluster in this state, first ensure the cluster has a
<<discovery-troubleshooting,stable master>>. Next, focus on the nodes
unexpectedly leaving the cluster ahead of all other issues. It will not be

View file

@ -92,7 +92,7 @@ public final class IngestGeoIpMetadata implements Metadata.ProjectCustom {
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) {
return Iterators.concat(ChunkedToXContentHelper.xContentValuesMap(DATABASES_FIELD.getPreferredName(), databases));
return Iterators.concat(ChunkedToXContentHelper.xContentObjectFields(DATABASES_FIELD.getPreferredName(), databases));
}
@Override

View file

@ -434,7 +434,7 @@ class S3Service implements Closeable {
public void onFileChanged(Path file) {
if (file.equals(webIdentityTokenFileSymlink)) {
LOGGER.debug("WS web identity token file [{}] changed, updating credentials", file);
credentialsProvider.refresh();
SocketAccess.doPrivilegedVoid(credentialsProvider::refresh);
}
}
});

View file

@ -84,9 +84,6 @@ tests:
issue: https://github.com/elastic/elasticsearch/issues/115816
- class: org.elasticsearch.xpack.application.connector.ConnectorIndexServiceTests
issue: https://github.com/elastic/elasticsearch/issues/116087
- class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT
method: testLookbackWithIndicesOptions
issue: https://github.com/elastic/elasticsearch/issues/116127
- class: org.elasticsearch.xpack.test.rest.XPackRestIT
method: test {p0=transform/transforms_start_stop/Test start already started transform}
issue: https://github.com/elastic/elasticsearch/issues/98802
@ -152,8 +149,6 @@ tests:
- class: org.elasticsearch.test.rest.yaml.CcsCommonYamlTestSuiteIT
method: test {p0=search.highlight/50_synthetic_source/text multi unified from vectors}
issue: https://github.com/elastic/elasticsearch/issues/117815
- class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT
issue: https://github.com/elastic/elasticsearch/issues/111319
- class: org.elasticsearch.xpack.esql.plugin.ClusterRequestTests
method: testFallbackIndicesOptions
issue: https://github.com/elastic/elasticsearch/issues/117937
@ -265,6 +260,18 @@ tests:
- class: org.elasticsearch.script.mustache.SearchTemplateRequestTests
method: testSerialization
issue: https://github.com/elastic/elasticsearch/issues/119822
- class: org.elasticsearch.index.rankeval.RankEvalRequestTests
method: testSerialization
issue: https://github.com/elastic/elasticsearch/issues/119859
- class: org.elasticsearch.index.rankeval.RankEvalRequestTests
method: testEqualsAndHashcode
issue: https://github.com/elastic/elasticsearch/issues/119860
- class: org.elasticsearch.index.rankeval.RankEvalRequestTests
method: testConcurrentSerialization
issue: https://github.com/elastic/elasticsearch/issues/119861
- class: org.elasticsearch.index.rankeval.RankEvalRequestTests
method: testConcurrentEquals
issue: https://github.com/elastic/elasticsearch/issues/119862
# Examples:
#

View file

@ -27,7 +27,11 @@ public class HdfsClassPatcher {
"org/apache/hadoop/util/ShutdownHookManager.class",
ShutdownHookManagerPatcher::new,
"org/apache/hadoop/util/Shell.class",
ShellPatcher::new
ShellPatcher::new,
"org/apache/hadoop/security/UserGroupInformation.class",
SubjectGetSubjectPatcher::new,
"org/apache/hadoop/security/authentication/client/KerberosAuthenticator.class",
SubjectGetSubjectPatcher::new
);
public static void main(String[] args) throws Exception {

View file

@ -0,0 +1,60 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.hdfs.patch;
import org.objectweb.asm.ClassVisitor;
import org.objectweb.asm.ClassWriter;
import org.objectweb.asm.MethodVisitor;
import org.objectweb.asm.Type;
import static org.objectweb.asm.Opcodes.ASM9;
import static org.objectweb.asm.Opcodes.INVOKESTATIC;
import static org.objectweb.asm.Opcodes.POP;
class SubjectGetSubjectPatcher extends ClassVisitor {
SubjectGetSubjectPatcher(ClassWriter classWriter) {
super(ASM9, classWriter);
}
@Override
public MethodVisitor visitMethod(int access, String name, String descriptor, String signature, String[] exceptions) {
return new ReplaceCallMethodVisitor(super.visitMethod(access, name, descriptor, signature, exceptions), name, access, descriptor);
}
/**
* Replaces calls to Subject.getSubject(context); with calls to Subject.current();
*/
private static class ReplaceCallMethodVisitor extends MethodVisitor {
private static final String SUBJECT_CLASS_INTERNAL_NAME = "javax/security/auth/Subject";
private static final String METHOD_NAME = "getSubject";
ReplaceCallMethodVisitor(MethodVisitor methodVisitor, String name, int access, String descriptor) {
super(ASM9, methodVisitor);
}
@Override
public void visitMethodInsn(int opcode, String owner, String name, String descriptor, boolean isInterface) {
if (opcode == INVOKESTATIC && SUBJECT_CLASS_INTERNAL_NAME.equals(owner) && METHOD_NAME.equals(name)) {
// Get rid of the extra arg on the stack
mv.visitInsn(POP);
// Call Subject.current()
mv.visitMethodInsn(
INVOKESTATIC,
SUBJECT_CLASS_INTERNAL_NAME,
"current",
Type.getMethodDescriptor(Type.getObjectType(SUBJECT_CLASS_INTERNAL_NAME)),
false
);
} else {
super.visitMethodInsn(opcode, owner, name, descriptor, isInterface);
}
}
}
}
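For orientation, a minimal source-level sketch of what this rewrite amounts to; the caller class below is hypothetical and only illustrates the before/after call shapes:

import java.security.AccessController;
import javax.security.auth.Subject;

class SubjectCallSketch {
    // Original Hadoop-style call: obtains the Subject from an AccessControlContext,
    // an API that is deprecated for removal on newer JDKs.
    static Subject original() {
        return Subject.getSubject(AccessController.getContext());
    }

    // What the patched bytecode effectively does: the context argument is popped
    // from the operand stack and Subject.current() is invoked instead.
    static Subject patched() {
        return Subject.current();
    }
}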

View file

@ -1,61 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.upgrades;
import com.carrotsearch.randomizedtesting.annotations.Name;
import org.elasticsearch.client.Request;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.features.FeatureService;
import org.junit.Before;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.hasSize;
public class ClusterFeatureMigrationIT extends AbstractRollingUpgradeTestCase {
@Before
public void checkMigrationVersion() {
assumeFalse(
"This checks migrations from before cluster features were introduced",
oldClusterHasFeature(FeatureService.FEATURES_SUPPORTED)
);
}
public ClusterFeatureMigrationIT(@Name("upgradedNodes") int upgradedNodes) {
super(upgradedNodes);
}
public void testClusterFeatureMigration() throws IOException {
if (isUpgradedCluster()) {
// check the nodes all have a feature in their cluster state (there should always be features_supported)
var response = entityAsMap(adminClient().performRequest(new Request("GET", "/_cluster/state/nodes")));
List<?> nodeFeatures = (List<?>) XContentMapValues.extractValue("nodes_features", response);
assertThat(nodeFeatures, hasSize(adminClient().getNodes().size()));
Map<String, List<?>> features = nodeFeatures.stream()
.map(o -> (Map<?, ?>) o)
.collect(Collectors.toMap(m -> (String) m.get("node_id"), m -> (List<?>) m.get("features")));
Set<String> missing = features.entrySet()
.stream()
.filter(e -> e.getValue().contains(FeatureService.FEATURES_SUPPORTED.id()) == false)
.map(Map.Entry::getKey)
.collect(Collectors.toSet());
assertThat(missing + " out of " + features.keySet() + " does not have the required feature", missing, empty());
}
}
}

View file

@ -26,6 +26,13 @@
}
}
]
},
"params": {
"include_deleted": {
"type": "boolean",
"default": false,
"description": "A flag indicating whether to return connectors that have been soft-deleted."
}
}
}
}

View file

@ -47,6 +47,11 @@
"query": {
"type": "string",
"description": "A search string for querying connectors, filtering results by matching against connector names, descriptions, and index names"
},
"include_deleted": {
"type": "boolean",
"default": false,
"description": "A flag indicating whether to return connectors that have been soft-deleted."
}
}
}

View file

@ -30,7 +30,6 @@ public class ClusterFeaturesIT extends ESIntegTestCase {
FeatureService service = internalCluster().getCurrentMasterNodeInstance(FeatureService.class);
assertThat(service.getNodeFeatures(), hasKey(FeatureService.FEATURES_SUPPORTED.id()));
assertThat(service.getNodeFeatures(), hasKey(FeatureService.TEST_FEATURES_ENABLED.id()));
// check the nodes all have a feature in their cluster state (there should always be features_supported)
@ -38,7 +37,7 @@ public class ClusterFeaturesIT extends ESIntegTestCase {
var features = response.getState().clusterFeatures().nodeFeatures();
Set<String> missing = features.entrySet()
.stream()
.filter(e -> e.getValue().contains(FeatureService.FEATURES_SUPPORTED.id()) == false)
.filter(e -> e.getValue().contains(FeatureService.TEST_FEATURES_ENABLED.id()) == false)
.map(Map.Entry::getKey)
.collect(Collectors.toSet());
assertThat(missing + " out of " + features.keySet() + " does not have the required feature", missing, empty());

View file

@ -37,7 +37,7 @@ import java.util.Iterator;
import java.util.Locale;
import static org.elasticsearch.cluster.routing.allocation.AbstractAllocationDecision.discoveryNodeToXContent;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.singleChunk;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.chunk;
/**
* A {@code ClusterAllocationExplanation} is an explanation of why a shard is unassigned,
@ -169,7 +169,7 @@ public final class ClusterAllocationExplanation implements ChunkedToXContentObje
}
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) {
return Iterators.concat(singleChunk((builder, p) -> {
return Iterators.concat(chunk((builder, p) -> {
builder.startObject();
if (isSpecificShard() == false) {

View file

@ -33,8 +33,8 @@ import java.util.Map;
import java.util.Objects;
import java.util.Set;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.chunk;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.endObject;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.singleChunk;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.startObject;
public class DesiredBalanceResponse extends ActionResponse implements ChunkedToXContentObject {
@ -88,7 +88,7 @@ public class DesiredBalanceResponse extends ActionResponse implements ChunkedToX
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) {
return Iterators.concat(
singleChunk(
chunk(
(builder, p) -> builder.startObject()
.field("stats", stats)
.field("cluster_balance_stats", clusterBalanceStats)
@ -101,16 +101,16 @@ public class DesiredBalanceResponse extends ActionResponse implements ChunkedToX
Iterators.flatMap(
indexEntry.getValue().entrySet().iterator(),
shardEntry -> Iterators.concat(
singleChunk((builder, p) -> builder.field(String.valueOf(shardEntry.getKey()))),
chunk((builder, p) -> builder.field(String.valueOf(shardEntry.getKey()))),
shardEntry.getValue().toXContentChunked(params)
)
),
endObject()
)
),
singleChunk((builder, p) -> builder.endObject().startObject("cluster_info")),
chunk((builder, p) -> builder.endObject().startObject("cluster_info")),
clusterInfo.toXContentChunked(params),
singleChunk((builder, p) -> builder.endObject().endObject())
chunk((builder, p) -> builder.endObject().endObject())
);
}
@ -173,9 +173,9 @@ public class DesiredBalanceResponse extends ActionResponse implements ChunkedToX
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) {
return Iterators.concat(
singleChunk((builder, p) -> builder.startObject().startArray("current")),
chunk((builder, p) -> builder.startObject().startArray("current")),
current().iterator(),
singleChunk((builder, p) -> builder.endArray().field("desired").value(desired, p).endObject())
chunk((builder, p) -> builder.endArray().field("desired").value(desired, p).endObject())
);
}
}

View file

@ -45,7 +45,7 @@ import java.util.Iterator;
import java.util.Map;
import java.util.Objects;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.singleChunk;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.chunk;
/**
* Node statistics (dynamic, changes depending on when created).
@ -347,7 +347,7 @@ public class NodeStats extends BaseNodeResponse implements ChunkedToXContent {
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params outerParams) {
return Iterators.concat(singleChunk((builder, params) -> {
return Iterators.concat(chunk((builder, params) -> {
builder.field("name", getNode().getName());
builder.field("transport_address", getNode().getAddress().toString());
builder.field("host", getNode().getHostName());
@ -369,9 +369,7 @@ public class NodeStats extends BaseNodeResponse implements ChunkedToXContent {
return builder;
}),
ifPresent(getIndices()).toXContentChunked(outerParams),
singleChunk(
(builder, p) -> builder.value(ifPresent(getOs()), p).value(ifPresent(getProcess()), p).value(ifPresent(getJvm()), p)
),
chunk((builder, p) -> builder.value(ifPresent(getOs()), p).value(ifPresent(getProcess()), p).value(ifPresent(getJvm()), p)),
ifPresent(getThreadPool()).toXContentChunked(outerParams),
singleChunkIfPresent(getFs()),
ifPresent(getTransport()).toXContentChunked(outerParams),
@ -382,7 +380,7 @@ public class NodeStats extends BaseNodeResponse implements ChunkedToXContent {
ifPresent(getIngestStats()).toXContentChunked(outerParams),
singleChunkIfPresent(getAdaptiveSelectionStats()),
singleChunkIfPresent(getScriptCacheStats()),
singleChunk(
chunk(
(builder, p) -> builder.value(ifPresent(getIndexingPressureStats()), p)
.value(ifPresent(getRepositoriesStats()), p)
.value(ifPresent(getNodeAllocationStats()), p)
@ -399,6 +397,6 @@ public class NodeStats extends BaseNodeResponse implements ChunkedToXContent {
}
private static Iterator<ToXContent> singleChunkIfPresent(ToXContent toXContent) {
return toXContent == null ? Collections.emptyIterator() : ChunkedToXContentHelper.singleChunk(toXContent);
return toXContent == null ? Collections.emptyIterator() : ChunkedToXContentHelper.chunk(toXContent);
}
}

View file

@ -97,7 +97,7 @@ public class ClusterRerouteResponse extends ActionResponse implements IsAcknowle
return Iterators.concat(
Iterators.single((builder, params) -> builder.startObject().field(ACKNOWLEDGED_KEY, isAcknowledged())),
emitState(outerParams)
? ChunkedToXContentHelper.wrapWithObject("state", state.toXContentChunked(outerParams))
? ChunkedToXContentHelper.object("state", state.toXContentChunked(outerParams))
: Collections.emptyIterator(),
Iterators.single((builder, params) -> {
if (params.paramAsBoolean("explain", false)) {

View file

@ -9,12 +9,14 @@
package org.elasticsearch.action.admin.indices.resolve;
import org.elasticsearch.TransportVersion;
import org.elasticsearch.TransportVersions;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.tasks.CancellableTask;
@ -30,6 +32,7 @@ import java.util.Objects;
public class ResolveClusterActionRequest extends ActionRequest implements IndicesRequest.Replaceable {
public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpen();
public static final String TRANSPORT_VERSION_ERROR_MESSAGE_PREFIX = "ResolveClusterAction requires at least version";
private String[] names;
/*
@ -65,12 +68,7 @@ public class ResolveClusterActionRequest extends ActionRequest implements Indice
public ResolveClusterActionRequest(StreamInput in) throws IOException {
super(in);
if (in.getTransportVersion().before(TransportVersions.V_8_13_0)) {
throw new UnsupportedOperationException(
"ResolveClusterAction requires at least version "
+ TransportVersions.V_8_13_0.toReleaseVersion()
+ " but was "
+ in.getTransportVersion().toReleaseVersion()
);
throw new UnsupportedOperationException(createVersionErrorMessage(in.getTransportVersion()));
}
this.names = in.readStringArray();
this.indicesOptions = IndicesOptions.readIndicesOptions(in);
@ -81,17 +79,21 @@ public class ResolveClusterActionRequest extends ActionRequest implements Indice
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
if (out.getTransportVersion().before(TransportVersions.V_8_13_0)) {
throw new UnsupportedOperationException(
"ResolveClusterAction requires at least version "
+ TransportVersions.V_8_13_0.toReleaseVersion()
+ " but was "
+ out.getTransportVersion().toReleaseVersion()
);
throw new UnsupportedOperationException(createVersionErrorMessage(out.getTransportVersion()));
}
out.writeStringArray(names);
indicesOptions.writeIndicesOptions(out);
}
private String createVersionErrorMessage(TransportVersion versionFound) {
return Strings.format(
"%s %s but was %s",
TRANSPORT_VERSION_ERROR_MESSAGE_PREFIX,
TransportVersions.V_8_13_0.toReleaseVersion(),
versionFound.toReleaseVersion()
);
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;

View file

@ -51,7 +51,6 @@ import static org.elasticsearch.action.search.TransportSearchHelper.checkCCSVers
public class TransportResolveClusterAction extends HandledTransportAction<ResolveClusterActionRequest, ResolveClusterActionResponse> {
private static final Logger logger = LogManager.getLogger(TransportResolveClusterAction.class);
private static final String TRANSPORT_VERSION_ERROR_MESSAGE = "ResolveClusterAction requires at least Transport Version";
public static final String NAME = "indices:admin/resolve/cluster";
public static final ActionType<ResolveClusterActionResponse> TYPE = new ActionType<>(NAME);
@ -175,7 +174,13 @@ public class TransportResolveClusterAction extends HandledTransportAction<Resolv
failure,
ElasticsearchSecurityException.class
) instanceof ElasticsearchSecurityException ese) {
clusterInfoMap.put(clusterAlias, new ResolveClusterInfo(true, skipUnavailable, ese.getMessage()));
/*
* some ElasticsearchSecurityExceptions come from the local cluster security interceptor after you've
* issued the client.execute call but before any call went to the remote cluster, so with an
* ElasticsearchSecurityException you can't tell whether the remote cluster is available or not, so mark
* it as connected=false
*/
clusterInfoMap.put(clusterAlias, new ResolveClusterInfo(false, skipUnavailable, ese.getMessage()));
} else if (ExceptionsHelper.unwrap(failure, IndexNotFoundException.class) instanceof IndexNotFoundException infe) {
clusterInfoMap.put(clusterAlias, new ResolveClusterInfo(true, skipUnavailable, infe.getMessage()));
} else {
@ -184,7 +189,7 @@ public class TransportResolveClusterAction extends HandledTransportAction<Resolv
// this error at the Transport layer BEFORE it sends the request to the remote cluster, since there
// are version guards on the Writeables for this Action, namely ResolveClusterActionRequest.writeTo
if (cause instanceof UnsupportedOperationException
&& cause.getMessage().contains(TRANSPORT_VERSION_ERROR_MESSAGE)) {
&& cause.getMessage().contains(ResolveClusterActionRequest.TRANSPORT_VERSION_ERROR_MESSAGE_PREFIX)) {
// Since this cluster does not have _resolve/cluster, we call the _resolve/index
// endpoint to fill in the matching_indices field of the response for that cluster
ResolveIndexAction.Request resolveIndexRequest = new ResolveIndexAction.Request(

View file

@ -78,9 +78,7 @@ public class IndicesSegmentResponse extends ChunkedBroadcastResponse {
getIndices().values().iterator(),
indexSegments -> Iterators.concat(
ChunkedToXContentHelper.singleChunk(
(builder, p) -> builder.startObject(indexSegments.getIndex()).startObject(Fields.SHARDS)
),
ChunkedToXContentHelper.chunk((builder, p) -> builder.startObject(indexSegments.getIndex()).startObject(Fields.SHARDS)),
Iterators.flatMap(
indexSegments.iterator(),
indexSegment -> Iterators.concat(
@ -90,7 +88,7 @@ public class IndicesSegmentResponse extends ChunkedBroadcastResponse {
indexSegment.iterator(),
shardSegments -> Iterators.concat(
ChunkedToXContentHelper.singleChunk((builder, p) -> {
ChunkedToXContentHelper.chunk((builder, p) -> {
builder.startObject();
builder.startObject(Fields.ROUTING);
@ -112,7 +110,7 @@ public class IndicesSegmentResponse extends ChunkedBroadcastResponse {
shardSegments.iterator(),
segment -> Iterators.concat(
ChunkedToXContentHelper.singleChunk((builder, p) -> {
ChunkedToXContentHelper.chunk((builder, p) -> {
builder.startObject(segment.getName());
builder.field(Fields.GENERATION, segment.getGeneration());
builder.field(Fields.NUM_DOCS, segment.getNumDocs());
@ -132,7 +130,7 @@ public class IndicesSegmentResponse extends ChunkedBroadcastResponse {
return builder;
}),
getSegmentSortChunks(segment.getSegmentSort()),
ChunkedToXContentHelper.singleChunk((builder, p) -> {
ChunkedToXContentHelper.chunk((builder, p) -> {
if (segment.attributes != null && segment.attributes.isEmpty() == false) {
builder.field("attributes", segment.attributes);
}
@ -141,13 +139,13 @@ public class IndicesSegmentResponse extends ChunkedBroadcastResponse {
})
)
),
ChunkedToXContentHelper.singleChunk((builder, p) -> builder.endObject().endObject())
ChunkedToXContentHelper.chunk((builder, p) -> builder.endObject().endObject())
)
),
ChunkedToXContentHelper.endArray()
)
),
ChunkedToXContentHelper.singleChunk((builder, p) -> builder.endObject().endObject())
ChunkedToXContentHelper.chunk((builder, p) -> builder.endObject().endObject())
)
),
ChunkedToXContentHelper.endObject()

View file

@ -50,9 +50,9 @@ public class FieldUsageStatsResponse extends ChunkedBroadcastResponse {
return Iterators.flatMap(
stats.entrySet().stream().sorted(Map.Entry.comparingByKey()).iterator(),
entry -> Iterators.concat(
ChunkedToXContentHelper.singleChunk((builder, p) -> builder.startObject(entry.getKey()).startArray("shards")),
ChunkedToXContentHelper.chunk((builder, p) -> builder.startObject(entry.getKey()).startArray("shards")),
entry.getValue().iterator(),
ChunkedToXContentHelper.singleChunk((builder, p) -> builder.endArray().endObject())
ChunkedToXContentHelper.chunk((builder, p) -> builder.endArray().endObject())
)
);
}

View file

@ -206,7 +206,7 @@ public class IndicesStatsResponse extends ChunkedBroadcastResponse {
if (level == ClusterStatsLevel.INDICES || level == ClusterStatsLevel.SHARDS) {
return Iterators.concat(
ChunkedToXContentHelper.singleChunk((builder, p) -> {
ChunkedToXContentHelper.chunk((builder, p) -> {
commonStats(builder, p);
return builder.startObject(Fields.INDICES);
}),
@ -214,7 +214,7 @@ public class IndicesStatsResponse extends ChunkedBroadcastResponse {
getIndices().values().iterator(),
indexStats -> Iterators.concat(
ChunkedToXContentHelper.singleChunk((builder, p) -> {
ChunkedToXContentHelper.chunk((builder, p) -> {
builder.startObject(indexStats.getIndex());
builder.field("uuid", indexStats.getUuid());
if (indexStats.getHealth() != null) {
@ -259,7 +259,7 @@ public class IndicesStatsResponse extends ChunkedBroadcastResponse {
ChunkedToXContentHelper.endObject()
);
} else {
return ChunkedToXContentHelper.singleChunk((builder, p) -> {
return ChunkedToXContentHelper.chunk((builder, p) -> {
commonStats(builder, p);
return builder;
});

View file

@ -219,7 +219,6 @@ public class TransportSimulateIndexTemplateAction extends TransportMasterNodeRea
.build();
final IndexMetadata indexMetadata = IndexMetadata.builder(indexName)
// handle mixed-cluster states by passing in minTransportVersion to reset event.ingested range to UNKNOWN if an older version
.eventIngestedRange(getEventIngestedRange(indexName, simulatedState))
.settings(dummySettings)
.build();
@ -308,7 +307,6 @@ public class TransportSimulateIndexTemplateAction extends TransportMasterNodeRea
dummySettings.put(templateSettings);
final IndexMetadata indexMetadata = IndexMetadata.builder(indexName)
// handle mixed-cluster states by passing in minTransportVersion to reset event.ingested range to UNKNOWN if an older version
.eventIngestedRange(getEventIngestedRange(indexName, simulatedState))
.settings(dummySettings)
.build();

View file

@ -551,9 +551,8 @@ public final class SearchPhaseController {
assert numReducePhases >= 0 : "num reduce phases must be >= 0 but was: " + numReducePhases;
numReducePhases++; // increment for this phase
if (queryResults.isEmpty()) { // early terminate we have nothing to reduce
final TotalHits totalHits = topDocsStats.getTotalHits();
return new ReducedQueryPhase(
totalHits,
topDocsStats.getTotalHits(),
topDocsStats.fetchHits,
topDocsStats.getMaxScore(),
false,
@ -570,8 +569,7 @@ public final class SearchPhaseController {
true
);
}
int total = queryResults.size();
final Collection<SearchPhaseResult> nonNullResults = new ArrayList<>();
final List<QuerySearchResult> nonNullResults = new ArrayList<>();
boolean hasSuggest = false;
boolean hasProfileResults = false;
for (SearchPhaseResult queryResult : queryResults) {
@ -581,12 +579,11 @@ public final class SearchPhaseController {
}
hasSuggest |= res.suggest() != null;
hasProfileResults |= res.hasProfileResults();
nonNullResults.add(queryResult);
nonNullResults.add(res);
}
queryResults = nonNullResults;
validateMergeSortValueFormats(queryResults);
if (queryResults.isEmpty()) {
var ex = new IllegalStateException("must have at least one non-empty search result, got 0 out of " + total);
validateMergeSortValueFormats(nonNullResults);
if (nonNullResults.isEmpty()) {
var ex = new IllegalStateException("must have at least one non-empty search result, got 0 out of " + queryResults.size());
assert false : ex;
throw ex;
}
@ -594,13 +591,12 @@ public final class SearchPhaseController {
// count the total (we use the query result provider here, since we might not get any hits (we scrolled past them))
final Map<String, List<Suggestion<?>>> groupedSuggestions = hasSuggest ? new HashMap<>() : Collections.emptyMap();
final Map<String, SearchProfileQueryPhaseResult> profileShardResults = hasProfileResults
? Maps.newMapWithExpectedSize(queryResults.size())
? Maps.newMapWithExpectedSize(nonNullResults.size())
: Collections.emptyMap();
int from = 0;
int size = 0;
DocValueFormat[] sortValueFormats = null;
for (SearchPhaseResult entry : queryResults) {
QuerySearchResult result = entry.queryResult();
for (QuerySearchResult result : nonNullResults) {
from = result.from();
// sorted queries can set the size to 0 if they have enough competitive hits.
size = Math.max(result.size(), size);
@ -611,8 +607,7 @@ public final class SearchPhaseController {
if (hasSuggest) {
assert result.suggest() != null;
for (Suggestion<? extends Suggestion.Entry<? extends Suggestion.Entry.Option>> suggestion : result.suggest()) {
List<Suggestion<?>> suggestionList = groupedSuggestions.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>());
suggestionList.add(suggestion);
groupedSuggestions.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>()).add(suggestion);
if (suggestion instanceof CompletionSuggestion completionSuggestion) {
completionSuggestion.setShardIndex(result.getShardIndex());
}
@ -620,53 +615,48 @@ public final class SearchPhaseController {
}
assert bufferedTopDocs.isEmpty() || result.hasConsumedTopDocs() : "firstResult has no aggs but we got non null buffered aggs?";
if (hasProfileResults) {
String key = result.getSearchShardTarget().toString();
profileShardResults.put(key, result.consumeProfileResult());
profileShardResults.put(result.getSearchShardTarget().toString(), result.consumeProfileResult());
}
}
final Suggest reducedSuggest;
final List<CompletionSuggestion> reducedCompletionSuggestions;
if (groupedSuggestions.isEmpty()) {
reducedSuggest = null;
reducedCompletionSuggestions = Collections.emptyList();
} else {
reducedSuggest = new Suggest(Suggest.reduce(groupedSuggestions));
reducedCompletionSuggestions = reducedSuggest.filter(CompletionSuggestion.class);
}
final InternalAggregations aggregations = bufferedAggs == null
? null
: InternalAggregations.topLevelReduceDelayable(
bufferedAggs,
performFinalReduce ? aggReduceContextBuilder.forFinalReduction() : aggReduceContextBuilder.forPartialReduction()
);
final SearchProfileResultsBuilder profileBuilder = profileShardResults.isEmpty()
? null
: new SearchProfileResultsBuilder(profileShardResults);
final Suggest reducedSuggest = groupedSuggestions.isEmpty() ? null : new Suggest(Suggest.reduce(groupedSuggestions));
final SortedTopDocs sortedTopDocs;
if (queryPhaseRankCoordinatorContext == null) {
sortedTopDocs = sortDocs(isScrollRequest, bufferedTopDocs, from, size, reducedCompletionSuggestions);
} else {
ScoreDoc[] rankedDocs = queryPhaseRankCoordinatorContext.rankQueryPhaseResults(
queryResults.stream().map(SearchPhaseResult::queryResult).toList(),
topDocsStats
sortedTopDocs = sortDocs(
isScrollRequest,
bufferedTopDocs,
from,
size,
reducedSuggest == null ? Collections.emptyList() : reducedSuggest.filter(CompletionSuggestion.class)
);
} else {
sortedTopDocs = new SortedTopDocs(
queryPhaseRankCoordinatorContext.rankQueryPhaseResults(nonNullResults, topDocsStats),
false,
null,
null,
null,
0
);
sortedTopDocs = new SortedTopDocs(rankedDocs, false, null, null, null, 0);
size = sortedTopDocs.scoreDocs.length;
// we need to reset from here as pagination and result trimming has already taken place
// within the `QueryPhaseRankCoordinatorContext#rankQueryPhaseResults` and we don't want
// to apply it again in the `getHits` method.
from = 0;
}
final TotalHits totalHits = topDocsStats.getTotalHits();
return new ReducedQueryPhase(
totalHits,
topDocsStats.getTotalHits(),
topDocsStats.fetchHits,
topDocsStats.getMaxScore(),
topDocsStats.timedOut,
topDocsStats.terminatedEarly,
reducedSuggest,
aggregations,
profileBuilder,
bufferedAggs == null
? null
: InternalAggregations.topLevelReduceDelayable(
bufferedAggs,
performFinalReduce ? aggReduceContextBuilder.forFinalReduction() : aggReduceContextBuilder.forPartialReduction()
),
profileShardResults.isEmpty() ? null : new SearchProfileResultsBuilder(profileShardResults),
sortedTopDocs,
sortValueFormats,
queryPhaseRankCoordinatorContext,

View file

@ -391,13 +391,13 @@ public class SearchResponse extends ActionResponse implements ChunkedToXContentO
public Iterator<? extends ToXContent> innerToXContentChunked(ToXContent.Params params) {
return Iterators.concat(
ChunkedToXContentHelper.singleChunk(SearchResponse.this::headerToXContent),
ChunkedToXContentHelper.chunk(SearchResponse.this::headerToXContent),
Iterators.single(clusters),
Iterators.concat(
hits.toXContentChunked(params),
aggregations == null ? Collections.emptyIterator() : ChunkedToXContentHelper.singleChunk(aggregations),
suggest == null ? Collections.emptyIterator() : ChunkedToXContentHelper.singleChunk(suggest),
profileResults == null ? Collections.emptyIterator() : ChunkedToXContentHelper.singleChunk(profileResults)
aggregations == null ? Collections.emptyIterator() : ChunkedToXContentHelper.chunk(aggregations),
suggest == null ? Collections.emptyIterator() : ChunkedToXContentHelper.chunk(suggest),
profileResults == null ? Collections.emptyIterator() : ChunkedToXContentHelper.chunk(profileResults)
)
);
}

View file

@ -36,8 +36,8 @@ import java.util.Set;
import static org.elasticsearch.cluster.routing.ShardRouting.newUnassigned;
import static org.elasticsearch.cluster.routing.UnassignedInfo.Reason.REINITIALIZED;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.chunk;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.endArray;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.singleChunk;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.startObject;
/**
@ -162,7 +162,7 @@ public class ClusterInfo implements ChunkedToXContent, Writeable {
}
return builder.endObject(); // end $nodename
}),
singleChunk(
chunk(
(builder, p) -> builder.endObject() // end "nodes"
.startObject("shard_sizes")
),
@ -171,7 +171,7 @@ public class ClusterInfo implements ChunkedToXContent, Writeable {
shardSizes.entrySet().iterator(),
c -> (builder, p) -> builder.humanReadableField(c.getKey() + "_bytes", c.getKey(), ByteSizeValue.ofBytes(c.getValue()))
),
singleChunk(
chunk(
(builder, p) -> builder.endObject() // end "shard_sizes"
.startObject("shard_data_set_sizes")
),
@ -183,12 +183,12 @@ public class ClusterInfo implements ChunkedToXContent, Writeable {
ByteSizeValue.ofBytes(c.getValue())
)
),
singleChunk(
chunk(
(builder, p) -> builder.endObject() // end "shard_data_set_sizes"
.startObject("shard_paths")
),
Iterators.map(dataPath.entrySet().iterator(), c -> (builder, p) -> builder.field(c.getKey().toString(), c.getValue())),
singleChunk(
chunk(
(builder, p) -> builder.endObject() // end "shard_paths"
.startArray("reserved_sizes")
),

View file

@ -34,7 +34,7 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.AllocationService.RerouteStrategy;
import org.elasticsearch.cluster.routing.allocation.AllocationStatsService;
import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsAndWeightsCalculator;
import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator;
@ -141,7 +141,10 @@ public class ClusterModule extends AbstractModule {
this.clusterPlugins = clusterPlugins;
this.deciderList = createAllocationDeciders(settings, clusterService.getClusterSettings(), clusterPlugins);
this.allocationDeciders = new AllocationDeciders(deciderList);
var nodeAllocationStatsProvider = new NodeAllocationStatsProvider(writeLoadForecaster, clusterService.getClusterSettings());
var nodeAllocationStatsAndWeightsCalculator = new NodeAllocationStatsAndWeightsCalculator(
writeLoadForecaster,
clusterService.getClusterSettings()
);
this.shardsAllocator = createShardsAllocator(
settings,
clusterService.getClusterSettings(),
@ -151,7 +154,7 @@ public class ClusterModule extends AbstractModule {
this::reconcile,
writeLoadForecaster,
telemetryProvider,
nodeAllocationStatsProvider
nodeAllocationStatsAndWeightsCalculator
);
this.clusterService = clusterService;
this.indexNameExpressionResolver = new IndexNameExpressionResolver(threadPool.getThreadContext(), systemIndices, projectResolver);
@ -169,7 +172,7 @@ public class ClusterModule extends AbstractModule {
clusterService,
clusterInfoService,
shardsAllocator,
nodeAllocationStatsProvider
nodeAllocationStatsAndWeightsCalculator
);
this.telemetryProvider = telemetryProvider;
}
@ -420,7 +423,7 @@ public class ClusterModule extends AbstractModule {
DesiredBalanceReconcilerAction reconciler,
WriteLoadForecaster writeLoadForecaster,
TelemetryProvider telemetryProvider,
NodeAllocationStatsProvider nodeAllocationStatsProvider
NodeAllocationStatsAndWeightsCalculator nodeAllocationStatsAndWeightsCalculator
) {
Map<String, Supplier<ShardsAllocator>> allocators = new HashMap<>();
allocators.put(BALANCED_ALLOCATOR, () -> new BalancedShardsAllocator(clusterSettings, writeLoadForecaster));
@ -433,7 +436,7 @@ public class ClusterModule extends AbstractModule {
clusterService,
reconciler,
telemetryProvider,
nodeAllocationStatsProvider
nodeAllocationStatsAndWeightsCalculator
)
);

View file

@ -827,7 +827,7 @@ public class ClusterState implements ChunkedToXContent, Diffable<ClusterState> {
metrics.contains(Metric.CUSTOMS)
? Iterators.flatMap(
customs.entrySet().iterator(),
e -> ChunkedToXContentHelper.wrapWithObject(e.getKey(), e.getValue().toXContentChunked(outerParams))
e -> ChunkedToXContentHelper.object(e.getKey(), e.getValue().toXContentChunked(outerParams))
)
: Collections.emptyIterator()
);

View file

@ -103,7 +103,7 @@ public class ComponentTemplateMetadata implements Metadata.ProjectCustom {
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) {
return ChunkedToXContentHelper.xContentValuesMap(COMPONENT_TEMPLATE.getPreferredName(), componentTemplates);
return ChunkedToXContentHelper.xContentObjectFields(COMPONENT_TEMPLATE.getPreferredName(), componentTemplates);
}
@Override

View file

@ -104,7 +104,7 @@ public class ComposableIndexTemplateMetadata implements Metadata.ProjectCustom {
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) {
return ChunkedToXContentHelper.xContentValuesMap(INDEX_TEMPLATE.getPreferredName(), indexTemplates);
return ChunkedToXContentHelper.xContentObjectFields(INDEX_TEMPLATE.getPreferredName(), indexTemplates);
}
@Override

View file

@ -234,7 +234,7 @@ public class DataStreamMetadata implements Metadata.ProjectCustom {
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) {
return Iterators.concat(
ChunkedToXContentHelper.xContentValuesMap(DATA_STREAM.getPreferredName(), dataStreams),
ChunkedToXContentHelper.xContentObjectFields(DATA_STREAM.getPreferredName(), dataStreams),
ChunkedToXContentHelper.startObject(DATA_STREAM_ALIASES.getPreferredName()),
dataStreamAliases.values().iterator(),
ChunkedToXContentHelper.endObject()

View file

@ -1703,12 +1703,7 @@ public class IndexMetadata implements Diffable<IndexMetadata>, ToXContentFragmen
out.writeOptionalDouble(indexWriteLoadForecast);
out.writeOptionalLong(shardSizeInBytesForecast);
}
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) {
eventIngestedRange.writeTo(out);
} else {
assert eventIngestedRange == IndexLongFieldRange.UNKNOWN
: "eventIngestedRange should be UNKNOWN until all nodes are on the new version but is " + eventIngestedRange;
}
}
@Override

View file

@ -671,10 +671,10 @@ public class Metadata implements Diffable<Metadata>, ChunkedToXContent {
Iterators.flatMap(
customs.entrySet().iterator(),
entry -> entry.getValue().context().contains(context)
? ChunkedToXContentHelper.wrapWithObject(entry.getKey(), entry.getValue().toXContentChunked(p))
? ChunkedToXContentHelper.object(entry.getKey(), entry.getValue().toXContentChunked(p))
: Collections.emptyIterator()
),
ChunkedToXContentHelper.wrapWithObject("reserved_state", reservedStateMetadata().values().iterator()),
ChunkedToXContentHelper.object("reserved_state", reservedStateMetadata().values().iterator()),
ChunkedToXContentHelper.endObject()
);
} else {
@ -700,10 +700,10 @@ public class Metadata implements Diffable<Metadata>, ChunkedToXContent {
Iterators.flatMap(
customs.entrySet().iterator(),
entry -> entry.getValue().context().contains(context)
? ChunkedToXContentHelper.wrapWithObject(entry.getKey(), entry.getValue().toXContentChunked(p))
? ChunkedToXContentHelper.object(entry.getKey(), entry.getValue().toXContentChunked(p))
: Collections.emptyIterator()
),
ChunkedToXContentHelper.wrapWithObject("reserved_state", clusterReservedState.values().iterator()),
ChunkedToXContentHelper.object("reserved_state", clusterReservedState.values().iterator()),
ChunkedToXContentHelper.endObject()
);
}

View file

@ -74,7 +74,6 @@ import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.MapperService.MergeReason;
import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.index.query.SearchExecutionContext;
import org.elasticsearch.index.shard.IndexLongFieldRange;
import org.elasticsearch.indices.IndexCreationException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidIndexNameException;
@ -548,8 +547,7 @@ public class MetadataCreateIndexService {
temporaryIndexMeta.getSettings(),
temporaryIndexMeta.getRoutingNumShards(),
sourceMetadata,
temporaryIndexMeta.isSystem(),
currentState.getMinTransportVersion()
temporaryIndexMeta.isSystem()
);
} catch (Exception e) {
logger.info("failed to build index metadata [{}]", request.index());
@ -1414,15 +1412,10 @@ public class MetadataCreateIndexService {
Settings indexSettings,
int routingNumShards,
@Nullable IndexMetadata sourceMetadata,
boolean isSystem,
TransportVersion minClusterTransportVersion
boolean isSystem
) {
IndexMetadata.Builder indexMetadataBuilder = createIndexMetadataBuilder(indexName, sourceMetadata, indexSettings, routingNumShards);
indexMetadataBuilder.system(isSystem);
if (minClusterTransportVersion.before(TransportVersions.V_8_15_0)) {
// promote to UNKNOWN for older versions since they don't know how to handle event.ingested in cluster state
indexMetadataBuilder.eventIngestedRange(IndexLongFieldRange.UNKNOWN);
}
// now, update the mappings with the actual source
Map<String, MappingMetadata> mappingsMetadata = new HashMap<>();
DocumentMapper docMapper = documentMapperSupplier.get();

View file

@ -191,7 +191,7 @@ public class NodesShutdownMetadata implements Metadata.ClusterCustom {
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) {
return ChunkedToXContentHelper.xContentValuesMap(NODES_FIELD.getPreferredName(), nodes);
return ChunkedToXContentHelper.xContentObjectFields(NODES_FIELD.getPreferredName(), nodes);
}
/**

View file

@ -27,8 +27,8 @@ import java.util.Collections;
import java.util.Iterator;
import java.util.Objects;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.chunk;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.endObject;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.singleChunk;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.startObject;
public class ShutdownShardMigrationStatus implements Writeable, ChunkedToXContentObject {
@ -168,7 +168,7 @@ public class ShutdownShardMigrationStatus implements Writeable, ChunkedToXConten
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) {
return Iterators.concat(
startObject(),
singleChunk((builder, p) -> buildHeader(builder)),
chunk((builder, p) -> buildHeader(builder)),
Objects.nonNull(allocationDecision)
? Iterators.concat(startObject(NODE_ALLOCATION_DECISION_KEY), allocationDecision.toXContentChunked(params), endObject())
: Collections.emptyIterator(),

View file

@ -19,35 +19,41 @@ import java.util.Map;
import java.util.function.Supplier;
import java.util.stream.Collectors;
/**
* Exposes cluster allocation metrics. Constructs {@link NodeAllocationStats} per node, on demand.
*/
public class AllocationStatsService {
private final ClusterService clusterService;
private final ClusterInfoService clusterInfoService;
private final Supplier<DesiredBalance> desiredBalanceSupplier;
private final NodeAllocationStatsProvider nodeAllocationStatsProvider;
private final NodeAllocationStatsAndWeightsCalculator nodeAllocationStatsAndWeightsCalculator;
public AllocationStatsService(
ClusterService clusterService,
ClusterInfoService clusterInfoService,
ShardsAllocator shardsAllocator,
NodeAllocationStatsProvider nodeAllocationStatsProvider
NodeAllocationStatsAndWeightsCalculator nodeAllocationStatsAndWeightsCalculator
) {
this.clusterService = clusterService;
this.clusterInfoService = clusterInfoService;
this.nodeAllocationStatsProvider = nodeAllocationStatsProvider;
this.nodeAllocationStatsAndWeightsCalculator = nodeAllocationStatsAndWeightsCalculator;
this.desiredBalanceSupplier = shardsAllocator instanceof DesiredBalanceShardsAllocator allocator
? allocator::getDesiredBalance
: () -> null;
}
/**
* Returns a map of node IDs to node allocation stats.
*/
public Map<String, NodeAllocationStats> stats() {
var state = clusterService.state();
var stats = nodeAllocationStatsProvider.stats(
state.metadata(),
state.getRoutingNodes(),
var clusterState = clusterService.state();
var nodesStatsAndWeights = nodeAllocationStatsAndWeightsCalculator.nodesAllocationStatsAndWeights(
clusterState.metadata(),
clusterState.getRoutingNodes(),
clusterInfoService.getClusterInfo(),
desiredBalanceSupplier.get()
);
return stats.entrySet()
return nodesStatsAndWeights.entrySet()
.stream()
.collect(
Collectors.toMap(

View file

@ -18,6 +18,15 @@ import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException;
/**
* Point-in-time allocation stats for a particular node.
*
* @param shards count of shards on this node.
* @param undesiredShards count of shards that we want to move off of this node.
* @param forecastedIngestLoad the predicted near future total ingest load on this node.
* @param forecastedDiskUsage the predicted near future total disk usage on this node.
* @param currentDiskUsage the current total disk usage on this node.
*/
public record NodeAllocationStats(
int shards,
int undesiredShards,

View file

@ -24,7 +24,10 @@ import org.elasticsearch.core.Nullable;
import java.util.Map;
public class NodeAllocationStatsProvider {
/**
* Calculates the allocation weights and usage stats for each node: see {@link NodeAllocationStatsAndWeight} for details.
*/
public class NodeAllocationStatsAndWeightsCalculator {
private final WriteLoadForecaster writeLoadForecaster;
private volatile float indexBalanceFactor;
@ -32,7 +35,10 @@ public class NodeAllocationStatsProvider {
private volatile float writeLoadBalanceFactor;
private volatile float diskUsageBalanceFactor;
public record NodeAllocationAndClusterBalanceStats(
/**
* Node shard allocation stats and the total node weight.
*/
public record NodeAllocationStatsAndWeight(
int shards,
int undesiredShards,
double forecastedIngestLoad,
@ -41,7 +47,7 @@ public class NodeAllocationStatsProvider {
float currentNodeWeight
) {}
public NodeAllocationStatsProvider(WriteLoadForecaster writeLoadForecaster, ClusterSettings clusterSettings) {
public NodeAllocationStatsAndWeightsCalculator(WriteLoadForecaster writeLoadForecaster, ClusterSettings clusterSettings) {
this.writeLoadForecaster = writeLoadForecaster;
clusterSettings.initializeAndWatch(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, value -> this.shardBalanceFactor = value);
clusterSettings.initializeAndWatch(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, value -> this.indexBalanceFactor = value);
@ -55,7 +61,10 @@ public class NodeAllocationStatsProvider {
);
}
public Map<String, NodeAllocationAndClusterBalanceStats> stats(
/**
* Returns a map of node IDs to {@link NodeAllocationStatsAndWeight}.
*/
public Map<String, NodeAllocationStatsAndWeight> nodesAllocationStatsAndWeights(
Metadata metadata,
RoutingNodes routingNodes,
ClusterInfo clusterInfo,
@ -66,7 +75,7 @@ public class NodeAllocationStatsProvider {
var avgWriteLoadPerNode = WeightFunction.avgWriteLoadPerNode(writeLoadForecaster, metadata, routingNodes);
var avgDiskUsageInBytesPerNode = WeightFunction.avgDiskUsageInBytesPerNode(clusterInfo, metadata, routingNodes);
var stats = Maps.<String, NodeAllocationAndClusterBalanceStats>newMapWithExpectedSize(routingNodes.size());
var nodeAllocationStatsAndWeights = Maps.<String, NodeAllocationStatsAndWeight>newMapWithExpectedSize(routingNodes.size());
for (RoutingNode node : routingNodes) {
int shards = 0;
int undesiredShards = 0;
@ -75,9 +84,10 @@ public class NodeAllocationStatsProvider {
long currentDiskUsage = 0;
for (ShardRouting shardRouting : node) {
if (shardRouting.relocating()) {
// Skip the shard if it is moving off this node. The node running recovery will count it.
continue;
}
shards++;
++shards;
IndexMetadata indexMetadata = metadata.indexMetadata(shardRouting.index());
if (isDesiredAllocation(desiredBalance, shardRouting) == false) {
undesiredShards++;
@ -86,9 +96,8 @@ public class NodeAllocationStatsProvider {
forecastedWriteLoad += writeLoadForecaster.getForecastedWriteLoad(indexMetadata).orElse(0.0);
forecastedDiskUsage += Math.max(indexMetadata.getForecastedShardSizeInBytes().orElse(0), shardSize);
currentDiskUsage += shardSize;
}
float currentNodeWeight = weightFunction.nodeWeight(
float currentNodeWeight = weightFunction.calculateNodeWeight(
shards,
avgShardsPerNode,
forecastedWriteLoad,
@ -96,10 +105,12 @@ public class NodeAllocationStatsProvider {
currentDiskUsage,
avgDiskUsageInBytesPerNode
);
stats.put(
nodeAllocationStatsAndWeights.put(
node.nodeId(),
new NodeAllocationAndClusterBalanceStats(
new NodeAllocationStatsAndWeight(
shards,
// The public API contract for the 'undesired_shards' field is that -1 is returned when an allocator other
// than the desired balance allocator is in use.
desiredBalance != null ? undesiredShards : -1,
forecastedWriteLoad,
forecastedDiskUsage,
@ -109,10 +120,13 @@ public class NodeAllocationStatsProvider {
);
}
return stats;
return nodeAllocationStatsAndWeights;
}
private static boolean isDesiredAllocation(DesiredBalance desiredBalance, ShardRouting shardRouting) {
/**
* Checks whether a shard is currently allocated to the node that the desired balance wants it on.
*/
private static boolean isDesiredAllocation(@Nullable DesiredBalance desiredBalance, ShardRouting shardRouting) {
if (desiredBalance == null) {
return true;
}

View file

@ -114,6 +114,7 @@ public class BalancedShardsAllocator implements ShardsAllocator {
Property.NodeScope
);
// TODO: deduplicate these fields, use the fields in NodeAllocationStatsAndWeightsCalculator instead.
private volatile float indexBalanceFactor;
private volatile float shardBalanceFactor;
private volatile float writeLoadBalanceFactor;
@ -170,7 +171,7 @@ public class BalancedShardsAllocator implements ShardsAllocator {
Map<DiscoveryNode, DesiredBalanceMetrics.NodeWeightStats> nodeLevelWeights = new HashMap<>();
for (var entry : balancer.nodes.entrySet()) {
var node = entry.getValue();
var nodeWeight = weightFunction.nodeWeight(
var nodeWeight = weightFunction.calculateNodeWeight(
node.numShards(),
balancer.avgShardsPerNode(),
node.writeLoad(),
@ -266,7 +267,7 @@ public class BalancedShardsAllocator implements ShardsAllocator {
private final RoutingAllocation allocation;
private final RoutingNodes routingNodes;
private final Metadata metadata;
private final WeightFunction weight;
private final WeightFunction weightFunction;
private final float threshold;
private final float avgShardsPerNode;
@ -275,12 +276,17 @@ public class BalancedShardsAllocator implements ShardsAllocator {
private final Map<String, ModelNode> nodes;
private final NodeSorter sorter;
private Balancer(WriteLoadForecaster writeLoadForecaster, RoutingAllocation allocation, WeightFunction weight, float threshold) {
private Balancer(
WriteLoadForecaster writeLoadForecaster,
RoutingAllocation allocation,
WeightFunction weightFunction,
float threshold
) {
this.writeLoadForecaster = writeLoadForecaster;
this.allocation = allocation;
this.routingNodes = allocation.routingNodes();
this.metadata = allocation.metadata();
this.weight = weight;
this.weightFunction = weightFunction;
this.threshold = threshold;
avgShardsPerNode = WeightFunction.avgShardPerNode(metadata, routingNodes);
avgWriteLoadPerNode = WeightFunction.avgWriteLoadPerNode(writeLoadForecaster, metadata, routingNodes);
@ -359,7 +365,7 @@ public class BalancedShardsAllocator implements ShardsAllocator {
* to sort based on an index.
*/
private NodeSorter newNodeSorter() {
return new NodeSorter(nodesArray(), weight, this);
return new NodeSorter(nodesArray(), weightFunction, this);
}
/**
@ -443,7 +449,7 @@ public class BalancedShardsAllocator implements ShardsAllocator {
assert currentNode != null : "currently assigned node could not be found";
// balance the shard, if a better node can be found
final float currentWeight = weight.weight(this, currentNode, index);
final float currentWeight = weightFunction.calculateNodeWeightWithIndex(this, currentNode, index);
final AllocationDeciders deciders = allocation.deciders();
Type rebalanceDecisionType = Type.NO;
ModelNode targetNode = null;
@ -459,7 +465,7 @@ public class BalancedShardsAllocator implements ShardsAllocator {
// this is a comparison of the number of shards on this node to the number of shards
// that should be on each node on average (both taking the cluster as a whole into account
// as well as shards per index)
final float nodeWeight = weight.weight(this, node, index);
final float nodeWeight = weightFunction.calculateNodeWeightWithIndex(this, node, index);
// if the node we are examining has a worse (higher) weight than the node the shard is
// assigned to, then there is no way moving the shard to the node with the worse weight
// can make the balance of the cluster better, so we check for that here
@ -1051,7 +1057,7 @@ public class BalancedShardsAllocator implements ShardsAllocator {
}
// weight of this index currently on the node
float currentWeight = weight.weight(this, node, index);
float currentWeight = weightFunction.calculateNodeWeightWithIndex(this, node, index);
// moving the shard would not improve the balance, and we are not in explain mode, so short circuit
if (currentWeight > minWeight && explain == false) {
continue;
@ -1345,7 +1351,7 @@ public class BalancedShardsAllocator implements ShardsAllocator {
}
public float weight(ModelNode node) {
return function.weight(balancer, node, index);
return function.calculateNodeWeightWithIndex(balancer, node, index);
}
public float minWeightDelta() {

View file

@ -21,7 +21,7 @@ import java.util.Objects;
*
* @param assignments a set of the (persistent) node IDs to which each {@link ShardId} should be allocated
* @param weightsPerNode The node weights calculated based on
* {@link org.elasticsearch.cluster.routing.allocation.allocator.WeightFunction#nodeWeight}
* {@link org.elasticsearch.cluster.routing.allocation.allocator.WeightFunction#calculateNodeWeight}
*/
public record DesiredBalance(
long lastConvergedIndex,

View file

@ -10,7 +10,7 @@
package org.elasticsearch.cluster.routing.allocation.allocator;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider.NodeAllocationAndClusterBalanceStats;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsAndWeightsCalculator.NodeAllocationStatsAndWeight;
import org.elasticsearch.telemetry.metric.DoubleWithAttributes;
import org.elasticsearch.telemetry.metric.LongWithAttributes;
import org.elasticsearch.telemetry.metric.MeterRegistry;
@ -20,6 +20,12 @@ import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
/**
* Maintains balancer metrics and makes them accessible to the {@link MeterRegistry} and APM reporting. Metrics are updated
* ({@link #updateMetrics}) or cleared ({@link #zeroAllMetrics}) as a result of cluster events and the metrics will be pulled for reporting
* via the MeterRegistry implementation. Only the master node reports metrics: see {@link #setNodeIsMaster}. When
* {@link #nodeIsMaster} is false, empty values are returned such that MeterRegistry ignores the metrics for reporting purposes.
*/
public class DesiredBalanceMetrics {
public record AllocationStats(long unassignedShards, long totalAllocations, long undesiredAllocationsExcludingShuttingDownNodes) {}
@ -69,13 +75,14 @@ public class DesiredBalanceMetrics {
private volatile long undesiredAllocations;
private final AtomicReference<Map<DiscoveryNode, NodeWeightStats>> weightStatsPerNodeRef = new AtomicReference<>(Map.of());
private final AtomicReference<Map<DiscoveryNode, NodeAllocationAndClusterBalanceStats>> allocationStatsPerNodeRef =
new AtomicReference<>(Map.of());
private final AtomicReference<Map<DiscoveryNode, NodeAllocationStatsAndWeight>> allocationStatsPerNodeRef = new AtomicReference<>(
Map.of()
);
public void updateMetrics(
AllocationStats allocationStats,
Map<DiscoveryNode, NodeWeightStats> weightStatsPerNode,
Map<DiscoveryNode, NodeAllocationAndClusterBalanceStats> nodeAllocationStats
Map<DiscoveryNode, NodeAllocationStatsAndWeight> nodeAllocationStats
) {
assert allocationStats != null : "allocation stats cannot be null";
assert weightStatsPerNode != null : "node balance weight stats cannot be null";
@ -170,6 +177,10 @@ public class DesiredBalanceMetrics {
);
}
/**
* When {@link #nodeIsMaster} is set to true, this node will report the APM metrics registered in this class. When set to false, empty
* values will be returned such that no APM metrics are sent from this node.
*/
public void setNodeIsMaster(boolean nodeIsMaster) {
this.nodeIsMaster = nodeIsMaster;
}
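To make the gating concrete: a gauge callback in this style consults the master flag and publishes nothing otherwise. The sketch below uses hypothetical names and a simplified gauge shape; it is not the actual registration code.
    // Hypothetical sketch: when this node is not the elected master, return no values so the
    // MeterRegistry has nothing to report for this gauge from this node.
    private List<LongWithAttributes> unassignedShardsGaugeValues() {
        if (nodeIsMaster == false) {
            return List.of();
        }
        return List.of(new LongWithAttributes(unassignedShards, Map.of()));
    }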
@ -339,6 +350,10 @@ public class DesiredBalanceMetrics {
return List.of();
}
/**
* Sets all the internal class fields to zero/empty. Typically used in conjunction with {@link #setNodeIsMaster}.
* This is best-effort because it is possible for {@link #updateMetrics} to race with this method.
*/
public void zeroAllMetrics() {
unassignedShards = 0;
totalAllocations = 0;

View file

@ -21,8 +21,8 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider.NodeAllocationAndClusterBalanceStats;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsAndWeightsCalculator;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsAndWeightsCalculator.NodeAllocationStatsAndWeight;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceMetrics.AllocationStats;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
@ -76,13 +76,13 @@ public class DesiredBalanceReconciler {
private final NodeAllocationOrdering allocationOrdering = new NodeAllocationOrdering();
private final NodeAllocationOrdering moveOrdering = new NodeAllocationOrdering();
private final DesiredBalanceMetrics desiredBalanceMetrics;
private final NodeAllocationStatsProvider nodeAllocationStatsProvider;
private final NodeAllocationStatsAndWeightsCalculator nodeAllocationStatsAndWeightsCalculator;
public DesiredBalanceReconciler(
ClusterSettings clusterSettings,
ThreadPool threadPool,
DesiredBalanceMetrics desiredBalanceMetrics,
NodeAllocationStatsProvider nodeAllocationStatsProvider
NodeAllocationStatsAndWeightsCalculator nodeAllocationStatsAndWeightsCalculator
) {
this.desiredBalanceMetrics = desiredBalanceMetrics;
this.undesiredAllocationLogInterval = new FrequencyCappedAction(
@ -94,7 +94,7 @@ public class DesiredBalanceReconciler {
UNDESIRED_ALLOCATIONS_LOG_THRESHOLD_SETTING,
value -> this.undesiredAllocationsLogThreshold = value
);
this.nodeAllocationStatsProvider = nodeAllocationStatsProvider;
this.nodeAllocationStatsAndWeightsCalculator = nodeAllocationStatsAndWeightsCalculator;
}
public void reconcile(DesiredBalance desiredBalance, RoutingAllocation allocation) {
@ -160,20 +160,22 @@ public class DesiredBalanceReconciler {
}
private void updateDesireBalanceMetrics(AllocationStats allocationStats) {
var stats = nodeAllocationStatsProvider.stats(
var nodesStatsAndWeights = nodeAllocationStatsAndWeightsCalculator.nodesAllocationStatsAndWeights(
allocation.metadata(),
allocation.routingNodes(),
allocation.clusterInfo(),
desiredBalance
);
Map<DiscoveryNode, NodeAllocationAndClusterBalanceStats> nodeAllocationStats = new HashMap<>(stats.size());
for (var entry : stats.entrySet()) {
var node = allocation.nodes().get(entry.getKey());
Map<DiscoveryNode, NodeAllocationStatsAndWeight> filteredNodeAllocationStatsAndWeights = new HashMap<>(
nodesStatsAndWeights.size()
);
for (var nodeStatsAndWeight : nodesStatsAndWeights.entrySet()) {
var node = allocation.nodes().get(nodeStatsAndWeight.getKey());
if (node != null) {
nodeAllocationStats.put(node, entry.getValue());
filteredNodeAllocationStatsAndWeights.put(node, nodeStatsAndWeight.getValue());
}
}
desiredBalanceMetrics.updateMetrics(allocationStats, desiredBalance.weightsPerNode(), nodeAllocationStats);
desiredBalanceMetrics.updateMetrics(allocationStats, desiredBalance.weightsPerNode(), filteredNodeAllocationStatsAndWeights);
}
private boolean allocateUnassignedInvariant() {

View file

@ -18,7 +18,7 @@ import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.AllocationService.RerouteStrategy;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsAndWeightsCalculator;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
@ -89,7 +89,7 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator {
ClusterService clusterService,
DesiredBalanceReconcilerAction reconciler,
TelemetryProvider telemetryProvider,
NodeAllocationStatsProvider nodeAllocationStatsProvider
NodeAllocationStatsAndWeightsCalculator nodeAllocationStatsAndWeightsCalculator
) {
this(
delegateAllocator,
@ -98,7 +98,7 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator {
new DesiredBalanceComputer(clusterSettings, threadPool, delegateAllocator),
reconciler,
telemetryProvider,
nodeAllocationStatsProvider
nodeAllocationStatsAndWeightsCalculator
);
}
@ -109,7 +109,7 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator {
DesiredBalanceComputer desiredBalanceComputer,
DesiredBalanceReconcilerAction reconciler,
TelemetryProvider telemetryProvider,
NodeAllocationStatsProvider nodeAllocationStatsProvider
NodeAllocationStatsAndWeightsCalculator nodeAllocationStatsAndWeightsCalculator
) {
this.desiredBalanceMetrics = new DesiredBalanceMetrics(telemetryProvider.getMeterRegistry());
this.delegateAllocator = delegateAllocator;
@ -120,7 +120,7 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator {
clusterService.getClusterSettings(),
threadPool,
desiredBalanceMetrics,
nodeAllocationStatsProvider
nodeAllocationStatsAndWeightsCalculator
);
this.desiredBalanceComputation = new ContinuousComputation<>(threadPool.generic()) {

View file

@ -60,9 +60,9 @@ public class WeightFunction {
theta3 = diskUsageBalance / sum;
}
float weight(BalancedShardsAllocator.Balancer balancer, BalancedShardsAllocator.ModelNode node, ProjectIndex index) {
float calculateNodeWeightWithIndex(BalancedShardsAllocator.Balancer balancer, BalancedShardsAllocator.ModelNode node, ProjectIndex index) {
final float weightIndex = node.numShards(index) - balancer.avgShardsPerNode(index);
final float nodeWeight = nodeWeight(
final float nodeWeight = calculateNodeWeight(
node.numShards(),
balancer.avgShardsPerNode(),
node.writeLoad(),
@ -73,7 +73,7 @@ public class WeightFunction {
return nodeWeight + theta1 * weightIndex;
}
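In sketch form, and assuming the linear-deviation shape implied by the theta factors above (an illustration, not the exact Elasticsearch implementation), the node weight is a weighted sum of how far the node sits from the cluster-wide averages; the per-index variant then adds theta1 times the per-index shard deviation:
    // Illustrative only: weight grows as the node drifts above the cluster averages for
    // shard count, forecasted write load, and disk usage.
    float sketchNodeWeight(
        int nodeNumShards, float avgShardsPerNode,
        double nodeWriteLoad, double avgWriteLoadPerNode,
        double diskUsageInBytes, double avgDiskUsageInBytesPerNode
    ) {
        float shardDeviation = nodeNumShards - avgShardsPerNode;
        float writeLoadDeviation = (float) (nodeWriteLoad - avgWriteLoadPerNode);
        float diskUsageDeviation = (float) (diskUsageInBytes - avgDiskUsageInBytesPerNode);
        return theta0 * shardDeviation + theta2 * writeLoadDeviation + theta3 * diskUsageDeviation;
    }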
public float nodeWeight(
public float calculateNodeWeight(
int nodeNumShards,
float avgShardsPerNode,
double nodeWriteLoad,

View file

@ -120,7 +120,8 @@ public class KeyStoreWrapper implements SecureSettings {
/** The version where lucene directory API changed from BE to LE. */
public static final int LE_VERSION = 5;
public static final int HIGHER_KDF_ITERATION_COUNT_VERSION = 6;
public static final int CURRENT_VERSION = HIGHER_KDF_ITERATION_COUNT_VERSION;
public static final int CIPHER_KEY_BITS_256_VERSION = 7;
public static final int CURRENT_VERSION = CIPHER_KEY_BITS_256_VERSION;
/** The algorithm used to derive the cipher key from a password. */
private static final String KDF_ALGO = "PBKDF2WithHmacSHA512";
@ -128,14 +129,8 @@ public class KeyStoreWrapper implements SecureSettings {
/** The number of iterations to derive the cipher key. */
private static final int KDF_ITERS = 210000;
/**
* The number of bits for the cipher key.
*
* Note: The Oracle JDK 8 ships with a limited JCE policy that restricts key length for AES to 128 bits.
* This can be increased to 256 bits once minimum java 9 is the minimum java version.
* See http://www.oracle.com/technetwork/java/javase/terms/readme/jdk9-readme-3852447.html#jce
* */
private static final int CIPHER_KEY_BITS = 128;
/** The number of bits for the cipher key (256 bits are supported as of Java 9).*/
private static final int CIPHER_KEY_BITS = 256;
/** The number of bits for the GCM tag. */
private static final int GCM_TAG_BITS = 128;
@ -156,6 +151,7 @@ public class KeyStoreWrapper implements SecureSettings {
// 4: remove distinction between string/files, ES 6.8/7.1
// 5: Lucene directory API changed to LE, ES 8.0
// 6: increase KDF iteration count, ES 8.14
// 7: increase cipher key length to 256 bits, ES 9.0
/** The metadata format version used to read the current keystore wrapper. */
private final int formatVersion;
@ -318,8 +314,9 @@ public class KeyStoreWrapper implements SecureSettings {
return hasPassword;
}
private static Cipher createCipher(int opmode, char[] password, byte[] salt, byte[] iv, int kdfIters) throws GeneralSecurityException {
PBEKeySpec keySpec = new PBEKeySpec(password, salt, kdfIters, CIPHER_KEY_BITS);
private static Cipher createCipher(int opmode, char[] password, byte[] salt, byte[] iv, int kdfIters, int cipherKeyBits)
throws GeneralSecurityException {
PBEKeySpec keySpec = new PBEKeySpec(password, salt, kdfIters, cipherKeyBits);
SecretKeyFactory keyFactory = SecretKeyFactory.getInstance(KDF_ALGO);
SecretKey secretKey;
try {
@ -343,6 +340,11 @@ public class KeyStoreWrapper implements SecureSettings {
return formatVersion < HIGHER_KDF_ITERATION_COUNT_VERSION ? 10000 : KDF_ITERS;
}
private static int getCipherKeyBitsForVersion(int formatVersion) {
// cipher key length was increased in version 7; it was 128 bits in previous versions
return formatVersion < CIPHER_KEY_BITS_256_VERSION ? 128 : CIPHER_KEY_BITS;
}
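For orientation, here is a minimal standalone sketch of how these version-dependent parameters feed into cipher construction, using only standard JDK crypto APIs; the class and method names are illustrative, not the Elasticsearch code:
    import javax.crypto.Cipher;
    import javax.crypto.SecretKey;
    import javax.crypto.SecretKeyFactory;
    import javax.crypto.spec.GCMParameterSpec;
    import javax.crypto.spec.PBEKeySpec;
    import javax.crypto.spec.SecretKeySpec;
    import java.security.GeneralSecurityException;

    final class PasswordCipherSketch {
        /** Derives an AES key from the password via PBKDF2-HMAC-SHA512, then builds an AES-GCM cipher. */
        static Cipher newCipher(int opmode, char[] password, byte[] salt, byte[] iv, int kdfIters, int keyBits)
            throws GeneralSecurityException {
            PBEKeySpec keySpec = new PBEKeySpec(password, salt, kdfIters, keyBits);
            SecretKeyFactory keyFactory = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA512");
            SecretKey derived = keyFactory.generateSecret(keySpec);
            // Re-wrap the derived bytes as an AES key; GCM uses a 128-bit authentication tag over the given IV.
            SecretKeySpec aesKey = new SecretKeySpec(derived.getEncoded(), "AES");
            Cipher cipher = Cipher.getInstance("AES/GCM/NoPadding");
            cipher.init(opmode, aesKey, new GCMParameterSpec(128, iv));
            return cipher;
        }
    }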
/**
* Decrypts the underlying keystore data.
*
@ -371,7 +373,14 @@ public class KeyStoreWrapper implements SecureSettings {
throw new SecurityException("Keystore has been corrupted or tampered with", e);
}
Cipher cipher = createCipher(Cipher.DECRYPT_MODE, password, salt, iv, getKdfIterationCountForVersion(formatVersion));
Cipher cipher = createCipher(
Cipher.DECRYPT_MODE,
password,
salt,
iv,
getKdfIterationCountForVersion(formatVersion),
getCipherKeyBitsForVersion(formatVersion)
);
try (
ByteArrayInputStream bytesStream = new ByteArrayInputStream(encryptedBytes);
CipherInputStream cipherStream = new CipherInputStream(bytesStream, cipher);
@ -409,11 +418,12 @@ public class KeyStoreWrapper implements SecureSettings {
}
/** Encrypt the keystore entries and return the encrypted data. */
private byte[] encrypt(char[] password, byte[] salt, byte[] iv, int kdfIterationCount) throws GeneralSecurityException, IOException {
private byte[] encrypt(char[] password, byte[] salt, byte[] iv, int kdfIterationCount, int cipherKeyBits)
throws GeneralSecurityException, IOException {
assert isLoaded();
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
Cipher cipher = createCipher(Cipher.ENCRYPT_MODE, password, salt, iv, kdfIterationCount);
Cipher cipher = createCipher(Cipher.ENCRYPT_MODE, password, salt, iv, kdfIterationCount, cipherKeyBits);
try (
CipherOutputStream cipherStream = new CipherOutputStream(bytes, cipher);
DataOutputStream output = new DataOutputStream(cipherStream)
@ -456,7 +466,13 @@ public class KeyStoreWrapper implements SecureSettings {
byte[] iv = new byte[12];
random.nextBytes(iv);
// encrypted data
byte[] encryptedBytes = encrypt(password, salt, iv, getKdfIterationCountForVersion(CURRENT_VERSION));
byte[] encryptedBytes = encrypt(
password,
salt,
iv,
getKdfIterationCountForVersion(CURRENT_VERSION),
getCipherKeyBitsForVersion(CURRENT_VERSION)
);
// size of data block
output.writeInt(4 + salt.length + 4 + iv.length + 4 + encryptedBytes.length);

View file

@ -19,7 +19,9 @@ import java.util.function.Supplier;
public final class CachedSupplier<T> implements Supplier<T> {
private volatile Supplier<T> supplier;
private volatile T result;
// result does not need to be volatile as we only read it after reading that the supplier got nulled out. Since we null out the
// supplier after setting the result, total store order from an observed volatile write is sufficient to make a plain read safe.
private T result;
public static <R> CachedSupplier<R> wrap(Supplier<R> supplier) {
if (supplier instanceof CachedSupplier<R> c) {
@ -38,15 +40,19 @@ public final class CachedSupplier<T> implements Supplier<T> {
if (supplier == null) {
return result;
}
initResult();
return initResult();
}
private synchronized T initResult() {
var s = supplier;
if (s != null) {
T res = s.get();
result = res;
supplier = null;
return res;
} else {
return result;
}
private synchronized void initResult() {
if (supplier != null) {
result = supplier.get();
supplier = null;
}
}
}
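The publication pattern relied on above can be shown in isolation. The following is a sketch with hypothetical names (not the Elasticsearch class), where the only volatile field is the delegate supplier and the cached value is read only after observing that the delegate was nulled out:
    import java.util.function.Supplier;

    final class MemoizingSupplier<T> implements Supplier<T> {
        private volatile Supplier<T> delegate; // nulled out once the value has been computed
        private T value;                       // plain field: read only after seeing delegate == null

        MemoizingSupplier(Supplier<T> delegate) {
            this.delegate = delegate;
        }

        @Override
        public T get() {
            if (delegate == null) {
                // The volatile read of delegate happens-after the volatile write in computeOnce,
                // so the plain read of value below is guaranteed to see the computed result.
                return value;
            }
            return computeOnce();
        }

        private synchronized T computeOnce() {
            Supplier<T> d = delegate;
            if (d == null) {
                return value; // another thread already computed it
            }
            T v = d.get();
            value = v;
            delegate = null; // volatile write publishes value to later readers
            return v;
        }
    }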

View file

@ -21,69 +21,60 @@ public enum ChunkedToXContentHelper {
;
public static Iterator<ToXContent> startObject() {
return Iterators.single(((builder, params) -> builder.startObject()));
return Iterators.single((b, p) -> b.startObject());
}
public static Iterator<ToXContent> startObject(String name) {
return Iterators.single(((builder, params) -> builder.startObject(name)));
return Iterators.single((b, p) -> b.startObject(name));
}
public static Iterator<ToXContent> endObject() {
return Iterators.single(((builder, params) -> builder.endObject()));
return Iterators.single((b, p) -> b.endObject());
}
public static Iterator<ToXContent> startArray() {
return Iterators.single(((builder, params) -> builder.startArray()));
return Iterators.single((b, p) -> b.startArray());
}
public static Iterator<ToXContent> startArray(String name) {
return Iterators.single(((builder, params) -> builder.startArray(name)));
return Iterators.single((b, p) -> b.startArray(name));
}
public static Iterator<ToXContent> endArray() {
return Iterators.single(((builder, params) -> builder.endArray()));
}
public static Iterator<ToXContent> map(String name, Map<String, ?> map) {
return map(name, map, entry -> (ToXContent) (builder, params) -> builder.field(entry.getKey(), entry.getValue()));
}
public static Iterator<ToXContent> xContentFragmentValuesMap(String name, Map<String, ? extends ToXContent> map) {
return map(
name,
map,
entry -> (ToXContent) (builder, params) -> entry.getValue().toXContent(builder.startObject(entry.getKey()), params).endObject()
);
}
public static Iterator<ToXContent> xContentValuesMap(String name, Map<String, ? extends ToXContent> map) {
return map(
name,
map,
entry -> (ToXContent) (builder, params) -> entry.getValue().toXContent(builder.field(entry.getKey()), params)
);
return Iterators.single((b, p) -> b.endArray());
}
/**
* Like xContentFragmentValuesMap, but allows the underlying XContent object to define its own "name" with startObject(string)
* and endObject, rather than assuming that the key in the map should be the name in the XContent output.
* @param name name to use in the XContent for the outer object wrapping the map being rendered to XContent
* @param map map being rendered to XContent
* Defines an object named {@code name}, with the contents of each field set by {@code map}
*/
public static Iterator<ToXContent> xContentFragmentValuesMapCreateOwnName(String name, Map<String, ? extends ToXContent> map) {
return map(name, map, entry -> (ToXContent) (builder, params) -> entry.getValue().toXContent(builder, params));
public static Iterator<ToXContent> object(String name, Map<String, ?> map) {
return object(name, map, e -> (b, p) -> b.field(e.getKey(), e.getValue()));
}
/**
* Defines an object named {@code name}, with the contents of each field created from each entry in {@code map}
*/
public static Iterator<ToXContent> xContentObjectFields(String name, Map<String, ? extends ToXContent> map) {
return object(name, map, e -> (b, p) -> e.getValue().toXContent(b.field(e.getKey()), p));
}
/**
* Defines an object named {@code name}, where each field's value is itself an object created from the corresponding entry in {@code map}
*/
public static Iterator<ToXContent> xContentObjectFieldObjects(String name, Map<String, ? extends ToXContent> map) {
return object(name, map, e -> (b, p) -> e.getValue().toXContent(b.startObject(e.getKey()), p).endObject());
}
public static Iterator<ToXContent> field(String name, boolean value) {
return Iterators.single(((builder, params) -> builder.field(name, value)));
return Iterators.single((b, p) -> b.field(name, value));
}
public static Iterator<ToXContent> field(String name, long value) {
return Iterators.single(((builder, params) -> builder.field(name, value)));
return Iterators.single((b, p) -> b.field(name, value));
}
public static Iterator<ToXContent> field(String name, String value) {
return Iterators.single(((builder, params) -> builder.field(name, value)));
return Iterators.single((b, p) -> b.field(name, value));
}
public static Iterator<ToXContent> optionalField(String name, String value) {
@ -107,7 +98,7 @@ public enum ChunkedToXContentHelper {
}
public static Iterator<ToXContent> array(String name, Iterator<? extends ToXContent> contents) {
return Iterators.concat(ChunkedToXContentHelper.startArray(name), contents, ChunkedToXContentHelper.endArray());
return Iterators.concat(startArray(name), contents, endArray());
}
/**
@ -119,19 +110,32 @@ public enum ChunkedToXContentHelper {
* @return Iterator composing field name and value serialization
*/
public static Iterator<ToXContent> array(String name, Iterator<? extends ChunkedToXContentObject> contents, ToXContent.Params params) {
return Iterators.concat(
ChunkedToXContentHelper.startArray(name),
Iterators.flatMap(contents, c -> c.toXContentChunked(params)),
ChunkedToXContentHelper.endArray()
);
return Iterators.concat(startArray(name), Iterators.flatMap(contents, c -> c.toXContentChunked(params)), endArray());
}
public static <T extends ToXContent> Iterator<ToXContent> wrapWithObject(String name, Iterator<T> iterator) {
/**
* Defines an object named {@code name}, with the contents set by {@code iterator}
*/
public static Iterator<ToXContent> object(String name, Iterator<? extends ToXContent> iterator) {
return Iterators.concat(startObject(name), iterator, endObject());
}
public static <T> Iterator<ToXContent> map(String name, Map<String, T> map, Function<Map.Entry<String, T>, ToXContent> toXContent) {
return wrapWithObject(name, Iterators.map(map.entrySet().iterator(), toXContent));
/**
* Defines an object named {@code name}, with the contents set by calling {@code toXContent} on each entry in {@code map}
*/
public static <T> Iterator<ToXContent> object(String name, Map<String, T> map, Function<Map.Entry<String, T>, ToXContent> toXContent) {
return object(name, Iterators.map(map.entrySet().iterator(), toXContent));
}
/**
* Creates an Iterator of a single ToXContent object that serializes the given object as a single chunk. Just wraps {@link
* Iterators#single}, but still useful because it avoids any type ambiguity.
*
* @param item Item to wrap
* @return Singleton iterator for the given item.
*/
public static Iterator<ToXContent> chunk(ToXContent item) {
return Iterators.single(item);
}
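Taken together, these helpers compose into a chunked response body. A hypothetical usage sketch (field names and values are made up, and static imports of the helpers plus the usual ToXContent/Iterators types are assumed):
    // Sketch: builds { "name": ..., "metrics": { ... } } as a series of small chunks rather than one large object.
    public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) {
        return Iterators.concat(
            startObject(),
            chunk((b, p) -> b.field("name", "example")),
            object("metrics", Map.of("docs", 10, "segments", 3), e -> (b, p) -> b.field(e.getKey(), e.getValue())),
            endObject()
        );
    }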
/**

View file

@ -13,18 +13,9 @@ import java.util.Set;
/**
* This class specifies features for the features functionality itself.
* <p>
* This adds a feature {@code features_supported} indicating that a node supports node features.
* Nodes that do not support features won't have this feature in their feature sets,
* so this can be checked without needing to look at the node version.
*/
public class FeatureInfrastructureFeatures implements FeatureSpecification {
@Override
public Set<NodeFeature> getFeatures() {
return Set.of(FeatureService.FEATURES_SUPPORTED);
}
@Override
public Set<NodeFeature> getTestFeatures() {
return Set.of(FeatureService.TEST_FEATURES_ENABLED);

View file

@ -26,10 +26,6 @@ import java.util.Map;
*/
public class FeatureService {
/**
* A feature indicating that node features are supported.
*/
public static final NodeFeature FEATURES_SUPPORTED = new NodeFeature("features_supported", true);
public static final NodeFeature TEST_FEATURES_ENABLED = new NodeFeature("test_features_enabled");
private static final Logger logger = LogManager.getLogger(FeatureService.class);

View file

@ -31,7 +31,7 @@ import java.util.Set;
*/
public interface FeatureSpecification {
/**
* Returns a set of regular features that this node supports.
* Returns a set of features that this node supports.
*/
default Set<NodeFeature> getFeatures() {
return Set.of();

View file

@ -18,8 +18,6 @@ import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.DoubleValues;
import org.apache.lucene.search.DoubleValuesSource;
import org.apache.lucene.search.IndexSearcher;
import org.elasticsearch.search.profile.query.QueryProfiler;
import org.elasticsearch.search.vectors.QueryProfilerProvider;
import java.io.IOException;
import java.util.Arrays;
@ -29,12 +27,11 @@ import java.util.Objects;
* DoubleValuesSource that is used to calculate scores according to a similarity function for a KnnFloatVectorField, using the
* original vector values stored in the index
*/
public class VectorSimilarityFloatValueSource extends DoubleValuesSource implements QueryProfilerProvider {
public class VectorSimilarityFloatValueSource extends DoubleValuesSource {
private final String field;
private final float[] target;
private final VectorSimilarityFunction vectorSimilarityFunction;
private long vectorOpsCount;
public VectorSimilarityFloatValueSource(String field, float[] target, VectorSimilarityFunction vectorSimilarityFunction) {
this.field = field;
@ -52,7 +49,6 @@ public class VectorSimilarityFloatValueSource extends DoubleValuesSource impleme
return new DoubleValues() {
@Override
public double doubleValue() throws IOException {
vectorOpsCount++;
return vectorSimilarityFunction.compare(target, vectorValues.vectorValue(iterator.index()));
}
@ -73,11 +69,6 @@ public class VectorSimilarityFloatValueSource extends DoubleValuesSource impleme
return this;
}
@Override
public void profile(QueryProfiler queryProfiler) {
queryProfiler.addVectorOpsCount(vectorOpsCount);
}
@Override
public int hashCode() {
return Objects.hash(field, Arrays.hashCode(target), vectorSimilarityFunction);

View file

@ -487,11 +487,6 @@ public class KnnVectorQueryBuilder extends AbstractQueryBuilder<KnnVectorQueryBu
return this;
}
@Override
protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) throws IOException {
return super.doIndexMetadataRewrite(context);
}
@Override
protected Query doToQuery(SearchExecutionContext context) throws IOException {
MappedFieldType fieldType = context.getFieldType(fieldName);
@ -529,8 +524,8 @@ public class KnnVectorQueryBuilder extends AbstractQueryBuilder<KnnVectorQueryBu
String parentPath = context.nestedLookup().getNestedParent(fieldName);
Float numCandidatesFactor = rescoreVectorBuilder() == null ? null : rescoreVectorBuilder.numCandidatesFactor();
BitSetProducer parentBitSet = null;
if (parentPath != null) {
final BitSetProducer parentBitSet;
final Query parentFilter;
NestedObjectMapper originalObjectMapper = context.nestedScope().getObjectMapper();
if (originalObjectMapper != null) {
@ -559,6 +554,8 @@ public class KnnVectorQueryBuilder extends AbstractQueryBuilder<KnnVectorQueryBu
// Now join the filterQuery & parentFilter to provide the matching blocks of children
filterQuery = new ToChildBlockJoinQuery(filterQuery, parentBitSet);
}
}
return vectorFieldType.createKnnQuery(
queryVector,
k,
@ -569,8 +566,6 @@ public class KnnVectorQueryBuilder extends AbstractQueryBuilder<KnnVectorQueryBu
parentBitSet
);
}
return vectorFieldType.createKnnQuery(queryVector, k, adjustedNumCands, numCandidatesFactor, filterQuery, vectorSimilarity, null);
}
@Override
protected int doHashCode() {

View file

@ -32,16 +32,15 @@ public class RescoreKnnVectorQuery extends Query implements QueryProfilerProvide
private final String fieldName;
private final float[] floatTarget;
private final VectorSimilarityFunction vectorSimilarityFunction;
private final Integer k;
private final int k;
private final Query innerQuery;
private QueryProfilerProvider vectorProfiling;
private long vectorOperations = 0;
public RescoreKnnVectorQuery(
String fieldName,
float[] floatTarget,
VectorSimilarityFunction vectorSimilarityFunction,
Integer k,
int k,
Query innerQuery
) {
this.fieldName = fieldName;
@ -54,19 +53,12 @@ public class RescoreKnnVectorQuery extends Query implements QueryProfilerProvide
@Override
public Query rewrite(IndexSearcher searcher) throws IOException {
DoubleValuesSource valueSource = new VectorSimilarityFloatValueSource(fieldName, floatTarget, vectorSimilarityFunction);
// Vector similarity VectorSimilarityFloatValueSource keep track of the compared vectors - we need that in case we don't need
// to calculate top k and return directly the query to understand how many comparisons were done
vectorProfiling = (QueryProfilerProvider) valueSource;
FunctionScoreQuery functionScoreQuery = new FunctionScoreQuery(innerQuery, valueSource);
Query query = searcher.rewrite(functionScoreQuery);
if (k == null) {
// No need to calculate top k - let the request size limit the results.
return query;
}
// Retrieve top k documents from the rescored query
TopDocs topDocs = searcher.search(query, k);
vectorOperations = topDocs.totalHits.value();
ScoreDoc[] scoreDocs = topDocs.scoreDocs;
int[] docIds = new int[scoreDocs.length];
float[] scores = new float[scoreDocs.length];
@ -82,7 +74,7 @@ public class RescoreKnnVectorQuery extends Query implements QueryProfilerProvide
return innerQuery;
}
public Integer k() {
public int k() {
return k;
}
@ -92,10 +84,7 @@ public class RescoreKnnVectorQuery extends Query implements QueryProfilerProvide
queryProfilerProvider.profile(queryProfiler);
}
if (vectorProfiling == null) {
throw new IllegalStateException("Query should have been rewritten");
}
vectorProfiling.profile(queryProfiler);
queryProfiler.addVectorOpsCount(vectorOperations);
}
@Override

View file

@ -58,7 +58,7 @@ public class FeatureMigrationResults implements Metadata.ProjectCustom {
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) {
return ChunkedToXContentHelper.xContentValuesMap(RESULTS_FIELD.getPreferredName(), featureStatuses);
return ChunkedToXContentHelper.xContentObjectFields(RESULTS_FIELD.getPreferredName(), featureStatuses);
}
/**

View file

@ -36,7 +36,7 @@ public class BaseNodesXContentResponseTests extends ESTestCase {
final var fullResponse = new BaseNodesXContentResponse<>(ClusterName.DEFAULT, List.of(new TestNodeResponse(node)), List.of()) {
@Override
protected Iterator<? extends ToXContent> xContentChunks(ToXContent.Params outerParams) {
return ChunkedToXContentHelper.singleChunk((b, p) -> b.startObject("content").endObject());
return ChunkedToXContentHelper.chunk((b, p) -> b.startObject("content").endObject());
}
@Override

View file

@ -13,7 +13,6 @@ import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.RegExp;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.TransportVersion;
import org.elasticsearch.TransportVersions;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.Alias;
@ -1331,16 +1330,7 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
Settings indexSettings = indexSettings(IndexVersion.current(), 1, 0).build();
List<AliasMetadata> aliases = List.of(AliasMetadata.builder("alias1").build());
IndexMetadata indexMetadata = buildIndexMetadata(
"test",
aliases,
() -> null,
indexSettings,
4,
sourceIndexMetadata,
false,
TransportVersion.current()
);
IndexMetadata indexMetadata = buildIndexMetadata("test", aliases, () -> null, indexSettings, 4, sourceIndexMetadata, false);
assertThat(indexMetadata.getAliases().size(), is(1));
assertThat(indexMetadata.getAliases().keySet().iterator().next(), is("alias1"));
@ -1349,35 +1339,6 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS));
}
public void testBuildIndexMetadataWithTransportVersionBeforeEventIngestedRangeAdded() {
IndexMetadata sourceIndexMetadata = IndexMetadata.builder("parent")
.settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).build())
.numberOfShards(1)
.numberOfReplicas(0)
.primaryTerm(0, 3L)
.build();
Settings indexSettings = indexSettings(IndexVersion.current(), 1, 0).build();
List<AliasMetadata> aliases = List.of(AliasMetadata.builder("alias1").build());
IndexMetadata indexMetadata = buildIndexMetadata(
"test",
aliases,
() -> null,
indexSettings,
4,
sourceIndexMetadata,
false,
TransportVersions.V_8_0_0
);
assertThat(indexMetadata.getAliases().size(), is(1));
assertThat(indexMetadata.getAliases().keySet().iterator().next(), is("alias1"));
assertThat("The source index primary term must be used", indexMetadata.primaryTerm(0), is(3L));
assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS));
// on versions before event.ingested was added to cluster state, it should default to UNKNOWN, not NO_SHARDS
assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.UNKNOWN));
}
public void testGetIndexNumberOfRoutingShardsWithNullSourceIndex() {
Settings indexSettings = Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())

View file

@ -84,7 +84,7 @@ public class AllocationStatsServiceTests extends ESAllocationTestCase {
clusterService,
() -> clusterInfo,
createShardAllocator(),
new NodeAllocationStatsProvider(TEST_WRITE_LOAD_FORECASTER, ClusterSettings.createBuiltInClusterSettings())
new NodeAllocationStatsAndWeightsCalculator(TEST_WRITE_LOAD_FORECASTER, ClusterSettings.createBuiltInClusterSettings())
);
assertThat(
service.stats(),
@ -125,7 +125,7 @@ public class AllocationStatsServiceTests extends ESAllocationTestCase {
clusterService,
EmptyClusterInfoService.INSTANCE,
createShardAllocator(),
new NodeAllocationStatsProvider(TEST_WRITE_LOAD_FORECASTER, ClusterSettings.createBuiltInClusterSettings())
new NodeAllocationStatsAndWeightsCalculator(TEST_WRITE_LOAD_FORECASTER, ClusterSettings.createBuiltInClusterSettings())
);
assertThat(
service.stats(),
@ -182,7 +182,7 @@ public class AllocationStatsServiceTests extends ESAllocationTestCase {
);
}
},
new NodeAllocationStatsProvider(TEST_WRITE_LOAD_FORECASTER, ClusterSettings.createBuiltInClusterSettings())
new NodeAllocationStatsAndWeightsCalculator(TEST_WRITE_LOAD_FORECASTER, ClusterSettings.createBuiltInClusterSettings())
);
assertThat(
service.stats(),

View file

@ -456,18 +456,19 @@ public class DenseVectorFieldTypeTests extends FieldTypeTestCase {
);
// Total results is k, internal k is multiplied by oversample
checkRescoreQueryParameters(fieldType, 10, 200, 2.5F, null, 500, 10);
checkRescoreQueryParameters(fieldType, 10, 200, randomInt(), 2.5F, null, 500, 10);
// If numCands < k, update numCands to k
checkRescoreQueryParameters(fieldType, 10, 20, 2.5F, null, 50, 10);
checkRescoreQueryParameters(fieldType, 10, 20, randomInt(), 2.5F, null, 50, 10);
// Oversampling limits for num candidates
checkRescoreQueryParameters(fieldType, 1000, 1000, 11.0F, null, 10000, 1000);
checkRescoreQueryParameters(fieldType, 5000, 7500, 2.5F, null, 10000, 5000);
checkRescoreQueryParameters(fieldType, 1000, 1000, randomInt(), 11.0F, null, 10000, 1000);
checkRescoreQueryParameters(fieldType, 5000, 7500, randomInt(), 2.5F, null, 10000, 5000);
}
private static void checkRescoreQueryParameters(
DenseVectorFieldType fieldType,
Integer k,
int k,
int candidates,
int requestSize,
float numCandsFactor,
Integer expectedK,
int expectedCandidates,

View file

@ -9,8 +9,6 @@
package org.elasticsearch.search.vectors;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.KnnFloatVectorField;
import org.apache.lucene.index.DirectoryReader;
@ -33,11 +31,9 @@ import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.stream.Collectors;
@@ -49,21 +45,11 @@ import static org.hamcrest.Matchers.greaterThan;
public class RescoreKnnVectorQueryTests extends ESTestCase {
public static final String FIELD_NAME = "float_vector";
private final int numDocs;
private final Integer k;
public RescoreKnnVectorQueryTests(boolean useK) {
this.numDocs = randomIntBetween(10, 100);
this.k = useK ? randomIntBetween(1, numDocs - 1) : null;
}
public void testRescoreDocs() throws Exception {
int numDocs = randomIntBetween(10, 100);
int numDims = randomIntBetween(5, 100);
Integer adjustedK = k;
if (k == null) {
adjustedK = numDocs;
}
int k = randomIntBetween(1, numDocs - 1);
try (Directory d = newDirectory()) {
addRandomDocuments(numDocs, d, numDims);
@@ -77,7 +63,7 @@ public class RescoreKnnVectorQueryTests extends ESTestCase {
FIELD_NAME,
queryVector,
VectorSimilarityFunction.COSINE,
adjustedK,
k,
new MatchAllDocsQuery()
);
@@ -86,7 +72,7 @@ public class RescoreKnnVectorQueryTests extends ESTestCase {
Map<Integer, Float> rescoredDocs = Arrays.stream(docs.scoreDocs)
.collect(Collectors.toMap(scoreDoc -> scoreDoc.doc, scoreDoc -> scoreDoc.score));
assertThat(rescoredDocs.size(), equalTo(adjustedK));
assertThat(rescoredDocs.size(), equalTo(k));
Collection<Float> rescoredScores = new HashSet<>(rescoredDocs.values());
@@ -113,7 +99,7 @@ public class RescoreKnnVectorQueryTests extends ESTestCase {
assertThat(rescoredDocs.size(), equalTo(0));
// Check top scoring docs are contained in rescored docs
for (int i = 0; i < adjustedK; i++) {
for (int i = 0; i < k; i++) {
Float topScore = topK.poll();
if (rescoredScores.contains(topScore) == false) {
fail("Top score " + topScore + " not contained in rescored doc scores " + rescoredScores);
@@ -124,7 +110,9 @@ public class RescoreKnnVectorQueryTests extends ESTestCase {
}
public void testProfiling() throws Exception {
int numDocs = randomIntBetween(10, 100);
int numDims = randomIntBetween(5, 100);
int k = randomIntBetween(1, numDocs - 1);
try (Directory d = newDirectory()) {
addRandomDocuments(numDocs, d, numDims);
@@ -132,13 +120,13 @@ public class RescoreKnnVectorQueryTests extends ESTestCase {
try (IndexReader reader = DirectoryReader.open(d)) {
float[] queryVector = randomVector(numDims);
checkProfiling(queryVector, reader, new MatchAllDocsQuery());
checkProfiling(queryVector, reader, new MockQueryProfilerProvider(randomIntBetween(1, 100)));
checkProfiling(k, numDocs, queryVector, reader, new MatchAllDocsQuery());
checkProfiling(k, numDocs, queryVector, reader, new MockQueryProfilerProvider(randomIntBetween(1, 100)));
}
}
}
private void checkProfiling(float[] queryVector, IndexReader reader, Query innerQuery) throws IOException {
private void checkProfiling(int k, int numDocs, float[] queryVector, IndexReader reader, Query innerQuery) throws IOException {
RescoreKnnVectorQuery rescoreKnnVectorQuery = new RescoreKnnVectorQuery(
FIELD_NAME,
queryVector,
@@ -229,13 +217,4 @@ public class RescoreKnnVectorQueryTests extends ESTestCase {
w.forceMerge(1);
}
}
@ParametersFactory
public static Iterable<Object[]> parameters() {
List<Object[]> params = new ArrayList<>();
params.add(new Object[] { true });
params.add(new Object[] { false });
return params;
}
}
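// Condensed sketch of what each test body now does after dropping the @ParametersFactory: k is
// drawn inline per test rather than being a nullable class-level parameter. The calls mirror the
// ones visible in this diff; the searcher setup and the exact assertion form are assumptions.
int k = randomIntBetween(1, numDocs - 1);
float[] queryVector = randomVector(numDims);
Query query = new RescoreKnnVectorQuery(FIELD_NAME, queryVector, VectorSimilarityFunction.COSINE, k, new MatchAllDocsQuery());
TopDocs docs = newSearcher(reader).search(query, numDocs);
assertThat(docs.scoreDocs.length, equalTo(k)); // exactly k rescored hits are expected back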

View file

@@ -26,7 +26,7 @@ import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.FailedShard;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsAndWeightsCalculator;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
@@ -438,12 +438,10 @@ public abstract class ESAllocationTestCase extends ESTestCase {
}
}
protected static final NodeAllocationStatsProvider EMPTY_NODE_ALLOCATION_STATS = new NodeAllocationStatsProvider(
WriteLoadForecaster.DEFAULT,
createBuiltInClusterSettings()
) {
protected static final NodeAllocationStatsAndWeightsCalculator EMPTY_NODE_ALLOCATION_STATS =
new NodeAllocationStatsAndWeightsCalculator(WriteLoadForecaster.DEFAULT, createBuiltInClusterSettings()) {
@Override
public Map<String, NodeAllocationAndClusterBalanceStats> stats(
public Map<String, NodeAllocationStatsAndWeight> nodesAllocationStatsAndWeights(
Metadata metadata,
RoutingNodes routingNodes,
ClusterInfo clusterInfo,

View file

@@ -119,7 +119,7 @@ public class AutoscalingMetadata implements Metadata.ClusterCustom {
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) {
return ChunkedToXContentHelper.xContentValuesMap(POLICIES_FIELD.getPreferredName(), policies);
return ChunkedToXContentHelper.xContentObjectFields(POLICIES_FIELD.getPreferredName(), policies);
}
@Override

View file

@@ -151,9 +151,9 @@ public class AutoFollowMetadata extends AbstractNamedDiffable<Metadata.ProjectCu
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) {
return Iterators.concat(
ChunkedToXContentHelper.xContentFragmentValuesMap(PATTERNS_FIELD.getPreferredName(), patterns),
ChunkedToXContentHelper.map(FOLLOWED_LEADER_INDICES_FIELD.getPreferredName(), followedLeaderIndexUUIDs),
ChunkedToXContentHelper.map(HEADERS.getPreferredName(), headers)
ChunkedToXContentHelper.xContentObjectFieldObjects(PATTERNS_FIELD.getPreferredName(), patterns),
ChunkedToXContentHelper.object(FOLLOWED_LEADER_INDICES_FIELD.getPreferredName(), followedLeaderIndexUUIDs),
ChunkedToXContentHelper.object(HEADERS.getPreferredName(), headers)
);
}
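// A hedged, non-chunked sketch of the output shape these helper renames are assumed to preserve:
// xContentObjectFieldObjects wraps each map value in its own named object, while object renders a
// plain map as a single named object. Illustrative only; this is not how the chunked helpers are
// implemented, and the builder-based form below is an assumption.
builder.startObject(PATTERNS_FIELD.getPreferredName());
for (var pattern : patterns.entrySet()) {
    builder.startObject(pattern.getKey());
    pattern.getValue().toXContent(builder, params); // the pattern body is a fragment inside its own object
    builder.endObject();
}
builder.endObject();
builder.field(FOLLOWED_LEADER_INDICES_FIELD.getPreferredName(), followedLeaderIndexUUIDs);
builder.field(HEADERS.getPreferredName(), headers);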

Some files were not shown because too many files have changed in this diff.