Merge remote-tracking branch 'upstream-main/main' into merge-main-9-1-25

This commit is contained in:
Simon Cooper 2025-01-09 16:09:21 +00:00
commit 2e04ed09a9
239 changed files with 3069 additions and 1469 deletions

View file

@ -58,6 +58,7 @@ import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
public class KeyStoreWrapperTests extends ESTestCase {
@ -436,17 +437,8 @@ public class KeyStoreWrapperTests extends ESTestCase {
public void testLegacyV3() throws GeneralSecurityException, IOException {
assumeFalse("Cannot open unprotected keystore on FIPS JVM", inFipsJvm());
final Path configDir = createTempDir();
final Path keystore = configDir.resolve("elasticsearch.keystore");
copyKeyStoreFromResourceToConfigDir(configDir, "/format-v3-elasticsearch.keystore");
try (
InputStream is = KeyStoreWrapperTests.class.getResourceAsStream("/format-v3-elasticsearch.keystore");
OutputStream os = Files.newOutputStream(keystore)
) {
final byte[] buffer = new byte[4096];
int readBytes;
while ((readBytes = is.read(buffer)) > 0) {
os.write(buffer, 0, readBytes);
}
}
final KeyStoreWrapper wrapper = KeyStoreWrapper.load(configDir);
assertNotNull(wrapper);
wrapper.decrypt(new char[0]);
@ -460,9 +452,31 @@ public class KeyStoreWrapperTests extends ESTestCase {
public void testLegacyV5() throws GeneralSecurityException, IOException {
final Path configDir = createTempDir();
copyKeyStoreFromResourceToConfigDir(configDir, "/format-v5-with-password-elasticsearch.keystore");
final KeyStoreWrapper wrapper = KeyStoreWrapper.load(configDir);
assertNotNull(wrapper);
wrapper.decrypt("keystorepassword".toCharArray());
assertThat(wrapper.getFormatVersion(), equalTo(5));
assertThat(wrapper.getSettingNames(), equalTo(Set.of("keystore.seed")));
}
public void testLegacyV6() throws GeneralSecurityException, IOException {
final Path configDir = createTempDir();
copyKeyStoreFromResourceToConfigDir(configDir, "/format-v6-elasticsearch.keystore");
final KeyStoreWrapper wrapper = KeyStoreWrapper.load(configDir);
assertNotNull(wrapper);
wrapper.decrypt("keystorepassword".toCharArray());
assertThat(wrapper.getFormatVersion(), equalTo(6));
assertThat(wrapper.getSettingNames(), equalTo(Set.of("keystore.seed", "string")));
assertThat(wrapper.getString("string"), equalTo("value"));
}
private void copyKeyStoreFromResourceToConfigDir(Path configDir, String name) throws IOException {
final Path keystore = configDir.resolve("elasticsearch.keystore");
try (
InputStream is = KeyStoreWrapperTests.class.getResourceAsStream("/format-v5-with-password-elasticsearch.keystore");
InputStream is = KeyStoreWrapperTests.class.getResourceAsStream(name); //
OutputStream os = Files.newOutputStream(keystore)
) {
final byte[] buffer = new byte[4096];
@ -471,11 +485,6 @@ public class KeyStoreWrapperTests extends ESTestCase {
os.write(buffer, 0, readBytes);
}
}
final KeyStoreWrapper wrapper = KeyStoreWrapper.load(configDir);
assertNotNull(wrapper);
wrapper.decrypt("keystorepassword".toCharArray());
assertThat(wrapper.getFormatVersion(), equalTo(5));
assertThat(wrapper.getSettingNames(), equalTo(Set.of("keystore.seed")));
}
public void testSerializationNewlyCreated() throws Exception {
@ -487,6 +496,7 @@ public class KeyStoreWrapperTests extends ESTestCase {
wrapper.writeTo(out);
final KeyStoreWrapper fromStream = new KeyStoreWrapper(out.bytes().streamInput());
assertThat(fromStream.getFormatVersion(), is(KeyStoreWrapper.CURRENT_VERSION));
assertThat(fromStream.getSettingNames(), hasSize(2));
assertThat(fromStream.getSettingNames(), containsInAnyOrder("string_setting", "keystore.seed"));
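For context, the extracted copyKeyStoreFromResourceToConfigDir helper simply streams a bundled keystore resource into the config directory. A more compact equivalent could use InputStream.transferTo (Java 9+); this is a hedged sketch assuming the same imports as the test class, not what the commit actually uses:

// Hypothetical compact variant; the commit keeps the explicit 4 KB buffer loop shown above.
private void copyKeyStoreFromResourceToConfigDir(Path configDir, String name) throws IOException {
    final Path keystore = configDir.resolve("elasticsearch.keystore");
    try (
        InputStream is = KeyStoreWrapperTests.class.getResourceAsStream(name);
        OutputStream os = Files.newOutputStream(keystore)
    ) {
        is.transferTo(os); // InputStream.transferTo copies the whole stream in one call
    }
}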

View file

@ -0,0 +1,5 @@
pr: 118669
summary: "[Connector API] Support soft-deletes of connectors"
area: Extract&Transform
type: feature
issues: []

View file

@ -1,5 +0,0 @@
pr: 119389
summary: Restrict Connector APIs to manage/monitor_connector privileges
area: Extract&Transform
type: feature
issues: []

View file

@ -0,0 +1,6 @@
pr: 119748
summary: Issue S3 web identity token refresh call with sufficient permissions
area: Snapshot/Restore
type: bug
issues:
- 119747

View file

@ -0,0 +1,5 @@
pr: 119749
summary: Strengthen encryption for elasticsearch-keystore tool to AES 256
area: Infra/CLI
type: enhancement
issues: []

View file

@ -0,0 +1,6 @@
pr: 119750
summary: "ESQL: `connect_transport_exception` should be thrown instead of `verification_exception`\
\ when ENRICH-ing if remote is disconnected"
area: Search
type: bug
issues: []

View file

@ -0,0 +1,6 @@
pr: 119793
summary: Resolve/cluster should mark remotes as not connected when a security exception
is thrown
area: CCS
type: bug
issues: []

View file

@ -0,0 +1,5 @@
pr: 119797
summary: "[Inference API] Fix bug checking for e5 or reranker default IDs"
area: Machine Learning
type: bug
issues: []

View file

@ -6,14 +6,14 @@
beta::[]
.New API reference
[sidebar]
--
For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs].
--
Removes a connector and associated sync jobs.
Soft-deletes a connector and removes associated sync jobs.
This is a destructive action that is not recoverable.
Note: this action doesn't delete any API key, ingest pipeline or data index associated with the connector. These need to be removed manually.
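As an illustration of the soft-delete behaviour described above (an example added for orientation, not part of the commit; the connector id and host are placeholders), a delete call through the low-level Java REST client could look like this:

// Hypothetical example using the low-level REST client; "my-connector" is a made-up id.
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class DeleteConnectorExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
            Response response = client.performRequest(new Request("DELETE", "/_connector/my-connector"));
            // The connector is soft-deleted; its API key, pipeline and index still need manual cleanup.
            System.out.println(response.getStatusLine());
        }
    }
}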

View file

@ -33,6 +33,9 @@ To get started with Connector APIs, check out <<es-connectors-tutorial-api, our
`<connector_id>`::
(Required, string)
`include_deleted`::
(Optional, boolean) A flag indicating whether to also return connectors that have been soft-deleted. Defaults to `false`.
[[get-connector-api-response-codes]]
==== {api-response-codes-title}
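To illustrate the new parameter (a sketch added here, not taken from the commit; it assumes an existing low-level RestClient named client, and the connector id is a placeholder):

// Hypothetical example: include soft-deleted connectors when fetching one by id.
Request request = new Request("GET", "/_connector/my-connector"); // placeholder id
request.addParameter("include_deleted", "true");                  // the new flag; defaults to false
Response response = client.performRequest(request);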

View file

@ -47,6 +47,9 @@ To get started with Connector APIs, check out <<es-connectors-tutorial-api, our
`service_type`::
(Optional, string) A comma-separated list of connector service types, used to filter search results.
`include_deleted`::
(Optional, boolean) A flag indicating whether to also return connectors that have been soft-deleted. Defaults to `false`.
[[list-connector-api-example]]
==== {api-examples-title}

View file

@ -28,8 +28,19 @@ For each cluster in the index expression, information is returned about:
3. whether there are any indices, aliases or data streams on that cluster that match
the index expression
4. whether the search is likely to have errors returned when you do the {ccs} (including any
authorization errors if your user does not have permission to query the index)
5. cluster version information, including the Elasticsearch server version
authorization errors if your user does not have permission to query a remote cluster or
the indices on that cluster)
5. (in some cases) cluster version information, including the Elasticsearch server version
[TIP]
====
Whenever a security exception is returned for a remote cluster, that remote
will always be marked as connected=false in the response, since your user does not have
permissions to access that cluster (or perhaps the remote index) you are querying.
Once the proper security permissions are obtained, then you can rely on the `connected` field
in the response to determine whether the remote cluster is available and ready for querying.
====
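A rough sketch of checking that flag from client code (an assumption added for illustration; my_remote, the index pattern and the RestClient variable client are placeholders, and the commit itself only changes server-side behaviour and docs):

// Hypothetical example: a remote that returned a security exception is reported with "connected": false.
Request request = new Request("GET", "/_resolve/cluster/my_remote:logs-*");
Response response = client.performRequest(request);
String body = org.apache.http.util.EntityUtils.toString(response.getEntity());
System.out.println(body); // inspect the per-cluster "connected" field in the JSON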
////
[source,console]

View file

@ -1167,6 +1167,12 @@ tag::inference-config-text-embedding-size[]
The number of dimensions in the embedding vector produced by the model.
end::inference-config-text-embedding-size[]
tag::inference-config-text-expansion[]
The text expansion task works with sparse embedding models to transform an input sequence
into a vector of weighted tokens. These embeddings capture semantic meanings and
context and can be used in a <<sparse-vector,sparse vector>> field for powerful insights.
end::inference-config-text-expansion[]
tag::inference-config-text-similarity[]
Text similarity takes an input sequence and compares it with another input sequence. This is commonly referred to
as cross-encoding. This task is useful for ranking document text when comparing it to another provided text input.

View file

@ -395,10 +395,10 @@ the model definition is not supplied.
(Required, object)
The default configuration for inference. This can be: `regression`,
`classification`, `fill_mask`, `ner`, `question_answering`,
`text_classification`, `text_embedding` or `zero_shot_classification`.
`text_classification`, `text_embedding`, `text_expansion` or `zero_shot_classification`.
If `regression` or `classification`, it must match the `target_type` of the
underlying `definition.trained_model`. If `fill_mask`, `ner`,
`question_answering`, `text_classification`, or `text_embedding`; the
`question_answering`, `text_classification`, `text_embedding` or `text_expansion`; the
`model_type` must be `pytorch`.
+
.Properties of `inference_config`
@ -592,6 +592,25 @@ Refer to <<tokenization-properties>> to review the properties of the
`tokenization` object.
=====
`text_expansion`:::
(Object, optional)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-expansion]
+
.Properties of text_expansion inference
[%collapsible%open]
=====
`results_field`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]
`tokenization`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization]
+
Refer to <<tokenization-properties>> to review the properties of the
`tokenization` object.
=====
`text_similarity`:::
(Object, optional)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarity]
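For orientation, a create-model request that selects the new text_expansion default inference config might be sent as below; this is a hedged sketch, not from the commit, and the model id, results field and input field name are placeholders (it also assumes an existing low-level RestClient named client):

// Hypothetical example: declare text_expansion as the default inference config for a PyTorch model.
Request request = new Request("PUT", "/_ml/trained_models/my-sparse-model"); // placeholder id
request.setJsonEntity("""
    {
      "model_type": "pytorch",
      "inference_config": { "text_expansion": { "results_field": "tokens" } },
      "input": { "field_names": [ "text_field" ] }
    }
    """);
client.performRequest(request);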

View file

@ -77,7 +77,7 @@ and latency of the network interconnections between all nodes in the cluster.
You must therefore ensure that the storage and networking available to the
nodes in your cluster are good enough to meet your performance goals.
[[dangling-indices]]
[[dangling-index]]
==== Dangling indices
When a node joins the cluster, if it finds any shards stored in its local When a node joins the cluster, if it finds any shards stored in its local

View file

@ -1,6 +1,7 @@
[[modules-gateway]]
=== Local gateway settings
[[dangling-indices]]
The local gateway stores the cluster state and shard data across full
cluster restarts.

View file

@ -287,7 +287,7 @@ include::network/tracers.asciidoc[]
include::network/threading.asciidoc[]
[[readiness-tcp-port]]
[[tcp-readiness-port]]
==== TCP readiness port
preview::[]

View file

@ -46,6 +46,8 @@ The following additional roles are available:
* `voting_only`
[[coordinating-only-node]]If you leave `node.roles` unset, then the node is considered to be a <<coordinating-only-node-role,coordinating only node>>.
[IMPORTANT]
====
If you set `node.roles`, ensure you specify every node role your cluster needs.

View file

@ -13,6 +13,10 @@ All of the monitoring metrics are stored in {es}, which enables you to easily
visualize the data in {kib}. By default, the monitoring metrics are stored in
local indices.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
TIP: In production, we strongly recommend using a separate monitoring cluster.
Using a separate monitoring cluster prevents production cluster outages from
impacting your ability to access your monitoring data. It also prevents

View file

@ -359,7 +359,7 @@ node.roles: [ ingest ]
----
[discrete]
[[coordinating-only-node]]
[[coordinating-only-node-role]]
==== Coordinating only node
If you take away the ability to be able to handle master duties, to hold data,

View file

@ -19,6 +19,7 @@ CAUTION: Setting your own JVM options is generally not recommended and could neg
impact performance and stability. Using the {es}-provided defaults
is recommended in most circumstances.
[[readiness-tcp-port]]
NOTE: Do not modify the root `jvm.options` file. Use files in `jvm.options.d/` instead.
[[jvm-options-syntax]]
@ -153,7 +154,7 @@ options. We do not recommend using `ES_JAVA_OPTS` in production.
NOTE: If you are running {es} as a Windows service, you can change the heap size
using the service manager. See <<windows-service>>.
[[heap-dump-path]]
[[heap-dump-path-setting]]
include::important-settings/heap-dump-path.asciidoc[leveloffset=-1]
[[gc-logging]]

View file

@ -41,6 +41,7 @@ include::important-settings/discovery-settings.asciidoc[]
include::important-settings/heap-size.asciidoc[]
[[heap-dump-path]]
include::important-settings/heap-dump-path.asciidoc[]
include::important-settings/gc-logging.asciidoc[]

View file

@ -6,6 +6,10 @@
This section provides a series of troubleshooting solutions aimed at helping users
fix problems that an {es} deployment might encounter.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
[discrete]
[[troubleshooting-general]]
=== General

View file

@ -12,6 +12,10 @@ memory pressure if usage consistently exceeds 85%.
See https://www.youtube.com/watch?v=k3wYlRVbMSw[this video] for a walkthrough
of diagnosing circuit breaker errors.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
[discrete]
[[diagnose-circuit-breaker-errors]]
==== Diagnose circuit breaker errors

View file

@ -11,3 +11,7 @@ include::{es-ref-dir}/tab-widgets/troubleshooting/data/diagnose-unassigned-shard
See https://www.youtube.com/watch?v=v2mbeSd1vTQ[this video]
for a walkthrough of monitoring allocation health.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****

View file

@ -21,6 +21,10 @@ usage falls below the <<cluster-routing-watermark-high,high disk watermark>>.
To achieve this, {es} attempts to rebalance some of the affected node's shards
to other nodes in the same data tier.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
[[fix-watermark-errors-rebalance]]
==== Monitor rebalancing

View file

@ -11,6 +11,10 @@ depleted, {es} will reject search requests until more threads are available.
You might experience high CPU usage if a <<data-tiers,data tier>>, and therefore the nodes assigned to that tier, is experiencing more traffic than other tiers. This imbalance in resource utilization is also known as <<hotspotting,hot spotting>>.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
[discrete]
[[diagnose-high-cpu-usage]]
==== Diagnose high CPU usage

View file

@ -6,6 +6,10 @@ High JVM memory usage can degrade cluster performance and trigger
taking steps to reduce memory pressure if a node's JVM memory usage consistently
exceeds 85%.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
[discrete]
[[diagnose-high-jvm-memory-pressure]]
==== Diagnose high JVM memory pressure

View file

@ -11,6 +11,10 @@ may occur in {es} when resource utilizations are unevenly distributed across
ongoing significantly unique utilization may lead to cluster bottlenecks
and should be reviewed.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
See link:https://www.youtube.com/watch?v=Q5ODJ5nIKAM[this video] for a walkthrough of troubleshooting a hot spotting issue.
[discrete]

View file

@ -22,6 +22,10 @@ the remaining problems so management and cleanup activities can proceed.
See https://www.youtube.com/watch?v=v2mbeSd1vTQ[this video]
for a walkthrough of monitoring allocation health.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
[discrete]
[[diagnose-cluster-status]]
==== Diagnose your cluster status

View file

@ -12,6 +12,10 @@ thread pool returns a `TOO_MANY_REQUESTS` error message.
* High <<index-modules-indexing-pressure,indexing pressure>> that exceeds the
<<memory-limits,`indexing_pressure.memory.limit`>>.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
[discrete]
[[check-rejected-tasks]]
==== Check rejected tasks
@ -69,7 +73,7 @@ These stats are cumulative from node startup.
Indexing pressure rejections appear as an
`EsRejectedExecutionException`, and indicate that they were rejected due
to `coordinating_and_primary_bytes`, `coordinating`, `primary`, or `replica`.
to `combined_coordinating_and_primary`, `coordinating`, `primary`, or `replica`.
These errors are often related to <<task-queue-backlog,backlogged tasks>>,
<<docs-bulk,bulk index>> sizing, or the ingest target's

View file

@ -16,5 +16,8 @@ In order to fix this follow the next steps:
include::{es-ref-dir}/tab-widgets/troubleshooting/data/increase-cluster-shard-limit-widget.asciidoc[]
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****

View file

@ -14,5 +14,9 @@ In order to fix this follow the next steps:
include::{es-ref-dir}/tab-widgets/troubleshooting/data/total-shards-per-node-widget.asciidoc[]
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****

View file

@ -17,5 +17,8 @@ In order to fix this follow the next steps:
include::{es-ref-dir}/tab-widgets/troubleshooting/data/increase-tier-capacity-widget.asciidoc[]
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****

View file

@ -15,6 +15,10 @@ https://discuss.elastic.co[Elastic Discuss] to minimize turnaround time.
See this https://www.youtube.com/watch?v=Bb6SaqhqYHw[this video] for a walkthrough of capturing an {es} diagnostic.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
[discrete]
[[diagnostic-tool-requirements]]
=== Requirements

View file

@ -3,6 +3,10 @@
This guide describes how to fix common errors and problems with {es} clusters.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
<<fix-watermark-errors,Watermark errors>>::
Fix watermark errors that occur when a data node is critically low on disk space
and has reached the flood-stage disk usage watermark.

View file

@ -14,5 +14,8 @@ information about the problem:
include::{es-ref-dir}/tab-widgets/troubleshooting/snapshot/repeated-snapshot-failures-widget.asciidoc[]
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****

View file

@ -8,3 +8,7 @@ The current shards capacity of the cluster is available in the
<<health-api-response-details-shards-capacity, health API shards capacity section>>.
include::{es-ref-dir}/tab-widgets/troubleshooting/troubleshooting-shards-capacity-widget.asciidoc[]
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****

View file

@ -7,6 +7,10 @@ Elasticsearch balances shards across data tiers to achieve a good compromise bet
* disk usage
* write load (for indices in data streams)
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
Elasticsearch does not take into account the amount or complexity of search queries when rebalancing shards.
This is indirectly achieved by balancing shard count and disk usage.

View file

@ -17,6 +17,10 @@ logs.
* The master may appear busy due to frequent cluster state updates.
****
If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps].
****
To troubleshoot a cluster in this state, first ensure the cluster has a
<<discovery-troubleshooting,stable master>>. Next, focus on the nodes
unexpectedly leaving the cluster ahead of all other issues. It will not be

View file

@ -92,7 +92,7 @@ public final class IngestGeoIpMetadata implements Metadata.ProjectCustom {
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) {
return Iterators.concat(ChunkedToXContentHelper.xContentValuesMap(DATABASES_FIELD.getPreferredName(), databases));
return Iterators.concat(ChunkedToXContentHelper.xContentObjectFields(DATABASES_FIELD.getPreferredName(), databases));
}
@Override

View file

@ -434,7 +434,7 @@ class S3Service implements Closeable {
public void onFileChanged(Path file) {
if (file.equals(webIdentityTokenFileSymlink)) {
LOGGER.debug("WS web identity token file [{}] changed, updating credentials", file);
credentialsProvider.refresh();
SocketAccess.doPrivilegedVoid(credentialsProvider::refresh);
}
}
});
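The functional change is that the credentials refresh now runs inside a privileged block. As a general illustration of that pattern (not the plugin's actual SocketAccess helper, whose implementation details are assumed here):

// Hypothetical sketch of a doPrivilegedVoid-style helper.
import java.security.AccessController;
import java.security.PrivilegedAction;

final class PrivilegedCalls {
    static void doPrivilegedVoid(Runnable action) {
        // Run with the plugin's own permissions so SecurityManager checks on the
        // underlying network call (the credentials refresh) succeed.
        AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
            action.run();
            return null;
        });
    }
}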

View file

@ -84,9 +84,6 @@ tests:
issue: https://github.com/elastic/elasticsearch/issues/115816
- class: org.elasticsearch.xpack.application.connector.ConnectorIndexServiceTests
issue: https://github.com/elastic/elasticsearch/issues/116087
- class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT
method: testLookbackWithIndicesOptions
issue: https://github.com/elastic/elasticsearch/issues/116127
- class: org.elasticsearch.xpack.test.rest.XPackRestIT
method: test {p0=transform/transforms_start_stop/Test start already started transform}
issue: https://github.com/elastic/elasticsearch/issues/98802
@ -152,8 +149,6 @@ tests:
- class: org.elasticsearch.test.rest.yaml.CcsCommonYamlTestSuiteIT
method: test {p0=search.highlight/50_synthetic_source/text multi unified from vectors}
issue: https://github.com/elastic/elasticsearch/issues/117815
- class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT
issue: https://github.com/elastic/elasticsearch/issues/111319
- class: org.elasticsearch.xpack.esql.plugin.ClusterRequestTests
method: testFallbackIndicesOptions
issue: https://github.com/elastic/elasticsearch/issues/117937
@ -265,6 +260,18 @@ tests:
- class: org.elasticsearch.script.mustache.SearchTemplateRequestTests
method: testSerialization
issue: https://github.com/elastic/elasticsearch/issues/119822
- class: org.elasticsearch.index.rankeval.RankEvalRequestTests
method: testSerialization
issue: https://github.com/elastic/elasticsearch/issues/119859
- class: org.elasticsearch.index.rankeval.RankEvalRequestTests
method: testEqualsAndHashcode
issue: https://github.com/elastic/elasticsearch/issues/119860
- class: org.elasticsearch.index.rankeval.RankEvalRequestTests
method: testConcurrentSerialization
issue: https://github.com/elastic/elasticsearch/issues/119861
- class: org.elasticsearch.index.rankeval.RankEvalRequestTests
method: testConcurrentEquals
issue: https://github.com/elastic/elasticsearch/issues/119862
# Examples:
#

View file

@ -27,7 +27,11 @@ public class HdfsClassPatcher {
"org/apache/hadoop/util/ShutdownHookManager.class",
ShutdownHookManagerPatcher::new,
"org/apache/hadoop/util/Shell.class",
ShellPatcher::new
ShellPatcher::new,
"org/apache/hadoop/security/UserGroupInformation.class",
SubjectGetSubjectPatcher::new,
"org/apache/hadoop/security/authentication/client/KerberosAuthenticator.class",
SubjectGetSubjectPatcher::new
);
public static void main(String[] args) throws Exception {

View file

@ -0,0 +1,60 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.hdfs.patch;
import org.objectweb.asm.ClassVisitor;
import org.objectweb.asm.ClassWriter;
import org.objectweb.asm.MethodVisitor;
import org.objectweb.asm.Type;
import static org.objectweb.asm.Opcodes.ASM9;
import static org.objectweb.asm.Opcodes.INVOKESTATIC;
import static org.objectweb.asm.Opcodes.POP;
class SubjectGetSubjectPatcher extends ClassVisitor {
SubjectGetSubjectPatcher(ClassWriter classWriter) {
super(ASM9, classWriter);
}
@Override
public MethodVisitor visitMethod(int access, String name, String descriptor, String signature, String[] exceptions) {
return new ReplaceCallMethodVisitor(super.visitMethod(access, name, descriptor, signature, exceptions), name, access, descriptor);
}
/**
* Replaces calls to Subject.getSubject(context); with calls to Subject.current();
*/
private static class ReplaceCallMethodVisitor extends MethodVisitor {
private static final String SUBJECT_CLASS_INTERNAL_NAME = "javax/security/auth/Subject";
private static final String METHOD_NAME = "getSubject";
ReplaceCallMethodVisitor(MethodVisitor methodVisitor, String name, int access, String descriptor) {
super(ASM9, methodVisitor);
}
@Override
public void visitMethodInsn(int opcode, String owner, String name, String descriptor, boolean isInterface) {
if (opcode == INVOKESTATIC && SUBJECT_CLASS_INTERNAL_NAME.equals(owner) && METHOD_NAME.equals(name)) {
// Get rid of the extra arg on the stack
mv.visitInsn(POP);
// Call Subject.current()
mv.visitMethodInsn(
INVOKESTATIC,
SUBJECT_CLASS_INTERNAL_NAME,
"current",
Type.getMethodDescriptor(Type.getObjectType(SUBJECT_CLASS_INTERNAL_NAME)),
false
);
} else {
super.visitMethodInsn(opcode, owner, name, descriptor, isInterface);
}
}
}
}
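For readers unfamiliar with ASM, a class visitor like this is normally driven by a ClassReader/ClassWriter pair. A usage sketch follows (an assumption about how the patcher is applied; the surrounding HdfsClassPatcher wiring is not shown in this hunk):

// Hypothetical usage: rewrite Subject.getSubject(ctx) calls in raw class bytes.
import org.objectweb.asm.ClassReader;
import org.objectweb.asm.ClassWriter;

final class PatchExample {
    static byte[] patch(byte[] originalClassBytes) {
        ClassReader reader = new ClassReader(originalClassBytes);
        ClassWriter writer = new ClassWriter(reader, 0);
        reader.accept(new SubjectGetSubjectPatcher(writer), 0); // replaces the call with Subject.current()
        return writer.toByteArray();
    }
}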

View file

@ -1,61 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.upgrades;
import com.carrotsearch.randomizedtesting.annotations.Name;
import org.elasticsearch.client.Request;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.features.FeatureService;
import org.junit.Before;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.hasSize;
public class ClusterFeatureMigrationIT extends AbstractRollingUpgradeTestCase {
@Before
public void checkMigrationVersion() {
assumeFalse(
"This checks migrations from before cluster features were introduced",
oldClusterHasFeature(FeatureService.FEATURES_SUPPORTED)
);
}
public ClusterFeatureMigrationIT(@Name("upgradedNodes") int upgradedNodes) {
super(upgradedNodes);
}
public void testClusterFeatureMigration() throws IOException {
if (isUpgradedCluster()) {
// check the nodes all have a feature in their cluster state (there should always be features_supported)
var response = entityAsMap(adminClient().performRequest(new Request("GET", "/_cluster/state/nodes")));
List<?> nodeFeatures = (List<?>) XContentMapValues.extractValue("nodes_features", response);
assertThat(nodeFeatures, hasSize(adminClient().getNodes().size()));
Map<String, List<?>> features = nodeFeatures.stream()
.map(o -> (Map<?, ?>) o)
.collect(Collectors.toMap(m -> (String) m.get("node_id"), m -> (List<?>) m.get("features")));
Set<String> missing = features.entrySet()
.stream()
.filter(e -> e.getValue().contains(FeatureService.FEATURES_SUPPORTED.id()) == false)
.map(Map.Entry::getKey)
.collect(Collectors.toSet());
assertThat(missing + " out of " + features.keySet() + " does not have the required feature", missing, empty());
}
}
}

View file

@ -26,6 +26,13 @@
}
}
]
},
"params": {
"include_deleted": {
"type": "boolean",
"default": false,
"description": "A flag indicating whether to return connectors that have been soft-deleted."
}
}
}
}

View file

@ -47,6 +47,11 @@
"query": {
"type": "string",
"description": "A search string for querying connectors, filtering results by matching against connector names, descriptions, and index names"
},
"include_deleted": {
"type": "boolean",
"default": false,
"description": "A flag indicating whether to return connectors that have been soft-deleted."
}
}
}

View file

@ -30,7 +30,6 @@ public class ClusterFeaturesIT extends ESIntegTestCase {
FeatureService service = internalCluster().getCurrentMasterNodeInstance(FeatureService.class);
assertThat(service.getNodeFeatures(), hasKey(FeatureService.FEATURES_SUPPORTED.id()));
assertThat(service.getNodeFeatures(), hasKey(FeatureService.TEST_FEATURES_ENABLED.id()));
// check the nodes all have a feature in their cluster state (there should always be features_supported)
@ -38,7 +37,7 @@ public class ClusterFeaturesIT extends ESIntegTestCase {
var features = response.getState().clusterFeatures().nodeFeatures();
Set<String> missing = features.entrySet()
.stream()
.filter(e -> e.getValue().contains(FeatureService.FEATURES_SUPPORTED.id()) == false)
.filter(e -> e.getValue().contains(FeatureService.TEST_FEATURES_ENABLED.id()) == false)
.map(Map.Entry::getKey)
.collect(Collectors.toSet());
assertThat(missing + " out of " + features.keySet() + " does not have the required feature", missing, empty());

View file

@ -37,7 +37,7 @@ import java.util.Iterator;
import java.util.Locale;
import static org.elasticsearch.cluster.routing.allocation.AbstractAllocationDecision.discoveryNodeToXContent;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.singleChunk;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.chunk;
/**
* A {@code ClusterAllocationExplanation} is an explanation of why a shard is unassigned,
@ -169,7 +169,7 @@ public final class ClusterAllocationExplanation implements ChunkedToXContentObje
}
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) {
return Iterators.concat(singleChunk((builder, p) -> {
return Iterators.concat(chunk((builder, p) -> {
builder.startObject();
if (isSpecificShard() == false) {

View file

@ -33,8 +33,8 @@ import java.util.Map;
import java.util.Objects;
import java.util.Set;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.chunk;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.endObject;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.singleChunk;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.startObject;
public class DesiredBalanceResponse extends ActionResponse implements ChunkedToXContentObject {
@ -88,7 +88,7 @@ public class DesiredBalanceResponse extends ActionResponse implements ChunkedToX
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) {
return Iterators.concat(
singleChunk(
chunk(
(builder, p) -> builder.startObject()
.field("stats", stats)
.field("cluster_balance_stats", clusterBalanceStats)
@ -101,16 +101,16 @@ public class DesiredBalanceResponse extends ActionResponse implements ChunkedToX
Iterators.flatMap(
indexEntry.getValue().entrySet().iterator(),
shardEntry -> Iterators.concat(
singleChunk((builder, p) -> builder.field(String.valueOf(shardEntry.getKey()))),
chunk((builder, p) -> builder.field(String.valueOf(shardEntry.getKey()))),
shardEntry.getValue().toXContentChunked(params)
)
),
endObject()
)
),
singleChunk((builder, p) -> builder.endObject().startObject("cluster_info")),
chunk((builder, p) -> builder.endObject().startObject("cluster_info")),
clusterInfo.toXContentChunked(params),
singleChunk((builder, p) -> builder.endObject().endObject())
chunk((builder, p) -> builder.endObject().endObject())
);
}
@ -173,9 +173,9 @@ public class DesiredBalanceResponse extends ActionResponse implements ChunkedToX
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) {
return Iterators.concat(
singleChunk((builder, p) -> builder.startObject().startArray("current")),
chunk((builder, p) -> builder.startObject().startArray("current")),
current().iterator(),
singleChunk((builder, p) -> builder.endArray().field("desired").value(desired, p).endObject())
chunk((builder, p) -> builder.endArray().field("desired").value(desired, p).endObject())
);
}
}

View file

@ -45,7 +45,7 @@ import java.util.Iterator;
import java.util.Map;
import java.util.Objects;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.singleChunk;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.chunk;
/**
* Node statistics (dynamic, changes depending on when created).
@ -347,7 +347,7 @@ public class NodeStats extends BaseNodeResponse implements ChunkedToXContent {
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params outerParams) {
return Iterators.concat(singleChunk((builder, params) -> {
return Iterators.concat(chunk((builder, params) -> {
builder.field("name", getNode().getName());
builder.field("transport_address", getNode().getAddress().toString());
builder.field("host", getNode().getHostName());
@ -369,9 +369,7 @@ public class NodeStats extends BaseNodeResponse implements ChunkedToXContent {
return builder;
}),
ifPresent(getIndices()).toXContentChunked(outerParams),
singleChunk(
(builder, p) -> builder.value(ifPresent(getOs()), p).value(ifPresent(getProcess()), p).value(ifPresent(getJvm()), p)
),
chunk((builder, p) -> builder.value(ifPresent(getOs()), p).value(ifPresent(getProcess()), p).value(ifPresent(getJvm()), p)),
ifPresent(getThreadPool()).toXContentChunked(outerParams),
singleChunkIfPresent(getFs()),
ifPresent(getTransport()).toXContentChunked(outerParams),
@ -382,7 +380,7 @@ public class NodeStats extends BaseNodeResponse implements ChunkedToXContent {
ifPresent(getIngestStats()).toXContentChunked(outerParams),
singleChunkIfPresent(getAdaptiveSelectionStats()),
singleChunkIfPresent(getScriptCacheStats()),
singleChunk(
chunk(
(builder, p) -> builder.value(ifPresent(getIndexingPressureStats()), p)
.value(ifPresent(getRepositoriesStats()), p)
.value(ifPresent(getNodeAllocationStats()), p)
@ -399,6 +397,6 @@ public class NodeStats extends BaseNodeResponse implements ChunkedToXContent {
}
private static Iterator<ToXContent> singleChunkIfPresent(ToXContent toXContent) {
return toXContent == null ? Collections.emptyIterator() : ChunkedToXContentHelper.singleChunk(toXContent);
return toXContent == null ? Collections.emptyIterator() : ChunkedToXContentHelper.chunk(toXContent);
}
}
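The singleChunk-to-chunk change here and in the other touched files is a mechanical rename: the helper still wraps one ToXContent lambda as a one-element chunk iterator, roughly equivalent to the sketch below (an illustration, not the actual ChunkedToXContentHelper implementation):

// Hypothetical one-line equivalent of the renamed helper.
static java.util.Iterator<ToXContent> chunk(ToXContent content) {
    return java.util.Collections.singletonList(content).iterator();
}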

View file

@ -97,7 +97,7 @@ public class ClusterRerouteResponse extends ActionResponse implements IsAcknowle
return Iterators.concat(
Iterators.single((builder, params) -> builder.startObject().field(ACKNOWLEDGED_KEY, isAcknowledged())),
emitState(outerParams)
? ChunkedToXContentHelper.wrapWithObject("state", state.toXContentChunked(outerParams))
? ChunkedToXContentHelper.object("state", state.toXContentChunked(outerParams))
: Collections.emptyIterator(),
Iterators.single((builder, params) -> {
if (params.paramAsBoolean("explain", false)) {

View file

@ -9,12 +9,14 @@
package org.elasticsearch.action.admin.indices.resolve;
import org.elasticsearch.TransportVersion;
import org.elasticsearch.TransportVersions;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.tasks.CancellableTask;
@ -30,6 +32,7 @@ import java.util.Objects;
public class ResolveClusterActionRequest extends ActionRequest implements IndicesRequest.Replaceable {
public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpen();
public static final String TRANSPORT_VERSION_ERROR_MESSAGE_PREFIX = "ResolveClusterAction requires at least version";
private String[] names;
/*
@ -65,12 +68,7 @@ public class ResolveClusterActionRequest extends ActionRequest implements Indice
public ResolveClusterActionRequest(StreamInput in) throws IOException {
super(in);
if (in.getTransportVersion().before(TransportVersions.V_8_13_0)) {
throw new UnsupportedOperationException(
throw new UnsupportedOperationException(createVersionErrorMessage(in.getTransportVersion()));
"ResolveClusterAction requires at least version "
+ TransportVersions.V_8_13_0.toReleaseVersion()
+ " but was "
+ in.getTransportVersion().toReleaseVersion()
);
}
this.names = in.readStringArray();
this.indicesOptions = IndicesOptions.readIndicesOptions(in);
@ -81,17 +79,21 @@ public class ResolveClusterActionRequest extends ActionRequest implements Indice
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
if (out.getTransportVersion().before(TransportVersions.V_8_13_0)) {
throw new UnsupportedOperationException(
throw new UnsupportedOperationException(createVersionErrorMessage(out.getTransportVersion()));
"ResolveClusterAction requires at least version "
+ TransportVersions.V_8_13_0.toReleaseVersion()
+ " but was "
+ out.getTransportVersion().toReleaseVersion()
);
}
out.writeStringArray(names);
indicesOptions.writeIndicesOptions(out);
}
private String createVersionErrorMessage(TransportVersion versionFound) {
return Strings.format(
"%s %s but was %s",
TRANSPORT_VERSION_ERROR_MESSAGE_PREFIX,
TransportVersions.V_8_13_0.toReleaseVersion(),
versionFound.toReleaseVersion()
);
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;

View file

@ -51,7 +51,6 @@ import static org.elasticsearch.action.search.TransportSearchHelper.checkCCSVers
public class TransportResolveClusterAction extends HandledTransportAction<ResolveClusterActionRequest, ResolveClusterActionResponse> {
private static final Logger logger = LogManager.getLogger(TransportResolveClusterAction.class);
private static final String TRANSPORT_VERSION_ERROR_MESSAGE = "ResolveClusterAction requires at least Transport Version";
public static final String NAME = "indices:admin/resolve/cluster";
public static final ActionType<ResolveClusterActionResponse> TYPE = new ActionType<>(NAME);
@ -175,7 +174,13 @@ public class TransportResolveClusterAction extends HandledTransportAction<Resolv
failure, failure,
ElasticsearchSecurityException.class ElasticsearchSecurityException.class
) instanceof ElasticsearchSecurityException ese) { ) instanceof ElasticsearchSecurityException ese) {
clusterInfoMap.put(clusterAlias, new ResolveClusterInfo(true, skipUnavailable, ese.getMessage())); /*
* some ElasticsearchSecurityExceptions are thrown by the local cluster's security interceptor after
* client.execute has been called but before any request has gone to the remote cluster. An
* ElasticsearchSecurityException therefore cannot tell us whether the remote cluster is available,
* so mark it as connected=false
*/
clusterInfoMap.put(clusterAlias, new ResolveClusterInfo(false, skipUnavailable, ese.getMessage()));
} else if (ExceptionsHelper.unwrap(failure, IndexNotFoundException.class) instanceof IndexNotFoundException infe) { } else if (ExceptionsHelper.unwrap(failure, IndexNotFoundException.class) instanceof IndexNotFoundException infe) {
clusterInfoMap.put(clusterAlias, new ResolveClusterInfo(true, skipUnavailable, infe.getMessage())); clusterInfoMap.put(clusterAlias, new ResolveClusterInfo(true, skipUnavailable, infe.getMessage()));
} else { } else {
@ -184,7 +189,7 @@ public class TransportResolveClusterAction extends HandledTransportAction<Resolv
// this error at the Transport layer BEFORE it sends the request to the remote cluster, since there // this error at the Transport layer BEFORE it sends the request to the remote cluster, since there
// are version guards on the Writeables for this Action, namely ResolveClusterActionRequest.writeTo // are version guards on the Writeables for this Action, namely ResolveClusterActionRequest.writeTo
if (cause instanceof UnsupportedOperationException if (cause instanceof UnsupportedOperationException
&& cause.getMessage().contains(TRANSPORT_VERSION_ERROR_MESSAGE)) { && cause.getMessage().contains(ResolveClusterActionRequest.TRANSPORT_VERSION_ERROR_MESSAGE_PREFIX)) {
// Since this cluster does not have _resolve/cluster, we call the _resolve/index // Since this cluster does not have _resolve/cluster, we call the _resolve/index
// endpoint to fill in the matching_indices field of the response for that cluster // endpoint to fill in the matching_indices field of the response for that cluster
ResolveIndexAction.Request resolveIndexRequest = new ResolveIndexAction.Request( ResolveIndexAction.Request resolveIndexRequest = new ResolveIndexAction.Request(
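Editor's note: this hunk reuses the prefix from ResolveClusterActionRequest to detect older remotes and downgrades security failures to connected=false. A hedged sketch of that failure classification is below, using java.lang.SecurityException and a local ClusterInfoSketch record as simplified stand-ins for ElasticsearchSecurityException and ResolveClusterInfo.

    final class ResolveFailureExample {
        record ClusterInfoSketch(boolean connected, boolean skipUnavailable, String error) {}

        static final String VERSION_ERROR_MESSAGE_PREFIX = "ExampleAction requires at least version";

        static ClusterInfoSketch classify(Exception failure, boolean skipUnavailable) {
            if (failure instanceof SecurityException se) {
                // The local security interceptor may reject the request before it ever leaves this cluster,
                // so a security failure says nothing about remote reachability: report connected=false.
                return new ClusterInfoSketch(false, skipUnavailable, se.getMessage());
            }
            if (failure instanceof UnsupportedOperationException uoe && uoe.getMessage().contains(VERSION_ERROR_MESSAGE_PREFIX)) {
                // An older remote without the endpoint: the caller would fall back to _resolve/index instead.
                return new ClusterInfoSketch(true, skipUnavailable, "remote predates the endpoint");
            }
            return new ClusterInfoSketch(false, skipUnavailable, failure.getMessage());
        }

        public static void main(String[] args) {
            System.out.println(classify(new SecurityException("action not authorized"), true));
        }
    }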

View file

@ -78,9 +78,7 @@ public class IndicesSegmentResponse extends ChunkedBroadcastResponse {
getIndices().values().iterator(), getIndices().values().iterator(),
indexSegments -> Iterators.concat( indexSegments -> Iterators.concat(
ChunkedToXContentHelper.singleChunk( ChunkedToXContentHelper.chunk((builder, p) -> builder.startObject(indexSegments.getIndex()).startObject(Fields.SHARDS)),
(builder, p) -> builder.startObject(indexSegments.getIndex()).startObject(Fields.SHARDS)
),
Iterators.flatMap( Iterators.flatMap(
indexSegments.iterator(), indexSegments.iterator(),
indexSegment -> Iterators.concat( indexSegment -> Iterators.concat(
@ -90,7 +88,7 @@ public class IndicesSegmentResponse extends ChunkedBroadcastResponse {
indexSegment.iterator(), indexSegment.iterator(),
shardSegments -> Iterators.concat( shardSegments -> Iterators.concat(
ChunkedToXContentHelper.singleChunk((builder, p) -> { ChunkedToXContentHelper.chunk((builder, p) -> {
builder.startObject(); builder.startObject();
builder.startObject(Fields.ROUTING); builder.startObject(Fields.ROUTING);
@ -112,7 +110,7 @@ public class IndicesSegmentResponse extends ChunkedBroadcastResponse {
shardSegments.iterator(), shardSegments.iterator(),
segment -> Iterators.concat( segment -> Iterators.concat(
ChunkedToXContentHelper.singleChunk((builder, p) -> { ChunkedToXContentHelper.chunk((builder, p) -> {
builder.startObject(segment.getName()); builder.startObject(segment.getName());
builder.field(Fields.GENERATION, segment.getGeneration()); builder.field(Fields.GENERATION, segment.getGeneration());
builder.field(Fields.NUM_DOCS, segment.getNumDocs()); builder.field(Fields.NUM_DOCS, segment.getNumDocs());
@ -132,7 +130,7 @@ public class IndicesSegmentResponse extends ChunkedBroadcastResponse {
return builder; return builder;
}), }),
getSegmentSortChunks(segment.getSegmentSort()), getSegmentSortChunks(segment.getSegmentSort()),
ChunkedToXContentHelper.singleChunk((builder, p) -> { ChunkedToXContentHelper.chunk((builder, p) -> {
if (segment.attributes != null && segment.attributes.isEmpty() == false) { if (segment.attributes != null && segment.attributes.isEmpty() == false) {
builder.field("attributes", segment.attributes); builder.field("attributes", segment.attributes);
} }
@ -141,13 +139,13 @@ public class IndicesSegmentResponse extends ChunkedBroadcastResponse {
}) })
) )
), ),
ChunkedToXContentHelper.singleChunk((builder, p) -> builder.endObject().endObject()) ChunkedToXContentHelper.chunk((builder, p) -> builder.endObject().endObject())
) )
), ),
ChunkedToXContentHelper.endArray() ChunkedToXContentHelper.endArray()
) )
), ),
ChunkedToXContentHelper.singleChunk((builder, p) -> builder.endObject().endObject()) ChunkedToXContentHelper.chunk((builder, p) -> builder.endObject().endObject())
) )
), ),
ChunkedToXContentHelper.endObject() ChunkedToXContentHelper.endObject()
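Editor's note: the singleChunk calls above were renamed to chunk, but the underlying pattern is unchanged: the response is emitted as a lazy sequence of small serialization steps concatenated together. The sketch below imitates that idea with plain Consumers appending to a StringBuilder; it is a stand-in, not the real ChunkedToXContentHelper API, and it emits a single segment so separator handling is omitted.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.function.Consumer;

    final class ChunkedSegmentsExample {
        // Each chunk appends one small piece of the response; a real implementation streams the chunks lazily.
        static List<Consumer<StringBuilder>> segmentChunks(Map<String, Integer> numDocsPerSegment) {
            List<Consumer<StringBuilder>> chunks = new ArrayList<>();
            chunks.add(b -> b.append("{\"segments\":{"));
            numDocsPerSegment.forEach(
                (name, numDocs) -> chunks.add(b -> b.append('"').append(name).append("\":{\"num_docs\":").append(numDocs).append('}'))
            );
            chunks.add(b -> b.append("}}"));
            return chunks;
        }

        public static void main(String[] args) {
            StringBuilder out = new StringBuilder();
            segmentChunks(Map.of("_0", 42)).forEach(chunk -> chunk.accept(out));
            System.out.println(out); // {"segments":{"_0":{"num_docs":42}}}
        }
    }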

View file

@ -50,9 +50,9 @@ public class FieldUsageStatsResponse extends ChunkedBroadcastResponse {
return Iterators.flatMap( return Iterators.flatMap(
stats.entrySet().stream().sorted(Map.Entry.comparingByKey()).iterator(), stats.entrySet().stream().sorted(Map.Entry.comparingByKey()).iterator(),
entry -> Iterators.concat( entry -> Iterators.concat(
ChunkedToXContentHelper.singleChunk((builder, p) -> builder.startObject(entry.getKey()).startArray("shards")), ChunkedToXContentHelper.chunk((builder, p) -> builder.startObject(entry.getKey()).startArray("shards")),
entry.getValue().iterator(), entry.getValue().iterator(),
ChunkedToXContentHelper.singleChunk((builder, p) -> builder.endArray().endObject()) ChunkedToXContentHelper.chunk((builder, p) -> builder.endArray().endObject())
) )
); );
} }

View file

@ -206,7 +206,7 @@ public class IndicesStatsResponse extends ChunkedBroadcastResponse {
if (level == ClusterStatsLevel.INDICES || level == ClusterStatsLevel.SHARDS) { if (level == ClusterStatsLevel.INDICES || level == ClusterStatsLevel.SHARDS) {
return Iterators.concat( return Iterators.concat(
ChunkedToXContentHelper.singleChunk((builder, p) -> { ChunkedToXContentHelper.chunk((builder, p) -> {
commonStats(builder, p); commonStats(builder, p);
return builder.startObject(Fields.INDICES); return builder.startObject(Fields.INDICES);
}), }),
@ -214,7 +214,7 @@ public class IndicesStatsResponse extends ChunkedBroadcastResponse {
getIndices().values().iterator(), getIndices().values().iterator(),
indexStats -> Iterators.concat( indexStats -> Iterators.concat(
ChunkedToXContentHelper.singleChunk((builder, p) -> { ChunkedToXContentHelper.chunk((builder, p) -> {
builder.startObject(indexStats.getIndex()); builder.startObject(indexStats.getIndex());
builder.field("uuid", indexStats.getUuid()); builder.field("uuid", indexStats.getUuid());
if (indexStats.getHealth() != null) { if (indexStats.getHealth() != null) {
@ -259,7 +259,7 @@ public class IndicesStatsResponse extends ChunkedBroadcastResponse {
ChunkedToXContentHelper.endObject() ChunkedToXContentHelper.endObject()
); );
} else { } else {
return ChunkedToXContentHelper.singleChunk((builder, p) -> { return ChunkedToXContentHelper.chunk((builder, p) -> {
commonStats(builder, p); commonStats(builder, p);
return builder; return builder;
}); });

View file

@ -219,7 +219,6 @@ public class TransportSimulateIndexTemplateAction extends TransportMasterNodeRea
.build(); .build();
final IndexMetadata indexMetadata = IndexMetadata.builder(indexName) final IndexMetadata indexMetadata = IndexMetadata.builder(indexName)
// handle mixed-cluster states by passing in minTransportVersion to reset event.ingested range to UNKNOWN if an older version
.eventIngestedRange(getEventIngestedRange(indexName, simulatedState)) .eventIngestedRange(getEventIngestedRange(indexName, simulatedState))
.settings(dummySettings) .settings(dummySettings)
.build(); .build();
@ -308,7 +307,6 @@ public class TransportSimulateIndexTemplateAction extends TransportMasterNodeRea
dummySettings.put(templateSettings); dummySettings.put(templateSettings);
final IndexMetadata indexMetadata = IndexMetadata.builder(indexName) final IndexMetadata indexMetadata = IndexMetadata.builder(indexName)
// handle mixed-cluster states by passing in minTransportVersion to reset event.ingested range to UNKNOWN if an older version
.eventIngestedRange(getEventIngestedRange(indexName, simulatedState)) .eventIngestedRange(getEventIngestedRange(indexName, simulatedState))
.settings(dummySettings) .settings(dummySettings)
.build(); .build();

View file

@ -551,9 +551,8 @@ public final class SearchPhaseController {
assert numReducePhases >= 0 : "num reduce phases must be >= 0 but was: " + numReducePhases; assert numReducePhases >= 0 : "num reduce phases must be >= 0 but was: " + numReducePhases;
numReducePhases++; // increment for this phase numReducePhases++; // increment for this phase
if (queryResults.isEmpty()) { // early terminate we have nothing to reduce if (queryResults.isEmpty()) { // early terminate we have nothing to reduce
final TotalHits totalHits = topDocsStats.getTotalHits();
return new ReducedQueryPhase( return new ReducedQueryPhase(
totalHits, topDocsStats.getTotalHits(),
topDocsStats.fetchHits, topDocsStats.fetchHits,
topDocsStats.getMaxScore(), topDocsStats.getMaxScore(),
false, false,
@ -570,8 +569,7 @@ public final class SearchPhaseController {
true true
); );
} }
int total = queryResults.size(); final List<QuerySearchResult> nonNullResults = new ArrayList<>();
final Collection<SearchPhaseResult> nonNullResults = new ArrayList<>();
boolean hasSuggest = false; boolean hasSuggest = false;
boolean hasProfileResults = false; boolean hasProfileResults = false;
for (SearchPhaseResult queryResult : queryResults) { for (SearchPhaseResult queryResult : queryResults) {
@ -581,12 +579,11 @@ public final class SearchPhaseController {
} }
hasSuggest |= res.suggest() != null; hasSuggest |= res.suggest() != null;
hasProfileResults |= res.hasProfileResults(); hasProfileResults |= res.hasProfileResults();
nonNullResults.add(queryResult); nonNullResults.add(res);
} }
queryResults = nonNullResults; validateMergeSortValueFormats(nonNullResults);
validateMergeSortValueFormats(queryResults); if (nonNullResults.isEmpty()) {
if (queryResults.isEmpty()) { var ex = new IllegalStateException("must have at least one non-empty search result, got 0 out of " + queryResults.size());
var ex = new IllegalStateException("must have at least one non-empty search result, got 0 out of " + total);
assert false : ex; assert false : ex;
throw ex; throw ex;
} }
@ -594,13 +591,12 @@ public final class SearchPhaseController {
// count the total (we use the query result provider here, since we might not get any hits (we scrolled past them)) // count the total (we use the query result provider here, since we might not get any hits (we scrolled past them))
final Map<String, List<Suggestion<?>>> groupedSuggestions = hasSuggest ? new HashMap<>() : Collections.emptyMap(); final Map<String, List<Suggestion<?>>> groupedSuggestions = hasSuggest ? new HashMap<>() : Collections.emptyMap();
final Map<String, SearchProfileQueryPhaseResult> profileShardResults = hasProfileResults final Map<String, SearchProfileQueryPhaseResult> profileShardResults = hasProfileResults
? Maps.newMapWithExpectedSize(queryResults.size()) ? Maps.newMapWithExpectedSize(nonNullResults.size())
: Collections.emptyMap(); : Collections.emptyMap();
int from = 0; int from = 0;
int size = 0; int size = 0;
DocValueFormat[] sortValueFormats = null; DocValueFormat[] sortValueFormats = null;
for (SearchPhaseResult entry : queryResults) { for (QuerySearchResult result : nonNullResults) {
QuerySearchResult result = entry.queryResult();
from = result.from(); from = result.from();
// sorted queries can set the size to 0 if they have enough competitive hits. // sorted queries can set the size to 0 if they have enough competitive hits.
size = Math.max(result.size(), size); size = Math.max(result.size(), size);
@ -611,8 +607,7 @@ public final class SearchPhaseController {
if (hasSuggest) { if (hasSuggest) {
assert result.suggest() != null; assert result.suggest() != null;
for (Suggestion<? extends Suggestion.Entry<? extends Suggestion.Entry.Option>> suggestion : result.suggest()) { for (Suggestion<? extends Suggestion.Entry<? extends Suggestion.Entry.Option>> suggestion : result.suggest()) {
List<Suggestion<?>> suggestionList = groupedSuggestions.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>()); groupedSuggestions.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>()).add(suggestion);
suggestionList.add(suggestion);
if (suggestion instanceof CompletionSuggestion completionSuggestion) { if (suggestion instanceof CompletionSuggestion completionSuggestion) {
completionSuggestion.setShardIndex(result.getShardIndex()); completionSuggestion.setShardIndex(result.getShardIndex());
} }
@ -620,53 +615,48 @@ public final class SearchPhaseController {
} }
assert bufferedTopDocs.isEmpty() || result.hasConsumedTopDocs() : "firstResult has no aggs but we got non null buffered aggs?"; assert bufferedTopDocs.isEmpty() || result.hasConsumedTopDocs() : "firstResult has no aggs but we got non null buffered aggs?";
if (hasProfileResults) { if (hasProfileResults) {
String key = result.getSearchShardTarget().toString(); profileShardResults.put(result.getSearchShardTarget().toString(), result.consumeProfileResult());
profileShardResults.put(key, result.consumeProfileResult());
} }
} }
final Suggest reducedSuggest; final Suggest reducedSuggest = groupedSuggestions.isEmpty() ? null : new Suggest(Suggest.reduce(groupedSuggestions));
final List<CompletionSuggestion> reducedCompletionSuggestions;
if (groupedSuggestions.isEmpty()) {
reducedSuggest = null;
reducedCompletionSuggestions = Collections.emptyList();
} else {
reducedSuggest = new Suggest(Suggest.reduce(groupedSuggestions));
reducedCompletionSuggestions = reducedSuggest.filter(CompletionSuggestion.class);
}
final InternalAggregations aggregations = bufferedAggs == null
? null
: InternalAggregations.topLevelReduceDelayable(
bufferedAggs,
performFinalReduce ? aggReduceContextBuilder.forFinalReduction() : aggReduceContextBuilder.forPartialReduction()
);
final SearchProfileResultsBuilder profileBuilder = profileShardResults.isEmpty()
? null
: new SearchProfileResultsBuilder(profileShardResults);
final SortedTopDocs sortedTopDocs; final SortedTopDocs sortedTopDocs;
if (queryPhaseRankCoordinatorContext == null) { if (queryPhaseRankCoordinatorContext == null) {
sortedTopDocs = sortDocs(isScrollRequest, bufferedTopDocs, from, size, reducedCompletionSuggestions); sortedTopDocs = sortDocs(
} else { isScrollRequest,
ScoreDoc[] rankedDocs = queryPhaseRankCoordinatorContext.rankQueryPhaseResults( bufferedTopDocs,
queryResults.stream().map(SearchPhaseResult::queryResult).toList(), from,
topDocsStats size,
reducedSuggest == null ? Collections.emptyList() : reducedSuggest.filter(CompletionSuggestion.class)
);
} else {
sortedTopDocs = new SortedTopDocs(
queryPhaseRankCoordinatorContext.rankQueryPhaseResults(nonNullResults, topDocsStats),
false,
null,
null,
null,
0
); );
sortedTopDocs = new SortedTopDocs(rankedDocs, false, null, null, null, 0);
size = sortedTopDocs.scoreDocs.length; size = sortedTopDocs.scoreDocs.length;
// we need to reset from here as pagination and result trimming has already taken place // we need to reset from here as pagination and result trimming has already taken place
// within the `QueryPhaseRankCoordinatorContext#rankQueryPhaseResults` and we don't want // within the `QueryPhaseRankCoordinatorContext#rankQueryPhaseResults` and we don't want
// to apply it again in the `getHits` method. // to apply it again in the `getHits` method.
from = 0; from = 0;
} }
final TotalHits totalHits = topDocsStats.getTotalHits();
return new ReducedQueryPhase( return new ReducedQueryPhase(
totalHits, topDocsStats.getTotalHits(),
topDocsStats.fetchHits, topDocsStats.fetchHits,
topDocsStats.getMaxScore(), topDocsStats.getMaxScore(),
topDocsStats.timedOut, topDocsStats.timedOut,
topDocsStats.terminatedEarly, topDocsStats.terminatedEarly,
reducedSuggest, reducedSuggest,
aggregations, bufferedAggs == null
profileBuilder, ? null
: InternalAggregations.topLevelReduceDelayable(
bufferedAggs,
performFinalReduce ? aggReduceContextBuilder.forFinalReduction() : aggReduceContextBuilder.forPartialReduction()
),
profileShardResults.isEmpty() ? null : new SearchProfileResultsBuilder(profileShardResults),
sortedTopDocs, sortedTopDocs,
sortValueFormats, sortValueFormats,
queryPhaseRankCoordinatorContext, queryPhaseRankCoordinatorContext,
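Editor's note: the reduce-phase refactor above gathers the non-empty per-shard results into a typed List<QuerySearchResult> up front, drops the separate total counter, and reads the original size from queryResults only when building the error. A simplified, self-contained sketch of that filtering step, with a stand-in ShardResult record:

    import java.util.ArrayList;
    import java.util.List;

    final class ReducePhaseExample {
        // Stand-in for a per-shard query result; hits == null models an "empty" result.
        record ShardResult(String shard, List<Integer> hits) {}

        static List<ShardResult> nonEmptyResults(List<ShardResult> queryResults) {
            List<ShardResult> nonNull = new ArrayList<>();
            for (ShardResult result : queryResults) {
                if (result.hits() != null) {
                    nonNull.add(result);
                }
            }
            if (nonNull.isEmpty()) {
                // Keep the original size in the message so callers can see how many shards returned nothing.
                throw new IllegalStateException("must have at least one non-empty search result, got 0 out of " + queryResults.size());
            }
            return nonNull;
        }

        public static void main(String[] args) {
            List<ShardResult> results = List.of(new ShardResult("shard-0", null), new ShardResult("shard-1", List.of(1, 2)));
            System.out.println(nonEmptyResults(results)); // only shard-1 survives the filter
        }
    }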

View file

@ -391,13 +391,13 @@ public class SearchResponse extends ActionResponse implements ChunkedToXContentO
public Iterator<? extends ToXContent> innerToXContentChunked(ToXContent.Params params) { public Iterator<? extends ToXContent> innerToXContentChunked(ToXContent.Params params) {
return Iterators.concat( return Iterators.concat(
ChunkedToXContentHelper.singleChunk(SearchResponse.this::headerToXContent), ChunkedToXContentHelper.chunk(SearchResponse.this::headerToXContent),
Iterators.single(clusters), Iterators.single(clusters),
Iterators.concat( Iterators.concat(
hits.toXContentChunked(params), hits.toXContentChunked(params),
aggregations == null ? Collections.emptyIterator() : ChunkedToXContentHelper.singleChunk(aggregations), aggregations == null ? Collections.emptyIterator() : ChunkedToXContentHelper.chunk(aggregations),
suggest == null ? Collections.emptyIterator() : ChunkedToXContentHelper.singleChunk(suggest), suggest == null ? Collections.emptyIterator() : ChunkedToXContentHelper.chunk(suggest),
profileResults == null ? Collections.emptyIterator() : ChunkedToXContentHelper.singleChunk(profileResults) profileResults == null ? Collections.emptyIterator() : ChunkedToXContentHelper.chunk(profileResults)
) )
); );
} }

View file

@ -36,8 +36,8 @@ import java.util.Set;
import static org.elasticsearch.cluster.routing.ShardRouting.newUnassigned; import static org.elasticsearch.cluster.routing.ShardRouting.newUnassigned;
import static org.elasticsearch.cluster.routing.UnassignedInfo.Reason.REINITIALIZED; import static org.elasticsearch.cluster.routing.UnassignedInfo.Reason.REINITIALIZED;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.chunk;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.endArray; import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.endArray;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.singleChunk;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.startObject; import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.startObject;
/** /**
@ -162,7 +162,7 @@ public class ClusterInfo implements ChunkedToXContent, Writeable {
} }
return builder.endObject(); // end $nodename return builder.endObject(); // end $nodename
}), }),
singleChunk( chunk(
(builder, p) -> builder.endObject() // end "nodes" (builder, p) -> builder.endObject() // end "nodes"
.startObject("shard_sizes") .startObject("shard_sizes")
), ),
@ -171,7 +171,7 @@ public class ClusterInfo implements ChunkedToXContent, Writeable {
shardSizes.entrySet().iterator(), shardSizes.entrySet().iterator(),
c -> (builder, p) -> builder.humanReadableField(c.getKey() + "_bytes", c.getKey(), ByteSizeValue.ofBytes(c.getValue())) c -> (builder, p) -> builder.humanReadableField(c.getKey() + "_bytes", c.getKey(), ByteSizeValue.ofBytes(c.getValue()))
), ),
singleChunk( chunk(
(builder, p) -> builder.endObject() // end "shard_sizes" (builder, p) -> builder.endObject() // end "shard_sizes"
.startObject("shard_data_set_sizes") .startObject("shard_data_set_sizes")
), ),
@ -183,12 +183,12 @@ public class ClusterInfo implements ChunkedToXContent, Writeable {
ByteSizeValue.ofBytes(c.getValue()) ByteSizeValue.ofBytes(c.getValue())
) )
), ),
singleChunk( chunk(
(builder, p) -> builder.endObject() // end "shard_data_set_sizes" (builder, p) -> builder.endObject() // end "shard_data_set_sizes"
.startObject("shard_paths") .startObject("shard_paths")
), ),
Iterators.map(dataPath.entrySet().iterator(), c -> (builder, p) -> builder.field(c.getKey().toString(), c.getValue())), Iterators.map(dataPath.entrySet().iterator(), c -> (builder, p) -> builder.field(c.getKey().toString(), c.getValue())),
singleChunk( chunk(
(builder, p) -> builder.endObject() // end "shard_paths" (builder, p) -> builder.endObject() // end "shard_paths"
.startArray("reserved_sizes") .startArray("reserved_sizes")
), ),

View file

@ -34,7 +34,7 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.AllocationService.RerouteStrategy; import org.elasticsearch.cluster.routing.allocation.AllocationService.RerouteStrategy;
import org.elasticsearch.cluster.routing.allocation.AllocationStatsService; import org.elasticsearch.cluster.routing.allocation.AllocationStatsService;
import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider; import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsAndWeightsCalculator;
import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster; import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator;
@ -141,7 +141,10 @@ public class ClusterModule extends AbstractModule {
this.clusterPlugins = clusterPlugins; this.clusterPlugins = clusterPlugins;
this.deciderList = createAllocationDeciders(settings, clusterService.getClusterSettings(), clusterPlugins); this.deciderList = createAllocationDeciders(settings, clusterService.getClusterSettings(), clusterPlugins);
this.allocationDeciders = new AllocationDeciders(deciderList); this.allocationDeciders = new AllocationDeciders(deciderList);
var nodeAllocationStatsProvider = new NodeAllocationStatsProvider(writeLoadForecaster, clusterService.getClusterSettings()); var nodeAllocationStatsAndWeightsCalculator = new NodeAllocationStatsAndWeightsCalculator(
writeLoadForecaster,
clusterService.getClusterSettings()
);
this.shardsAllocator = createShardsAllocator( this.shardsAllocator = createShardsAllocator(
settings, settings,
clusterService.getClusterSettings(), clusterService.getClusterSettings(),
@ -151,7 +154,7 @@ public class ClusterModule extends AbstractModule {
this::reconcile, this::reconcile,
writeLoadForecaster, writeLoadForecaster,
telemetryProvider, telemetryProvider,
nodeAllocationStatsProvider nodeAllocationStatsAndWeightsCalculator
); );
this.clusterService = clusterService; this.clusterService = clusterService;
this.indexNameExpressionResolver = new IndexNameExpressionResolver(threadPool.getThreadContext(), systemIndices, projectResolver); this.indexNameExpressionResolver = new IndexNameExpressionResolver(threadPool.getThreadContext(), systemIndices, projectResolver);
@ -169,7 +172,7 @@ public class ClusterModule extends AbstractModule {
clusterService, clusterService,
clusterInfoService, clusterInfoService,
shardsAllocator, shardsAllocator,
nodeAllocationStatsProvider nodeAllocationStatsAndWeightsCalculator
); );
this.telemetryProvider = telemetryProvider; this.telemetryProvider = telemetryProvider;
} }
@ -420,7 +423,7 @@ public class ClusterModule extends AbstractModule {
DesiredBalanceReconcilerAction reconciler, DesiredBalanceReconcilerAction reconciler,
WriteLoadForecaster writeLoadForecaster, WriteLoadForecaster writeLoadForecaster,
TelemetryProvider telemetryProvider, TelemetryProvider telemetryProvider,
NodeAllocationStatsProvider nodeAllocationStatsProvider NodeAllocationStatsAndWeightsCalculator nodeAllocationStatsAndWeightsCalculator
) { ) {
Map<String, Supplier<ShardsAllocator>> allocators = new HashMap<>(); Map<String, Supplier<ShardsAllocator>> allocators = new HashMap<>();
allocators.put(BALANCED_ALLOCATOR, () -> new BalancedShardsAllocator(clusterSettings, writeLoadForecaster)); allocators.put(BALANCED_ALLOCATOR, () -> new BalancedShardsAllocator(clusterSettings, writeLoadForecaster));
@ -433,7 +436,7 @@ public class ClusterModule extends AbstractModule {
clusterService, clusterService,
reconciler, reconciler,
telemetryProvider, telemetryProvider,
nodeAllocationStatsProvider nodeAllocationStatsAndWeightsCalculator
) )
); );

View file

@ -827,7 +827,7 @@ public class ClusterState implements ChunkedToXContent, Diffable<ClusterState> {
metrics.contains(Metric.CUSTOMS) metrics.contains(Metric.CUSTOMS)
? Iterators.flatMap( ? Iterators.flatMap(
customs.entrySet().iterator(), customs.entrySet().iterator(),
e -> ChunkedToXContentHelper.wrapWithObject(e.getKey(), e.getValue().toXContentChunked(outerParams)) e -> ChunkedToXContentHelper.object(e.getKey(), e.getValue().toXContentChunked(outerParams))
) )
: Collections.emptyIterator() : Collections.emptyIterator()
); );

View file

@ -103,7 +103,7 @@ public class ComponentTemplateMetadata implements Metadata.ProjectCustom {
@Override @Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) { public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) {
return ChunkedToXContentHelper.xContentValuesMap(COMPONENT_TEMPLATE.getPreferredName(), componentTemplates); return ChunkedToXContentHelper.xContentObjectFields(COMPONENT_TEMPLATE.getPreferredName(), componentTemplates);
} }
@Override @Override

View file

@ -104,7 +104,7 @@ public class ComposableIndexTemplateMetadata implements Metadata.ProjectCustom {
@Override @Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) { public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) {
return ChunkedToXContentHelper.xContentValuesMap(INDEX_TEMPLATE.getPreferredName(), indexTemplates); return ChunkedToXContentHelper.xContentObjectFields(INDEX_TEMPLATE.getPreferredName(), indexTemplates);
} }
@Override @Override

View file

@ -234,7 +234,7 @@ public class DataStreamMetadata implements Metadata.ProjectCustom {
@Override @Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) { public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) {
return Iterators.concat( return Iterators.concat(
ChunkedToXContentHelper.xContentValuesMap(DATA_STREAM.getPreferredName(), dataStreams), ChunkedToXContentHelper.xContentObjectFields(DATA_STREAM.getPreferredName(), dataStreams),
ChunkedToXContentHelper.startObject(DATA_STREAM_ALIASES.getPreferredName()), ChunkedToXContentHelper.startObject(DATA_STREAM_ALIASES.getPreferredName()),
dataStreamAliases.values().iterator(), dataStreamAliases.values().iterator(),
ChunkedToXContentHelper.endObject() ChunkedToXContentHelper.endObject()

View file

@ -1703,12 +1703,7 @@ public class IndexMetadata implements Diffable<IndexMetadata>, ToXContentFragmen
out.writeOptionalDouble(indexWriteLoadForecast); out.writeOptionalDouble(indexWriteLoadForecast);
out.writeOptionalLong(shardSizeInBytesForecast); out.writeOptionalLong(shardSizeInBytesForecast);
} }
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) { eventIngestedRange.writeTo(out);
eventIngestedRange.writeTo(out);
} else {
assert eventIngestedRange == IndexLongFieldRange.UNKNOWN
: "eventIngestedRange should be UNKNOWN until all nodes are on the new version but is " + eventIngestedRange;
}
} }
@Override @Override

View file

@ -671,10 +671,10 @@ public class Metadata implements Diffable<Metadata>, ChunkedToXContent {
Iterators.flatMap( Iterators.flatMap(
customs.entrySet().iterator(), customs.entrySet().iterator(),
entry -> entry.getValue().context().contains(context) entry -> entry.getValue().context().contains(context)
? ChunkedToXContentHelper.wrapWithObject(entry.getKey(), entry.getValue().toXContentChunked(p)) ? ChunkedToXContentHelper.object(entry.getKey(), entry.getValue().toXContentChunked(p))
: Collections.emptyIterator() : Collections.emptyIterator()
), ),
ChunkedToXContentHelper.wrapWithObject("reserved_state", reservedStateMetadata().values().iterator()), ChunkedToXContentHelper.object("reserved_state", reservedStateMetadata().values().iterator()),
ChunkedToXContentHelper.endObject() ChunkedToXContentHelper.endObject()
); );
} else { } else {
@ -700,10 +700,10 @@ public class Metadata implements Diffable<Metadata>, ChunkedToXContent {
Iterators.flatMap( Iterators.flatMap(
customs.entrySet().iterator(), customs.entrySet().iterator(),
entry -> entry.getValue().context().contains(context) entry -> entry.getValue().context().contains(context)
? ChunkedToXContentHelper.wrapWithObject(entry.getKey(), entry.getValue().toXContentChunked(p)) ? ChunkedToXContentHelper.object(entry.getKey(), entry.getValue().toXContentChunked(p))
: Collections.emptyIterator() : Collections.emptyIterator()
), ),
ChunkedToXContentHelper.wrapWithObject("reserved_state", clusterReservedState.values().iterator()), ChunkedToXContentHelper.object("reserved_state", clusterReservedState.values().iterator()),
ChunkedToXContentHelper.endObject() ChunkedToXContentHelper.endObject()
); );
} }

View file

@ -74,7 +74,6 @@ import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.index.mapper.MapperService.MergeReason;
import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.query.SearchExecutionContext;
import org.elasticsearch.index.shard.IndexLongFieldRange;
import org.elasticsearch.indices.IndexCreationException; import org.elasticsearch.indices.IndexCreationException;
import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.indices.InvalidIndexNameException;
@ -548,8 +547,7 @@ public class MetadataCreateIndexService {
temporaryIndexMeta.getSettings(), temporaryIndexMeta.getSettings(),
temporaryIndexMeta.getRoutingNumShards(), temporaryIndexMeta.getRoutingNumShards(),
sourceMetadata, sourceMetadata,
temporaryIndexMeta.isSystem(), temporaryIndexMeta.isSystem()
currentState.getMinTransportVersion()
); );
} catch (Exception e) { } catch (Exception e) {
logger.info("failed to build index metadata [{}]", request.index()); logger.info("failed to build index metadata [{}]", request.index());
@ -1414,15 +1412,10 @@ public class MetadataCreateIndexService {
Settings indexSettings, Settings indexSettings,
int routingNumShards, int routingNumShards,
@Nullable IndexMetadata sourceMetadata, @Nullable IndexMetadata sourceMetadata,
boolean isSystem, boolean isSystem
TransportVersion minClusterTransportVersion
) { ) {
IndexMetadata.Builder indexMetadataBuilder = createIndexMetadataBuilder(indexName, sourceMetadata, indexSettings, routingNumShards); IndexMetadata.Builder indexMetadataBuilder = createIndexMetadataBuilder(indexName, sourceMetadata, indexSettings, routingNumShards);
indexMetadataBuilder.system(isSystem); indexMetadataBuilder.system(isSystem);
if (minClusterTransportVersion.before(TransportVersions.V_8_15_0)) {
// promote to UNKNOWN for older versions since they don't know how to handle event.ingested in cluster state
indexMetadataBuilder.eventIngestedRange(IndexLongFieldRange.UNKNOWN);
}
// now, update the mappings with the actual source // now, update the mappings with the actual source
Map<String, MappingMetadata> mappingsMetadata = new HashMap<>(); Map<String, MappingMetadata> mappingsMetadata = new HashMap<>();
DocumentMapper docMapper = documentMapperSupplier.get(); DocumentMapper docMapper = documentMapperSupplier.get();

View file

@ -191,7 +191,7 @@ public class NodesShutdownMetadata implements Metadata.ClusterCustom {
@Override @Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) { public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) {
return ChunkedToXContentHelper.xContentValuesMap(NODES_FIELD.getPreferredName(), nodes); return ChunkedToXContentHelper.xContentObjectFields(NODES_FIELD.getPreferredName(), nodes);
} }
/** /**

View file

@ -27,8 +27,8 @@ import java.util.Collections;
import java.util.Iterator; import java.util.Iterator;
import java.util.Objects; import java.util.Objects;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.chunk;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.endObject; import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.endObject;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.singleChunk;
import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.startObject; import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.startObject;
public class ShutdownShardMigrationStatus implements Writeable, ChunkedToXContentObject { public class ShutdownShardMigrationStatus implements Writeable, ChunkedToXContentObject {
@ -168,7 +168,7 @@ public class ShutdownShardMigrationStatus implements Writeable, ChunkedToXConten
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) { public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) {
return Iterators.concat( return Iterators.concat(
startObject(), startObject(),
singleChunk((builder, p) -> buildHeader(builder)), chunk((builder, p) -> buildHeader(builder)),
Objects.nonNull(allocationDecision) Objects.nonNull(allocationDecision)
? Iterators.concat(startObject(NODE_ALLOCATION_DECISION_KEY), allocationDecision.toXContentChunked(params), endObject()) ? Iterators.concat(startObject(NODE_ALLOCATION_DECISION_KEY), allocationDecision.toXContentChunked(params), endObject())
: Collections.emptyIterator(), : Collections.emptyIterator(),

View file

@ -19,35 +19,41 @@ import java.util.Map;
import java.util.function.Supplier; import java.util.function.Supplier;
import java.util.stream.Collectors; import java.util.stream.Collectors;
/**
* Exposes cluster allocation metrics. Constructs {@link NodeAllocationStats} per node, on demand.
*/
public class AllocationStatsService { public class AllocationStatsService {
private final ClusterService clusterService; private final ClusterService clusterService;
private final ClusterInfoService clusterInfoService; private final ClusterInfoService clusterInfoService;
private final Supplier<DesiredBalance> desiredBalanceSupplier; private final Supplier<DesiredBalance> desiredBalanceSupplier;
private final NodeAllocationStatsProvider nodeAllocationStatsProvider; private final NodeAllocationStatsAndWeightsCalculator nodeAllocationStatsAndWeightsCalculator;
public AllocationStatsService( public AllocationStatsService(
ClusterService clusterService, ClusterService clusterService,
ClusterInfoService clusterInfoService, ClusterInfoService clusterInfoService,
ShardsAllocator shardsAllocator, ShardsAllocator shardsAllocator,
NodeAllocationStatsProvider nodeAllocationStatsProvider NodeAllocationStatsAndWeightsCalculator nodeAllocationStatsAndWeightsCalculator
) { ) {
this.clusterService = clusterService; this.clusterService = clusterService;
this.clusterInfoService = clusterInfoService; this.clusterInfoService = clusterInfoService;
this.nodeAllocationStatsProvider = nodeAllocationStatsProvider; this.nodeAllocationStatsAndWeightsCalculator = nodeAllocationStatsAndWeightsCalculator;
this.desiredBalanceSupplier = shardsAllocator instanceof DesiredBalanceShardsAllocator allocator this.desiredBalanceSupplier = shardsAllocator instanceof DesiredBalanceShardsAllocator allocator
? allocator::getDesiredBalance ? allocator::getDesiredBalance
: () -> null; : () -> null;
} }
/**
* Returns a map of node IDs to node allocation stats.
*/
public Map<String, NodeAllocationStats> stats() { public Map<String, NodeAllocationStats> stats() {
var state = clusterService.state(); var clusterState = clusterService.state();
var stats = nodeAllocationStatsProvider.stats( var nodesStatsAndWeights = nodeAllocationStatsAndWeightsCalculator.nodesAllocationStatsAndWeights(
state.metadata(), clusterState.metadata(),
state.getRoutingNodes(), clusterState.getRoutingNodes(),
clusterInfoService.getClusterInfo(), clusterInfoService.getClusterInfo(),
desiredBalanceSupplier.get() desiredBalanceSupplier.get()
); );
return stats.entrySet() return nodesStatsAndWeights.entrySet()
.stream() .stream()
.collect( .collect(
Collectors.toMap( Collectors.toMap(
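Editor's note: as the new javadoc says, stats() asks the calculator for per-node stats-and-weight records and then exposes them as plain per-node allocation stats, dropping the weight. A hedged sketch of that map transformation with simplified stand-in records:

    import java.util.Map;
    import java.util.stream.Collectors;

    final class AllocationStatsExample {
        record StatsAndWeightSketch(int shards, int undesiredShards, float nodeWeight) {}
        record NodeAllocationStatsSketch(int shards, int undesiredShards) {}

        // Drop the weight when exposing the public per-node allocation stats.
        static Map<String, NodeAllocationStatsSketch> stats(Map<String, StatsAndWeightSketch> statsAndWeightsPerNode) {
            return statsAndWeightsPerNode.entrySet()
                .stream()
                .collect(
                    Collectors.toMap(Map.Entry::getKey, e -> new NodeAllocationStatsSketch(e.getValue().shards(), e.getValue().undesiredShards()))
                );
        }

        public static void main(String[] args) {
            System.out.println(stats(Map.of("node-1", new StatsAndWeightSketch(12, 1, 3.5f))));
        }
    }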

View file

@ -18,6 +18,15 @@ import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException; import java.io.IOException;
/**
* Point-in-time allocation stats for a particular node.
*
* @param shards count of shards on this node.
* @param undesiredShards count of shards that we want to move off of this node.
* @param forecastedIngestLoad the predicted near future total ingest load on this node.
* @param forecastedDiskUsage the predicted near future total disk usage on this node.
* @param currentDiskUsage the current total disk usage on this node.
*/
public record NodeAllocationStats( public record NodeAllocationStats(
int shards, int shards,
int undesiredShards, int undesiredShards,

View file

@ -24,7 +24,10 @@ import org.elasticsearch.core.Nullable;
import java.util.Map; import java.util.Map;
public class NodeAllocationStatsProvider { /**
* Calculates the allocation weights and usage stats for each node: see {@link NodeAllocationStatsAndWeight} for details.
*/
public class NodeAllocationStatsAndWeightsCalculator {
private final WriteLoadForecaster writeLoadForecaster; private final WriteLoadForecaster writeLoadForecaster;
private volatile float indexBalanceFactor; private volatile float indexBalanceFactor;
@ -32,7 +35,10 @@ public class NodeAllocationStatsProvider {
private volatile float writeLoadBalanceFactor; private volatile float writeLoadBalanceFactor;
private volatile float diskUsageBalanceFactor; private volatile float diskUsageBalanceFactor;
public record NodeAllocationAndClusterBalanceStats( /**
* Node shard allocation stats and the total node weight.
*/
public record NodeAllocationStatsAndWeight(
int shards, int shards,
int undesiredShards, int undesiredShards,
double forecastedIngestLoad, double forecastedIngestLoad,
@ -41,7 +47,7 @@ public class NodeAllocationStatsProvider {
float currentNodeWeight float currentNodeWeight
) {} ) {}
public NodeAllocationStatsProvider(WriteLoadForecaster writeLoadForecaster, ClusterSettings clusterSettings) { public NodeAllocationStatsAndWeightsCalculator(WriteLoadForecaster writeLoadForecaster, ClusterSettings clusterSettings) {
this.writeLoadForecaster = writeLoadForecaster; this.writeLoadForecaster = writeLoadForecaster;
clusterSettings.initializeAndWatch(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, value -> this.shardBalanceFactor = value); clusterSettings.initializeAndWatch(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, value -> this.shardBalanceFactor = value);
clusterSettings.initializeAndWatch(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, value -> this.indexBalanceFactor = value); clusterSettings.initializeAndWatch(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, value -> this.indexBalanceFactor = value);
@ -55,7 +61,10 @@ public class NodeAllocationStatsProvider {
); );
} }
public Map<String, NodeAllocationAndClusterBalanceStats> stats( /**
* Returns a map of node IDs to {@link NodeAllocationStatsAndWeight}.
*/
public Map<String, NodeAllocationStatsAndWeight> nodesAllocationStatsAndWeights(
Metadata metadata, Metadata metadata,
RoutingNodes routingNodes, RoutingNodes routingNodes,
ClusterInfo clusterInfo, ClusterInfo clusterInfo,
@ -66,7 +75,7 @@ public class NodeAllocationStatsProvider {
var avgWriteLoadPerNode = WeightFunction.avgWriteLoadPerNode(writeLoadForecaster, metadata, routingNodes); var avgWriteLoadPerNode = WeightFunction.avgWriteLoadPerNode(writeLoadForecaster, metadata, routingNodes);
var avgDiskUsageInBytesPerNode = WeightFunction.avgDiskUsageInBytesPerNode(clusterInfo, metadata, routingNodes); var avgDiskUsageInBytesPerNode = WeightFunction.avgDiskUsageInBytesPerNode(clusterInfo, metadata, routingNodes);
var stats = Maps.<String, NodeAllocationAndClusterBalanceStats>newMapWithExpectedSize(routingNodes.size()); var nodeAllocationStatsAndWeights = Maps.<String, NodeAllocationStatsAndWeight>newMapWithExpectedSize(routingNodes.size());
for (RoutingNode node : routingNodes) { for (RoutingNode node : routingNodes) {
int shards = 0; int shards = 0;
int undesiredShards = 0; int undesiredShards = 0;
@ -75,9 +84,10 @@ public class NodeAllocationStatsProvider {
long currentDiskUsage = 0; long currentDiskUsage = 0;
for (ShardRouting shardRouting : node) { for (ShardRouting shardRouting : node) {
if (shardRouting.relocating()) { if (shardRouting.relocating()) {
// Skip the shard if it is moving off this node. The node running recovery will count it.
continue; continue;
} }
shards++; ++shards;
IndexMetadata indexMetadata = metadata.indexMetadata(shardRouting.index()); IndexMetadata indexMetadata = metadata.indexMetadata(shardRouting.index());
if (isDesiredAllocation(desiredBalance, shardRouting) == false) { if (isDesiredAllocation(desiredBalance, shardRouting) == false) {
undesiredShards++; undesiredShards++;
@ -86,9 +96,8 @@ public class NodeAllocationStatsProvider {
forecastedWriteLoad += writeLoadForecaster.getForecastedWriteLoad(indexMetadata).orElse(0.0); forecastedWriteLoad += writeLoadForecaster.getForecastedWriteLoad(indexMetadata).orElse(0.0);
forecastedDiskUsage += Math.max(indexMetadata.getForecastedShardSizeInBytes().orElse(0), shardSize); forecastedDiskUsage += Math.max(indexMetadata.getForecastedShardSizeInBytes().orElse(0), shardSize);
currentDiskUsage += shardSize; currentDiskUsage += shardSize;
} }
float currentNodeWeight = weightFunction.nodeWeight( float currentNodeWeight = weightFunction.calculateNodeWeight(
shards, shards,
avgShardsPerNode, avgShardsPerNode,
forecastedWriteLoad, forecastedWriteLoad,
@ -96,10 +105,12 @@ public class NodeAllocationStatsProvider {
currentDiskUsage, currentDiskUsage,
avgDiskUsageInBytesPerNode avgDiskUsageInBytesPerNode
); );
stats.put( nodeAllocationStatsAndWeights.put(
node.nodeId(), node.nodeId(),
new NodeAllocationAndClusterBalanceStats( new NodeAllocationStatsAndWeight(
shards, shards,
// The public API contract for the 'undesired_shards' field is that -1 is returned if an allocator
// other than the desired balance allocator is in use.
desiredBalance != null ? undesiredShards : -1, desiredBalance != null ? undesiredShards : -1,
forecastedWriteLoad, forecastedWriteLoad,
forecastedDiskUsage, forecastedDiskUsage,
@ -109,10 +120,13 @@ public class NodeAllocationStatsProvider {
); );
} }
return stats; return nodeAllocationStatsAndWeights;
} }
private static boolean isDesiredAllocation(DesiredBalance desiredBalance, ShardRouting shardRouting) { /**
* Checks whether a shard is currently allocated to a node that is wanted by the desired balance decision.
*/
private static boolean isDesiredAllocation(@Nullable DesiredBalance desiredBalance, ShardRouting shardRouting) {
if (desiredBalance == null) { if (desiredBalance == null) {
return true; return true;
} }
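Editor's note: the calculator walks each routing node, skips shards that are relocating away, accumulates counts and sizes, and reports undesiredShards as -1 when no desired balance is available. A self-contained sketch of that accumulation loop, using stand-in records instead of ShardRouting and the real stats type:

    import java.util.List;

    final class NodeStatsLoopExample {
        record ShardSketch(long sizeInBytes, boolean relocating, boolean desired) {}
        record NodeStatsSketch(int shards, int undesiredShards, long currentDiskUsage) {}

        static NodeStatsSketch statsForNode(List<ShardSketch> shardsOnNode, boolean haveDesiredBalance) {
            int shards = 0;
            int undesiredShards = 0;
            long currentDiskUsage = 0;
            for (ShardSketch shard : shardsOnNode) {
                if (shard.relocating()) {
                    continue; // moving off this node; the node running the recovery counts it
                }
                ++shards;
                if (shard.desired() == false) {
                    ++undesiredShards;
                }
                currentDiskUsage += shard.sizeInBytes();
            }
            // -1 signals "unknown" when no desired balance is available (the undesired_shards API contract).
            return new NodeStatsSketch(shards, haveDesiredBalance ? undesiredShards : -1, currentDiskUsage);
        }

        public static void main(String[] args) {
            List<ShardSketch> shards = List.of(new ShardSketch(1024, false, true), new ShardSketch(2048, true, true), new ShardSketch(512, false, false));
            System.out.println(statsForNode(shards, true)); // NodeStatsSketch[shards=2, undesiredShards=1, currentDiskUsage=1536]
        }
    }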

View file

@ -114,6 +114,7 @@ public class BalancedShardsAllocator implements ShardsAllocator {
Property.NodeScope Property.NodeScope
); );
// TODO: deduplicate these fields, use the fields in NodeAllocationStatsAndWeightsCalculator instead.
private volatile float indexBalanceFactor; private volatile float indexBalanceFactor;
private volatile float shardBalanceFactor; private volatile float shardBalanceFactor;
private volatile float writeLoadBalanceFactor; private volatile float writeLoadBalanceFactor;
@ -170,7 +171,7 @@ public class BalancedShardsAllocator implements ShardsAllocator {
Map<DiscoveryNode, DesiredBalanceMetrics.NodeWeightStats> nodeLevelWeights = new HashMap<>(); Map<DiscoveryNode, DesiredBalanceMetrics.NodeWeightStats> nodeLevelWeights = new HashMap<>();
for (var entry : balancer.nodes.entrySet()) { for (var entry : balancer.nodes.entrySet()) {
var node = entry.getValue(); var node = entry.getValue();
var nodeWeight = weightFunction.nodeWeight( var nodeWeight = weightFunction.calculateNodeWeight(
node.numShards(), node.numShards(),
balancer.avgShardsPerNode(), balancer.avgShardsPerNode(),
node.writeLoad(), node.writeLoad(),
@ -266,7 +267,7 @@ public class BalancedShardsAllocator implements ShardsAllocator {
private final RoutingAllocation allocation; private final RoutingAllocation allocation;
private final RoutingNodes routingNodes; private final RoutingNodes routingNodes;
private final Metadata metadata; private final Metadata metadata;
private final WeightFunction weight; private final WeightFunction weightFunction;
private final float threshold; private final float threshold;
private final float avgShardsPerNode; private final float avgShardsPerNode;
@ -275,12 +276,17 @@ public class BalancedShardsAllocator implements ShardsAllocator {
private final Map<String, ModelNode> nodes; private final Map<String, ModelNode> nodes;
private final NodeSorter sorter; private final NodeSorter sorter;
private Balancer(WriteLoadForecaster writeLoadForecaster, RoutingAllocation allocation, WeightFunction weight, float threshold) { private Balancer(
WriteLoadForecaster writeLoadForecaster,
RoutingAllocation allocation,
WeightFunction weightFunction,
float threshold
) {
this.writeLoadForecaster = writeLoadForecaster; this.writeLoadForecaster = writeLoadForecaster;
this.allocation = allocation; this.allocation = allocation;
this.routingNodes = allocation.routingNodes(); this.routingNodes = allocation.routingNodes();
this.metadata = allocation.metadata(); this.metadata = allocation.metadata();
this.weight = weight; this.weightFunction = weightFunction;
this.threshold = threshold; this.threshold = threshold;
avgShardsPerNode = WeightFunction.avgShardPerNode(metadata, routingNodes); avgShardsPerNode = WeightFunction.avgShardPerNode(metadata, routingNodes);
avgWriteLoadPerNode = WeightFunction.avgWriteLoadPerNode(writeLoadForecaster, metadata, routingNodes); avgWriteLoadPerNode = WeightFunction.avgWriteLoadPerNode(writeLoadForecaster, metadata, routingNodes);
@ -359,7 +365,7 @@ public class BalancedShardsAllocator implements ShardsAllocator {
* to sort based on an index. * to sort based on an index.
*/ */
private NodeSorter newNodeSorter() { private NodeSorter newNodeSorter() {
return new NodeSorter(nodesArray(), weight, this); return new NodeSorter(nodesArray(), weightFunction, this);
} }
/** /**
@ -443,7 +449,7 @@ public class BalancedShardsAllocator implements ShardsAllocator {
assert currentNode != null : "currently assigned node could not be found"; assert currentNode != null : "currently assigned node could not be found";
// balance the shard, if a better node can be found // balance the shard, if a better node can be found
final float currentWeight = weight.weight(this, currentNode, index); final float currentWeight = weightFunction.calculateNodeWeightWithIndex(this, currentNode, index);
final AllocationDeciders deciders = allocation.deciders(); final AllocationDeciders deciders = allocation.deciders();
Type rebalanceDecisionType = Type.NO; Type rebalanceDecisionType = Type.NO;
ModelNode targetNode = null; ModelNode targetNode = null;
@ -459,7 +465,7 @@ public class BalancedShardsAllocator implements ShardsAllocator {
// this is a comparison of the number of shards on this node to the number of shards // this is a comparison of the number of shards on this node to the number of shards
// that should be on each node on average (both taking the cluster as a whole into account // that should be on each node on average (both taking the cluster as a whole into account
// as well as shards per index) // as well as shards per index)
final float nodeWeight = weight.weight(this, node, index); final float nodeWeight = weightFunction.calculateNodeWeightWithIndex(this, node, index);
// if the node we are examining has a worse (higher) weight than the node the shard is // if the node we are examining has a worse (higher) weight than the node the shard is
// assigned to, then there is no way moving the shard to the node with the worse weight // assigned to, then there is no way moving the shard to the node with the worse weight
// can make the balance of the cluster better, so we check for that here // can make the balance of the cluster better, so we check for that here
@ -1051,7 +1057,7 @@ public class BalancedShardsAllocator implements ShardsAllocator {
} }
// weight of this index currently on the node // weight of this index currently on the node
float currentWeight = weight.weight(this, node, index); float currentWeight = weightFunction.calculateNodeWeightWithIndex(this, node, index);
// moving the shard would not improve the balance, and we are not in explain mode, so short circuit // moving the shard would not improve the balance, and we are not in explain mode, so short circuit
if (currentWeight > minWeight && explain == false) { if (currentWeight > minWeight && explain == false) {
continue; continue;
@ -1345,7 +1351,7 @@ public class BalancedShardsAllocator implements ShardsAllocator {
} }
public float weight(ModelNode node) { public float weight(ModelNode node) {
return function.weight(balancer, node, index); return function.calculateNodeWeightWithIndex(balancer, node, index);
} }
public float minWeightDelta() { public float minWeightDelta() {

View file

@ -21,7 +21,7 @@ import java.util.Objects;
* *
* @param assignments a set of the (persistent) node IDs to which each {@link ShardId} should be allocated * @param assignments a set of the (persistent) node IDs to which each {@link ShardId} should be allocated
* @param weightsPerNode The node weights calculated based on * @param weightsPerNode The node weights calculated based on
* {@link org.elasticsearch.cluster.routing.allocation.allocator.WeightFunction#nodeWeight} * {@link org.elasticsearch.cluster.routing.allocation.allocator.WeightFunction#calculateNodeWeight}
*/ */
public record DesiredBalance( public record DesiredBalance(
long lastConvergedIndex, long lastConvergedIndex,

View file

@ -10,7 +10,7 @@
package org.elasticsearch.cluster.routing.allocation.allocator; package org.elasticsearch.cluster.routing.allocation.allocator;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider.NodeAllocationAndClusterBalanceStats; import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsAndWeightsCalculator.NodeAllocationStatsAndWeight;
import org.elasticsearch.telemetry.metric.DoubleWithAttributes; import org.elasticsearch.telemetry.metric.DoubleWithAttributes;
import org.elasticsearch.telemetry.metric.LongWithAttributes; import org.elasticsearch.telemetry.metric.LongWithAttributes;
import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.telemetry.metric.MeterRegistry;
@ -20,6 +20,12 @@ import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.AtomicReference;
/**
* Maintains balancer metrics and makes them accessible to the {@link MeterRegistry} and APM reporting. Metrics are updated
* ({@link #updateMetrics}) or cleared ({@link #zeroAllMetrics}) in response to cluster events, and the metrics are pulled for reporting
* via the MeterRegistry implementation. Only the master node reports metrics: see {@link #setNodeIsMaster}. When
* {@link #nodeIsMaster} is false, empty values are returned such that MeterRegistry ignores the metrics for reporting purposes.
*/
public class DesiredBalanceMetrics { public class DesiredBalanceMetrics {
public record AllocationStats(long unassignedShards, long totalAllocations, long undesiredAllocationsExcludingShuttingDownNodes) {} public record AllocationStats(long unassignedShards, long totalAllocations, long undesiredAllocationsExcludingShuttingDownNodes) {}
@ -69,13 +75,14 @@ public class DesiredBalanceMetrics {
private volatile long undesiredAllocations; private volatile long undesiredAllocations;
private final AtomicReference<Map<DiscoveryNode, NodeWeightStats>> weightStatsPerNodeRef = new AtomicReference<>(Map.of()); private final AtomicReference<Map<DiscoveryNode, NodeWeightStats>> weightStatsPerNodeRef = new AtomicReference<>(Map.of());
private final AtomicReference<Map<DiscoveryNode, NodeAllocationAndClusterBalanceStats>> allocationStatsPerNodeRef = private final AtomicReference<Map<DiscoveryNode, NodeAllocationStatsAndWeight>> allocationStatsPerNodeRef = new AtomicReference<>(
new AtomicReference<>(Map.of()); Map.of()
);
public void updateMetrics( public void updateMetrics(
AllocationStats allocationStats, AllocationStats allocationStats,
Map<DiscoveryNode, NodeWeightStats> weightStatsPerNode, Map<DiscoveryNode, NodeWeightStats> weightStatsPerNode,
Map<DiscoveryNode, NodeAllocationAndClusterBalanceStats> nodeAllocationStats Map<DiscoveryNode, NodeAllocationStatsAndWeight> nodeAllocationStats
) { ) {
assert allocationStats != null : "allocation stats cannot be null"; assert allocationStats != null : "allocation stats cannot be null";
assert weightStatsPerNode != null : "node balance weight stats cannot be null"; assert weightStatsPerNode != null : "node balance weight stats cannot be null";
@ -170,6 +177,10 @@ public class DesiredBalanceMetrics {
); );
} }
/**
* When {@link #nodeIsMaster} is set to true, the node will report the APM metrics registered in this class. When set to false, empty
* values will be returned such that no APM metrics are sent from this server.
*/
public void setNodeIsMaster(boolean nodeIsMaster) { public void setNodeIsMaster(boolean nodeIsMaster) {
this.nodeIsMaster = nodeIsMaster; this.nodeIsMaster = nodeIsMaster;
} }
@ -339,6 +350,10 @@ public class DesiredBalanceMetrics {
return List.of(); return List.of();
} }
/**
* Sets all the internal class fields to zero/empty. Typically used in conjunction with {@link #setNodeIsMaster}.
* This is best-effort because it is possible for {@link #updateMetrics} to race with this method.
*/
public void zeroAllMetrics() { public void zeroAllMetrics() {
unassignedShards = 0; unassignedShards = 0;
totalAllocations = 0; totalAllocations = 0;
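Editor's note: the new javadoc describes the master-only reporting contract: metrics are updated or zeroed on cluster events, and only the elected master returns non-empty observations to the registry. The following sketch shows that gating pattern in isolation; the field and method names mirror the ones above, but the callback shape is an assumption, not the real MeterRegistry API.

    import java.util.List;

    final class MasterGatedMetricsExample {
        private volatile boolean nodeIsMaster;
        private volatile long unassignedShards;

        void updateMetrics(long unassignedShards) {
            this.unassignedShards = unassignedShards;
        }

        void setNodeIsMaster(boolean nodeIsMaster) {
            this.nodeIsMaster = nodeIsMaster;
            if (nodeIsMaster == false) {
                zeroAllMetrics(); // best effort: a concurrent updateMetrics call may race with this reset
            }
        }

        void zeroAllMetrics() {
            unassignedShards = 0;
        }

        // The metrics registry polls this callback; non-master nodes contribute nothing.
        List<Long> unassignedShardsObservation() {
            return nodeIsMaster ? List.of(unassignedShards) : List.of();
        }

        public static void main(String[] args) {
            MasterGatedMetricsExample metrics = new MasterGatedMetricsExample();
            metrics.setNodeIsMaster(true);
            metrics.updateMetrics(3);
            System.out.println(metrics.unassignedShardsObservation()); // [3]
            metrics.setNodeIsMaster(false);
            System.out.println(metrics.unassignedShardsObservation()); // []
        }
    }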

View file

@ -21,8 +21,8 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider; import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsAndWeightsCalculator;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider.NodeAllocationAndClusterBalanceStats; import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsAndWeightsCalculator.NodeAllocationStatsAndWeight;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceMetrics.AllocationStats; import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceMetrics.AllocationStats;
import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.Decision;
@ -76,13 +76,13 @@ public class DesiredBalanceReconciler {
private final NodeAllocationOrdering allocationOrdering = new NodeAllocationOrdering(); private final NodeAllocationOrdering allocationOrdering = new NodeAllocationOrdering();
private final NodeAllocationOrdering moveOrdering = new NodeAllocationOrdering(); private final NodeAllocationOrdering moveOrdering = new NodeAllocationOrdering();
private final DesiredBalanceMetrics desiredBalanceMetrics; private final DesiredBalanceMetrics desiredBalanceMetrics;
private final NodeAllocationStatsProvider nodeAllocationStatsProvider; private final NodeAllocationStatsAndWeightsCalculator nodeAllocationStatsAndWeightsCalculator;
public DesiredBalanceReconciler( public DesiredBalanceReconciler(
ClusterSettings clusterSettings, ClusterSettings clusterSettings,
ThreadPool threadPool, ThreadPool threadPool,
DesiredBalanceMetrics desiredBalanceMetrics, DesiredBalanceMetrics desiredBalanceMetrics,
NodeAllocationStatsProvider nodeAllocationStatsProvider NodeAllocationStatsAndWeightsCalculator nodeAllocationStatsAndWeightsCalculator
) { ) {
this.desiredBalanceMetrics = desiredBalanceMetrics; this.desiredBalanceMetrics = desiredBalanceMetrics;
this.undesiredAllocationLogInterval = new FrequencyCappedAction( this.undesiredAllocationLogInterval = new FrequencyCappedAction(
@ -94,7 +94,7 @@ public class DesiredBalanceReconciler {
UNDESIRED_ALLOCATIONS_LOG_THRESHOLD_SETTING, UNDESIRED_ALLOCATIONS_LOG_THRESHOLD_SETTING,
value -> this.undesiredAllocationsLogThreshold = value value -> this.undesiredAllocationsLogThreshold = value
); );
this.nodeAllocationStatsProvider = nodeAllocationStatsProvider; this.nodeAllocationStatsAndWeightsCalculator = nodeAllocationStatsAndWeightsCalculator;
} }
public void reconcile(DesiredBalance desiredBalance, RoutingAllocation allocation) { public void reconcile(DesiredBalance desiredBalance, RoutingAllocation allocation) {
@ -160,20 +160,22 @@ public class DesiredBalanceReconciler {
} }
private void updateDesireBalanceMetrics(AllocationStats allocationStats) { private void updateDesireBalanceMetrics(AllocationStats allocationStats) {
var stats = nodeAllocationStatsProvider.stats( var nodesStatsAndWeights = nodeAllocationStatsAndWeightsCalculator.nodesAllocationStatsAndWeights(
allocation.metadata(), allocation.metadata(),
allocation.routingNodes(), allocation.routingNodes(),
allocation.clusterInfo(), allocation.clusterInfo(),
desiredBalance desiredBalance
); );
Map<DiscoveryNode, NodeAllocationAndClusterBalanceStats> nodeAllocationStats = new HashMap<>(stats.size());
for (var entry : stats.entrySet()) {
var node = allocation.nodes().get(entry.getKey());
Map<DiscoveryNode, NodeAllocationStatsAndWeight> filteredNodeAllocationStatsAndWeights = new HashMap<>(
nodesStatsAndWeights.size()
);
for (var nodeStatsAndWeight : nodesStatsAndWeights.entrySet()) {
var node = allocation.nodes().get(nodeStatsAndWeight.getKey());
if (node != null) { if (node != null) {
nodeAllocationStats.put(node, entry.getValue()); filteredNodeAllocationStatsAndWeights.put(node, nodeStatsAndWeight.getValue());
} }
} }
desiredBalanceMetrics.updateMetrics(allocationStats, desiredBalance.weightsPerNode(), nodeAllocationStats); desiredBalanceMetrics.updateMetrics(allocationStats, desiredBalance.weightsPerNode(), filteredNodeAllocationStatsAndWeights);
} }
private boolean allocateUnassignedInvariant() { private boolean allocateUnassignedInvariant() {


@ -18,7 +18,7 @@ import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.AllocationService.RerouteStrategy; import org.elasticsearch.cluster.routing.allocation.AllocationService.RerouteStrategy;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider; import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsAndWeightsCalculator;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingExplanations; import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
@ -89,7 +89,7 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator {
ClusterService clusterService, ClusterService clusterService,
DesiredBalanceReconcilerAction reconciler, DesiredBalanceReconcilerAction reconciler,
TelemetryProvider telemetryProvider, TelemetryProvider telemetryProvider,
NodeAllocationStatsProvider nodeAllocationStatsProvider NodeAllocationStatsAndWeightsCalculator nodeAllocationStatsAndWeightsCalculator
) { ) {
this( this(
delegateAllocator, delegateAllocator,
@ -98,7 +98,7 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator {
new DesiredBalanceComputer(clusterSettings, threadPool, delegateAllocator), new DesiredBalanceComputer(clusterSettings, threadPool, delegateAllocator),
reconciler, reconciler,
telemetryProvider, telemetryProvider,
nodeAllocationStatsProvider nodeAllocationStatsAndWeightsCalculator
); );
} }
@ -109,7 +109,7 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator {
DesiredBalanceComputer desiredBalanceComputer, DesiredBalanceComputer desiredBalanceComputer,
DesiredBalanceReconcilerAction reconciler, DesiredBalanceReconcilerAction reconciler,
TelemetryProvider telemetryProvider, TelemetryProvider telemetryProvider,
NodeAllocationStatsProvider nodeAllocationStatsProvider NodeAllocationStatsAndWeightsCalculator nodeAllocationStatsAndWeightsCalculator
) { ) {
this.desiredBalanceMetrics = new DesiredBalanceMetrics(telemetryProvider.getMeterRegistry()); this.desiredBalanceMetrics = new DesiredBalanceMetrics(telemetryProvider.getMeterRegistry());
this.delegateAllocator = delegateAllocator; this.delegateAllocator = delegateAllocator;
@ -120,7 +120,7 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator {
clusterService.getClusterSettings(), clusterService.getClusterSettings(),
threadPool, threadPool,
desiredBalanceMetrics, desiredBalanceMetrics,
nodeAllocationStatsProvider nodeAllocationStatsAndWeightsCalculator
); );
this.desiredBalanceComputation = new ContinuousComputation<>(threadPool.generic()) { this.desiredBalanceComputation = new ContinuousComputation<>(threadPool.generic()) {


@ -60,9 +60,9 @@ public class WeightFunction {
theta3 = diskUsageBalance / sum; theta3 = diskUsageBalance / sum;
} }
float weight(BalancedShardsAllocator.Balancer balancer, BalancedShardsAllocator.ModelNode node, ProjectIndex index) { float calculateNodeWeightWithIndex(BalancedShardsAllocator.Balancer balancer, BalancedShardsAllocator.ModelNode node, ProjectIndex index) {
final float weightIndex = node.numShards(index) - balancer.avgShardsPerNode(index); final float weightIndex = node.numShards(index) - balancer.avgShardsPerNode(index);
final float nodeWeight = nodeWeight( final float nodeWeight = calculateNodeWeight(
node.numShards(), node.numShards(),
balancer.avgShardsPerNode(), balancer.avgShardsPerNode(),
node.writeLoad(), node.writeLoad(),
@ -73,7 +73,7 @@ public class WeightFunction {
return nodeWeight + theta1 * weightIndex; return nodeWeight + theta1 * weightIndex;
} }
public float nodeWeight( public float calculateNodeWeight(
int nodeNumShards, int nodeNumShards,
float avgShardsPerNode, float avgShardsPerNode,
double nodeWriteLoad, double nodeWriteLoad,


@ -120,7 +120,8 @@ public class KeyStoreWrapper implements SecureSettings {
/** The version where lucene directory API changed from BE to LE. */ /** The version where lucene directory API changed from BE to LE. */
public static final int LE_VERSION = 5; public static final int LE_VERSION = 5;
public static final int HIGHER_KDF_ITERATION_COUNT_VERSION = 6; public static final int HIGHER_KDF_ITERATION_COUNT_VERSION = 6;
public static final int CURRENT_VERSION = HIGHER_KDF_ITERATION_COUNT_VERSION; public static final int CIPHER_KEY_BITS_256_VERSION = 7;
public static final int CURRENT_VERSION = CIPHER_KEY_BITS_256_VERSION;
/** The algorithm used to derive the cipher key from a password. */ /** The algorithm used to derive the cipher key from a password. */
private static final String KDF_ALGO = "PBKDF2WithHmacSHA512"; private static final String KDF_ALGO = "PBKDF2WithHmacSHA512";
@ -128,14 +129,8 @@ public class KeyStoreWrapper implements SecureSettings {
/** The number of iterations to derive the cipher key. */ /** The number of iterations to derive the cipher key. */
private static final int KDF_ITERS = 210000; private static final int KDF_ITERS = 210000;
/**
* The number of bits for the cipher key.
*
* Note: The Oracle JDK 8 ships with a limited JCE policy that restricts key length for AES to 128 bits.
* This can be increased to 256 bits once minimum java 9 is the minimum java version.
* See http://www.oracle.com/technetwork/java/javase/terms/readme/jdk9-readme-3852447.html#jce
* */
private static final int CIPHER_KEY_BITS = 128;
/** The number of bits for the cipher key (256 bits are supported as of Java 9).*/
private static final int CIPHER_KEY_BITS = 256;
/** The number of bits for the GCM tag. */ /** The number of bits for the GCM tag. */
private static final int GCM_TAG_BITS = 128; private static final int GCM_TAG_BITS = 128;
@ -156,6 +151,7 @@ public class KeyStoreWrapper implements SecureSettings {
// 4: remove distinction between string/files, ES 6.8/7.1 // 4: remove distinction between string/files, ES 6.8/7.1
// 5: Lucene directory API changed to LE, ES 8.0 // 5: Lucene directory API changed to LE, ES 8.0
// 6: increase KDF iteration count, ES 8.14 // 6: increase KDF iteration count, ES 8.14
// 7: increase cipher key length to 256 bits, ES 9.0
/** The metadata format version used to read the current keystore wrapper. */ /** The metadata format version used to read the current keystore wrapper. */
private final int formatVersion; private final int formatVersion;
@ -318,8 +314,9 @@ public class KeyStoreWrapper implements SecureSettings {
return hasPassword; return hasPassword;
} }
private static Cipher createCipher(int opmode, char[] password, byte[] salt, byte[] iv, int kdfIters) throws GeneralSecurityException { private static Cipher createCipher(int opmode, char[] password, byte[] salt, byte[] iv, int kdfIters, int cipherKeyBits)
PBEKeySpec keySpec = new PBEKeySpec(password, salt, kdfIters, CIPHER_KEY_BITS); throws GeneralSecurityException {
PBEKeySpec keySpec = new PBEKeySpec(password, salt, kdfIters, cipherKeyBits);
SecretKeyFactory keyFactory = SecretKeyFactory.getInstance(KDF_ALGO); SecretKeyFactory keyFactory = SecretKeyFactory.getInstance(KDF_ALGO);
SecretKey secretKey; SecretKey secretKey;
try { try {
@ -343,6 +340,11 @@ public class KeyStoreWrapper implements SecureSettings {
return formatVersion < HIGHER_KDF_ITERATION_COUNT_VERSION ? 10000 : KDF_ITERS; return formatVersion < HIGHER_KDF_ITERATION_COUNT_VERSION ? 10000 : KDF_ITERS;
} }
private static int getCipherKeyBitsForVersion(int formatVersion) {
// cipher key length was increased in version 7; it was 128 bits in previous versions
return formatVersion < CIPHER_KEY_BITS_256_VERSION ? 128 : CIPHER_KEY_BITS;
}
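The version-dependent helpers above select between 10000 and 210000 KDF iterations and between 128-bit and 256-bit cipher keys. The derivation itself is plain JCE; the following is a self-contained sketch of that path, assuming a 64-byte salt and reusing the constants shown above, and is illustrative rather than the keystore's actual read/write code:
import javax.crypto.Cipher;
import javax.crypto.SecretKey;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.GCMParameterSpec;
import javax.crypto.spec.PBEKeySpec;
import javax.crypto.spec.SecretKeySpec;
import java.nio.charset.StandardCharsets;
import java.security.SecureRandom;
public class Pbkdf2AesGcmSketch {
    public static void main(String[] args) throws Exception {
        char[] password = "keystorepassword".toCharArray();
        byte[] salt = new byte[64];   // assumed salt size, for illustration only
        byte[] iv = new byte[12];     // 12-byte GCM IV, as in the hunk further down
        SecureRandom random = new SecureRandom();
        random.nextBytes(salt);
        random.nextBytes(iv);
        // 210000 iterations and a 256-bit key correspond to format versions 6 and 7 respectively.
        PBEKeySpec keySpec = new PBEKeySpec(password, salt, 210000, 256);
        SecretKeyFactory keyFactory = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA512");
        SecretKey derived = keyFactory.generateSecret(keySpec);
        SecretKeySpec aesKey = new SecretKeySpec(derived.getEncoded(), "AES");
        // AES/GCM with a 128-bit tag, matching GCM_TAG_BITS.
        Cipher cipher = Cipher.getInstance("AES/GCM/NoPadding");
        cipher.init(Cipher.ENCRYPT_MODE, aesKey, new GCMParameterSpec(128, iv));
        byte[] encrypted = cipher.doFinal("secret setting".getBytes(StandardCharsets.UTF_8));
        System.out.println("ciphertext length: " + encrypted.length);
    }
}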
/** /**
* Decrypts the underlying keystore data. * Decrypts the underlying keystore data.
* *
@ -371,7 +373,14 @@ public class KeyStoreWrapper implements SecureSettings {
throw new SecurityException("Keystore has been corrupted or tampered with", e); throw new SecurityException("Keystore has been corrupted or tampered with", e);
} }
Cipher cipher = createCipher(Cipher.DECRYPT_MODE, password, salt, iv, getKdfIterationCountForVersion(formatVersion)); Cipher cipher = createCipher(
Cipher.DECRYPT_MODE,
password,
salt,
iv,
getKdfIterationCountForVersion(formatVersion),
getCipherKeyBitsForVersion(formatVersion)
);
try ( try (
ByteArrayInputStream bytesStream = new ByteArrayInputStream(encryptedBytes); ByteArrayInputStream bytesStream = new ByteArrayInputStream(encryptedBytes);
CipherInputStream cipherStream = new CipherInputStream(bytesStream, cipher); CipherInputStream cipherStream = new CipherInputStream(bytesStream, cipher);
@ -409,11 +418,12 @@ public class KeyStoreWrapper implements SecureSettings {
} }
/** Encrypt the keystore entries and return the encrypted data. */ /** Encrypt the keystore entries and return the encrypted data. */
private byte[] encrypt(char[] password, byte[] salt, byte[] iv, int kdfIterationCount) throws GeneralSecurityException, IOException { private byte[] encrypt(char[] password, byte[] salt, byte[] iv, int kdfIterationCount, int cipherKeyBits)
throws GeneralSecurityException, IOException {
assert isLoaded(); assert isLoaded();
ByteArrayOutputStream bytes = new ByteArrayOutputStream(); ByteArrayOutputStream bytes = new ByteArrayOutputStream();
Cipher cipher = createCipher(Cipher.ENCRYPT_MODE, password, salt, iv, kdfIterationCount); Cipher cipher = createCipher(Cipher.ENCRYPT_MODE, password, salt, iv, kdfIterationCount, cipherKeyBits);
try ( try (
CipherOutputStream cipherStream = new CipherOutputStream(bytes, cipher); CipherOutputStream cipherStream = new CipherOutputStream(bytes, cipher);
DataOutputStream output = new DataOutputStream(cipherStream) DataOutputStream output = new DataOutputStream(cipherStream)
@ -456,7 +466,13 @@ public class KeyStoreWrapper implements SecureSettings {
byte[] iv = new byte[12]; byte[] iv = new byte[12];
random.nextBytes(iv); random.nextBytes(iv);
// encrypted data // encrypted data
byte[] encryptedBytes = encrypt(password, salt, iv, getKdfIterationCountForVersion(CURRENT_VERSION)); byte[] encryptedBytes = encrypt(
password,
salt,
iv,
getKdfIterationCountForVersion(CURRENT_VERSION),
getCipherKeyBitsForVersion(CURRENT_VERSION)
);
// size of data block // size of data block
output.writeInt(4 + salt.length + 4 + iv.length + 4 + encryptedBytes.length); output.writeInt(4 + salt.length + 4 + iv.length + 4 + encryptedBytes.length);


@ -19,7 +19,9 @@ import java.util.function.Supplier;
public final class CachedSupplier<T> implements Supplier<T> { public final class CachedSupplier<T> implements Supplier<T> {
private volatile Supplier<T> supplier; private volatile Supplier<T> supplier;
private volatile T result; // result does not need to be volatile as we only read it after reading that the supplier got nulled out. Since we null out the
// supplier after setting the result, total store order from an observed volatile write is sufficient to make a plain read safe.
private T result;
public static <R> CachedSupplier<R> wrap(Supplier<R> supplier) { public static <R> CachedSupplier<R> wrap(Supplier<R> supplier) {
if (supplier instanceof CachedSupplier<R> c) { if (supplier instanceof CachedSupplier<R> c) {
@ -38,14 +40,18 @@ public final class CachedSupplier<T> implements Supplier<T> {
if (supplier == null) { if (supplier == null) {
return result; return result;
} }
initResult();
return result;
}
private synchronized void initResult() {
if (supplier != null) {
result = supplier.get();
supplier = null;
}
}
return initResult();
}
private synchronized T initResult() {
var s = supplier;
if (s != null) {
T res = s.get();
result = res;
supplier = null;
return res;
} else {
return result;
}
}
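The comment above is why result can be a plain field: callers only read it after observing that the volatile supplier has been nulled out, and initResult nulls the supplier only after storing the result. Usage is unchanged; a small illustrative sketch, with the wrapped computation made up for the example:
// Illustrative: the delegate runs at most once; later calls return the cached value.
CachedSupplier<String> cached = CachedSupplier.wrap(() -> {
    System.out.println("computing once");
    return "expensive result";
});
String first = cached.get();   // triggers the computation under the lock
String second = cached.get();  // served from the cached field, no locking
assert first == second;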


@ -21,69 +21,60 @@ public enum ChunkedToXContentHelper {
; ;
public static Iterator<ToXContent> startObject() { public static Iterator<ToXContent> startObject() {
return Iterators.single(((builder, params) -> builder.startObject())); return Iterators.single((b, p) -> b.startObject());
} }
public static Iterator<ToXContent> startObject(String name) { public static Iterator<ToXContent> startObject(String name) {
return Iterators.single(((builder, params) -> builder.startObject(name))); return Iterators.single((b, p) -> b.startObject(name));
} }
public static Iterator<ToXContent> endObject() { public static Iterator<ToXContent> endObject() {
return Iterators.single(((builder, params) -> builder.endObject())); return Iterators.single((b, p) -> b.endObject());
} }
public static Iterator<ToXContent> startArray() { public static Iterator<ToXContent> startArray() {
return Iterators.single(((builder, params) -> builder.startArray())); return Iterators.single((b, p) -> b.startArray());
} }
public static Iterator<ToXContent> startArray(String name) { public static Iterator<ToXContent> startArray(String name) {
return Iterators.single(((builder, params) -> builder.startArray(name))); return Iterators.single((b, p) -> b.startArray(name));
} }
public static Iterator<ToXContent> endArray() { public static Iterator<ToXContent> endArray() {
return Iterators.single(((builder, params) -> builder.endArray())); return Iterators.single((b, p) -> b.endArray());
}
public static Iterator<ToXContent> map(String name, Map<String, ?> map) {
return map(name, map, entry -> (ToXContent) (builder, params) -> builder.field(entry.getKey(), entry.getValue()));
}
public static Iterator<ToXContent> xContentFragmentValuesMap(String name, Map<String, ? extends ToXContent> map) {
return map(
name,
map,
entry -> (ToXContent) (builder, params) -> entry.getValue().toXContent(builder.startObject(entry.getKey()), params).endObject()
);
}
public static Iterator<ToXContent> xContentValuesMap(String name, Map<String, ? extends ToXContent> map) {
return map(
name,
map,
entry -> (ToXContent) (builder, params) -> entry.getValue().toXContent(builder.field(entry.getKey()), params)
);
} }
/** /**
* Like xContentFragmentValuesMap, but allows the underlying XContent object to define its own "name" with startObject(string) * Defines an object named {@code name}, with the contents of each field set by {@code map}
* and endObject, rather than assuming that the key in the map should be the name in the XContent output.
* @param name name to use in the XContent for the outer object wrapping the map being rendered to XContent
* @param map map being rendered to XContent
*/ */
public static Iterator<ToXContent> xContentFragmentValuesMapCreateOwnName(String name, Map<String, ? extends ToXContent> map) { public static Iterator<ToXContent> object(String name, Map<String, ?> map) {
return map(name, map, entry -> (ToXContent) (builder, params) -> entry.getValue().toXContent(builder, params)); return object(name, map, e -> (b, p) -> b.field(e.getKey(), e.getValue()));
}
/**
* Defines an object named {@code name}, with the contents of each field created from each entry in {@code map}
*/
public static Iterator<ToXContent> xContentObjectFields(String name, Map<String, ? extends ToXContent> map) {
return object(name, map, e -> (b, p) -> e.getValue().toXContent(b.field(e.getKey()), p));
}
/**
* Defines an object named {@code name}, where each field is another object created from the corresponding entry in {@code map}
*/
public static Iterator<ToXContent> xContentObjectFieldObjects(String name, Map<String, ? extends ToXContent> map) {
return object(name, map, e -> (b, p) -> e.getValue().toXContent(b.startObject(e.getKey()), p).endObject());
} }
public static Iterator<ToXContent> field(String name, boolean value) { public static Iterator<ToXContent> field(String name, boolean value) {
return Iterators.single(((builder, params) -> builder.field(name, value))); return Iterators.single((b, p) -> b.field(name, value));
} }
public static Iterator<ToXContent> field(String name, long value) { public static Iterator<ToXContent> field(String name, long value) {
return Iterators.single(((builder, params) -> builder.field(name, value))); return Iterators.single((b, p) -> b.field(name, value));
} }
public static Iterator<ToXContent> field(String name, String value) { public static Iterator<ToXContent> field(String name, String value) {
return Iterators.single(((builder, params) -> builder.field(name, value))); return Iterators.single((b, p) -> b.field(name, value));
} }
public static Iterator<ToXContent> optionalField(String name, String value) { public static Iterator<ToXContent> optionalField(String name, String value) {
@ -107,7 +98,7 @@ public enum ChunkedToXContentHelper {
} }
public static Iterator<ToXContent> array(String name, Iterator<? extends ToXContent> contents) { public static Iterator<ToXContent> array(String name, Iterator<? extends ToXContent> contents) {
return Iterators.concat(ChunkedToXContentHelper.startArray(name), contents, ChunkedToXContentHelper.endArray()); return Iterators.concat(startArray(name), contents, endArray());
} }
/** /**
@ -119,19 +110,32 @@ public enum ChunkedToXContentHelper {
* @return Iterator composing field name and value serialization * @return Iterator composing field name and value serialization
*/ */
public static Iterator<ToXContent> array(String name, Iterator<? extends ChunkedToXContentObject> contents, ToXContent.Params params) { public static Iterator<ToXContent> array(String name, Iterator<? extends ChunkedToXContentObject> contents, ToXContent.Params params) {
return Iterators.concat( return Iterators.concat(startArray(name), Iterators.flatMap(contents, c -> c.toXContentChunked(params)), endArray());
ChunkedToXContentHelper.startArray(name),
Iterators.flatMap(contents, c -> c.toXContentChunked(params)),
ChunkedToXContentHelper.endArray()
);
} }
public static <T extends ToXContent> Iterator<ToXContent> wrapWithObject(String name, Iterator<T> iterator) { /**
* Defines an object named {@code name}, with the contents set by {@code iterator}
*/
public static Iterator<ToXContent> object(String name, Iterator<? extends ToXContent> iterator) {
return Iterators.concat(startObject(name), iterator, endObject()); return Iterators.concat(startObject(name), iterator, endObject());
} }
public static <T> Iterator<ToXContent> map(String name, Map<String, T> map, Function<Map.Entry<String, T>, ToXContent> toXContent) { /**
return wrapWithObject(name, Iterators.map(map.entrySet().iterator(), toXContent)); * Defines an object named {@code name}, with the contents set by calling {@code toXContent} on each entry in {@code map}
*/
public static <T> Iterator<ToXContent> object(String name, Map<String, T> map, Function<Map.Entry<String, T>, ToXContent> toXContent) {
return object(name, Iterators.map(map.entrySet().iterator(), toXContent));
}
/**
* Creates an Iterator of a single ToXContent object that serializes the given object as a single chunk. Just wraps {@link
* Iterators#single}, but still useful because it avoids any type ambiguity.
*
* @param item Item to wrap
* @return Singleton iterator for the given item.
*/
public static Iterator<ToXContent> chunk(ToXContent item) {
return Iterators.single(item);
} }
/** /**


@ -13,18 +13,9 @@ import java.util.Set;
/** /**
* This class specifies features for the features functionality itself. * This class specifies features for the features functionality itself.
* <p>
* This adds a feature {@code features_supported} indicating that a node supports node features.
* Nodes that do not support features won't have this feature in its feature set,
* so this can be checked without needing to look at the node version.
*/ */
public class FeatureInfrastructureFeatures implements FeatureSpecification { public class FeatureInfrastructureFeatures implements FeatureSpecification {
@Override
public Set<NodeFeature> getFeatures() {
return Set.of(FeatureService.FEATURES_SUPPORTED);
}
@Override @Override
public Set<NodeFeature> getTestFeatures() { public Set<NodeFeature> getTestFeatures() {
return Set.of(FeatureService.TEST_FEATURES_ENABLED); return Set.of(FeatureService.TEST_FEATURES_ENABLED);


@ -26,10 +26,6 @@ import java.util.Map;
*/ */
public class FeatureService { public class FeatureService {
/**
* A feature indicating that node features are supported.
*/
public static final NodeFeature FEATURES_SUPPORTED = new NodeFeature("features_supported", true);
public static final NodeFeature TEST_FEATURES_ENABLED = new NodeFeature("test_features_enabled"); public static final NodeFeature TEST_FEATURES_ENABLED = new NodeFeature("test_features_enabled");
private static final Logger logger = LogManager.getLogger(FeatureService.class); private static final Logger logger = LogManager.getLogger(FeatureService.class);


@ -31,7 +31,7 @@ import java.util.Set;
*/ */
public interface FeatureSpecification { public interface FeatureSpecification {
/** /**
* Returns a set of regular features that this node supports. * Returns a set of features that this node supports.
*/ */
default Set<NodeFeature> getFeatures() { default Set<NodeFeature> getFeatures() {
return Set.of(); return Set.of();
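For context, an implementation of this interface is just a declarative bundle of NodeFeature ids that the node publishes. A minimal hypothetical specification; the class name and feature id are invented for illustration:
public class MyPluginFeatures implements FeatureSpecification {
    @Override
    public Set<NodeFeature> getFeatures() {
        // Advertised to the cluster; other code can test for it via FeatureService.
        return Set.of(new NodeFeature("my_plugin.some_capability"));
    }
}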


@ -18,8 +18,6 @@ import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.DoubleValues; import org.apache.lucene.search.DoubleValues;
import org.apache.lucene.search.DoubleValuesSource; import org.apache.lucene.search.DoubleValuesSource;
import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.IndexSearcher;
import org.elasticsearch.search.profile.query.QueryProfiler;
import org.elasticsearch.search.vectors.QueryProfilerProvider;
import java.io.IOException; import java.io.IOException;
import java.util.Arrays; import java.util.Arrays;
@ -29,12 +27,11 @@ import java.util.Objects;
* DoubleValuesSource that is used to calculate scores according to a similarity function for a KnnFloatVectorField, using the * DoubleValuesSource that is used to calculate scores according to a similarity function for a KnnFloatVectorField, using the
* original vector values stored in the index * original vector values stored in the index
*/ */
public class VectorSimilarityFloatValueSource extends DoubleValuesSource implements QueryProfilerProvider { public class VectorSimilarityFloatValueSource extends DoubleValuesSource {
private final String field; private final String field;
private final float[] target; private final float[] target;
private final VectorSimilarityFunction vectorSimilarityFunction; private final VectorSimilarityFunction vectorSimilarityFunction;
private long vectorOpsCount;
public VectorSimilarityFloatValueSource(String field, float[] target, VectorSimilarityFunction vectorSimilarityFunction) { public VectorSimilarityFloatValueSource(String field, float[] target, VectorSimilarityFunction vectorSimilarityFunction) {
this.field = field; this.field = field;
@ -52,7 +49,6 @@ public class VectorSimilarityFloatValueSource extends DoubleValuesSource impleme
return new DoubleValues() { return new DoubleValues() {
@Override @Override
public double doubleValue() throws IOException { public double doubleValue() throws IOException {
vectorOpsCount++;
return vectorSimilarityFunction.compare(target, vectorValues.vectorValue(iterator.index())); return vectorSimilarityFunction.compare(target, vectorValues.vectorValue(iterator.index()));
} }
@ -73,11 +69,6 @@ public class VectorSimilarityFloatValueSource extends DoubleValuesSource impleme
return this; return this;
} }
@Override
public void profile(QueryProfiler queryProfiler) {
queryProfiler.addVectorOpsCount(vectorOpsCount);
}
@Override @Override
public int hashCode() { public int hashCode() {
return Objects.hash(field, Arrays.hashCode(target), vectorSimilarityFunction); return Objects.hash(field, Arrays.hashCode(target), vectorSimilarityFunction);


@ -487,11 +487,6 @@ public class KnnVectorQueryBuilder extends AbstractQueryBuilder<KnnVectorQueryBu
return this; return this;
} }
@Override
protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) throws IOException {
return super.doIndexMetadataRewrite(context);
}
@Override @Override
protected Query doToQuery(SearchExecutionContext context) throws IOException { protected Query doToQuery(SearchExecutionContext context) throws IOException {
MappedFieldType fieldType = context.getFieldType(fieldName); MappedFieldType fieldType = context.getFieldType(fieldName);
@ -529,8 +524,8 @@ public class KnnVectorQueryBuilder extends AbstractQueryBuilder<KnnVectorQueryBu
String parentPath = context.nestedLookup().getNestedParent(fieldName); String parentPath = context.nestedLookup().getNestedParent(fieldName);
Float numCandidatesFactor = rescoreVectorBuilder() == null ? null : rescoreVectorBuilder.numCandidatesFactor(); Float numCandidatesFactor = rescoreVectorBuilder() == null ? null : rescoreVectorBuilder.numCandidatesFactor();
BitSetProducer parentBitSet = null;
if (parentPath != null) { if (parentPath != null) {
final BitSetProducer parentBitSet;
final Query parentFilter; final Query parentFilter;
NestedObjectMapper originalObjectMapper = context.nestedScope().getObjectMapper(); NestedObjectMapper originalObjectMapper = context.nestedScope().getObjectMapper();
if (originalObjectMapper != null) { if (originalObjectMapper != null) {
@ -559,17 +554,17 @@ public class KnnVectorQueryBuilder extends AbstractQueryBuilder<KnnVectorQueryBu
// Now join the filterQuery & parentFilter to provide the matching blocks of children // Now join the filterQuery & parentFilter to provide the matching blocks of children
filterQuery = new ToChildBlockJoinQuery(filterQuery, parentBitSet); filterQuery = new ToChildBlockJoinQuery(filterQuery, parentBitSet);
} }
return vectorFieldType.createKnnQuery(
queryVector,
k,
adjustedNumCands,
numCandidatesFactor,
filterQuery,
vectorSimilarity,
parentBitSet
);
} }
return vectorFieldType.createKnnQuery(queryVector, k, adjustedNumCands, numCandidatesFactor, filterQuery, vectorSimilarity, null);
return vectorFieldType.createKnnQuery(
queryVector,
k,
adjustedNumCands,
numCandidatesFactor,
filterQuery,
vectorSimilarity,
parentBitSet
);
} }
@Override @Override


@ -32,16 +32,15 @@ public class RescoreKnnVectorQuery extends Query implements QueryProfilerProvide
private final String fieldName; private final String fieldName;
private final float[] floatTarget; private final float[] floatTarget;
private final VectorSimilarityFunction vectorSimilarityFunction; private final VectorSimilarityFunction vectorSimilarityFunction;
private final Integer k; private final int k;
private final Query innerQuery; private final Query innerQuery;
private long vectorOperations = 0;
private QueryProfilerProvider vectorProfiling;
public RescoreKnnVectorQuery( public RescoreKnnVectorQuery(
String fieldName, String fieldName,
float[] floatTarget, float[] floatTarget,
VectorSimilarityFunction vectorSimilarityFunction, VectorSimilarityFunction vectorSimilarityFunction,
Integer k, int k,
Query innerQuery Query innerQuery
) { ) {
this.fieldName = fieldName; this.fieldName = fieldName;
@ -54,19 +53,12 @@ public class RescoreKnnVectorQuery extends Query implements QueryProfilerProvide
@Override @Override
public Query rewrite(IndexSearcher searcher) throws IOException { public Query rewrite(IndexSearcher searcher) throws IOException {
DoubleValuesSource valueSource = new VectorSimilarityFloatValueSource(fieldName, floatTarget, vectorSimilarityFunction); DoubleValuesSource valueSource = new VectorSimilarityFloatValueSource(fieldName, floatTarget, vectorSimilarityFunction);
// Vector similarity VectorSimilarityFloatValueSource keep track of the compared vectors - we need that in case we don't need
// to calculate top k and return directly the query to understand how many comparisons were done
vectorProfiling = (QueryProfilerProvider) valueSource;
FunctionScoreQuery functionScoreQuery = new FunctionScoreQuery(innerQuery, valueSource); FunctionScoreQuery functionScoreQuery = new FunctionScoreQuery(innerQuery, valueSource);
Query query = searcher.rewrite(functionScoreQuery); Query query = searcher.rewrite(functionScoreQuery);
if (k == null) {
// No need to calculate top k - let the request size limit the results.
return query;
}
// Retrieve top k documents from the rescored query // Retrieve top k documents from the rescored query
TopDocs topDocs = searcher.search(query, k); TopDocs topDocs = searcher.search(query, k);
vectorOperations = topDocs.totalHits.value();
ScoreDoc[] scoreDocs = topDocs.scoreDocs; ScoreDoc[] scoreDocs = topDocs.scoreDocs;
int[] docIds = new int[scoreDocs.length]; int[] docIds = new int[scoreDocs.length];
float[] scores = new float[scoreDocs.length]; float[] scores = new float[scoreDocs.length];
@ -82,7 +74,7 @@ public class RescoreKnnVectorQuery extends Query implements QueryProfilerProvide
return innerQuery; return innerQuery;
} }
public Integer k() { public int k() {
return k; return k;
} }
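With k now a plain int, the query always scores the inner query's candidates with the exact similarity function and keeps the top k. A hedged construction sketch; the field name, vector, and searcher are illustrative assumptions:
float[] queryVector = new float[] { 0.12f, -0.4f, 0.88f };
Query rescored = new RescoreKnnVectorQuery(
    "my_vector",                      // illustrative field holding the indexed float vectors
    queryVector,
    VectorSimilarityFunction.COSINE,
    10,                               // k: number of hits kept after exact rescoring
    new MatchAllDocsQuery()           // inner query supplying the candidates
);
TopDocs top = searcher.search(rescored, 10);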
@ -92,10 +84,7 @@ public class RescoreKnnVectorQuery extends Query implements QueryProfilerProvide
queryProfilerProvider.profile(queryProfiler); queryProfilerProvider.profile(queryProfiler);
} }
if (vectorProfiling == null) { queryProfiler.addVectorOpsCount(vectorOperations);
throw new IllegalStateException("Query should have been rewritten");
}
vectorProfiling.profile(queryProfiler);
} }
@Override @Override


@ -58,7 +58,7 @@ public class FeatureMigrationResults implements Metadata.ProjectCustom {
@Override @Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) { public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) {
return ChunkedToXContentHelper.xContentValuesMap(RESULTS_FIELD.getPreferredName(), featureStatuses); return ChunkedToXContentHelper.xContentObjectFields(RESULTS_FIELD.getPreferredName(), featureStatuses);
} }
/** /**


@ -36,7 +36,7 @@ public class BaseNodesXContentResponseTests extends ESTestCase {
final var fullResponse = new BaseNodesXContentResponse<>(ClusterName.DEFAULT, List.of(new TestNodeResponse(node)), List.of()) { final var fullResponse = new BaseNodesXContentResponse<>(ClusterName.DEFAULT, List.of(new TestNodeResponse(node)), List.of()) {
@Override @Override
protected Iterator<? extends ToXContent> xContentChunks(ToXContent.Params outerParams) { protected Iterator<? extends ToXContent> xContentChunks(ToXContent.Params outerParams) {
return ChunkedToXContentHelper.singleChunk((b, p) -> b.startObject("content").endObject()); return ChunkedToXContentHelper.chunk((b, p) -> b.startObject("content").endObject());
} }
@Override @Override


@ -13,7 +13,6 @@ import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.RegExp; import org.apache.lucene.util.automaton.RegExp;
import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.TransportVersion;
import org.elasticsearch.TransportVersions; import org.elasticsearch.TransportVersions;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.alias.Alias;
@ -1331,16 +1330,7 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
Settings indexSettings = indexSettings(IndexVersion.current(), 1, 0).build(); Settings indexSettings = indexSettings(IndexVersion.current(), 1, 0).build();
List<AliasMetadata> aliases = List.of(AliasMetadata.builder("alias1").build()); List<AliasMetadata> aliases = List.of(AliasMetadata.builder("alias1").build());
IndexMetadata indexMetadata = buildIndexMetadata( IndexMetadata indexMetadata = buildIndexMetadata("test", aliases, () -> null, indexSettings, 4, sourceIndexMetadata, false);
"test",
aliases,
() -> null,
indexSettings,
4,
sourceIndexMetadata,
false,
TransportVersion.current()
);
assertThat(indexMetadata.getAliases().size(), is(1)); assertThat(indexMetadata.getAliases().size(), is(1));
assertThat(indexMetadata.getAliases().keySet().iterator().next(), is("alias1")); assertThat(indexMetadata.getAliases().keySet().iterator().next(), is("alias1"));
@ -1349,35 +1339,6 @@ public class MetadataCreateIndexServiceTests extends ESTestCase {
assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS)); assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.NO_SHARDS));
} }
public void testBuildIndexMetadataWithTransportVersionBeforeEventIngestedRangeAdded() {
IndexMetadata sourceIndexMetadata = IndexMetadata.builder("parent")
.settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).build())
.numberOfShards(1)
.numberOfReplicas(0)
.primaryTerm(0, 3L)
.build();
Settings indexSettings = indexSettings(IndexVersion.current(), 1, 0).build();
List<AliasMetadata> aliases = List.of(AliasMetadata.builder("alias1").build());
IndexMetadata indexMetadata = buildIndexMetadata(
"test",
aliases,
() -> null,
indexSettings,
4,
sourceIndexMetadata,
false,
TransportVersions.V_8_0_0
);
assertThat(indexMetadata.getAliases().size(), is(1));
assertThat(indexMetadata.getAliases().keySet().iterator().next(), is("alias1"));
assertThat("The source index primary term must be used", indexMetadata.primaryTerm(0), is(3L));
assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS));
// on versions before event.ingested was added to cluster state, it should default to UNKNOWN, not NO_SHARDS
assertThat(indexMetadata.getEventIngestedRange(), equalTo(IndexLongFieldRange.UNKNOWN));
}
public void testGetIndexNumberOfRoutingShardsWithNullSourceIndex() { public void testGetIndexNumberOfRoutingShardsWithNullSourceIndex() {
Settings indexSettings = Settings.builder() Settings indexSettings = Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())


@ -84,7 +84,7 @@ public class AllocationStatsServiceTests extends ESAllocationTestCase {
clusterService, clusterService,
() -> clusterInfo, () -> clusterInfo,
createShardAllocator(), createShardAllocator(),
new NodeAllocationStatsProvider(TEST_WRITE_LOAD_FORECASTER, ClusterSettings.createBuiltInClusterSettings()) new NodeAllocationStatsAndWeightsCalculator(TEST_WRITE_LOAD_FORECASTER, ClusterSettings.createBuiltInClusterSettings())
); );
assertThat( assertThat(
service.stats(), service.stats(),
@ -125,7 +125,7 @@ public class AllocationStatsServiceTests extends ESAllocationTestCase {
clusterService, clusterService,
EmptyClusterInfoService.INSTANCE, EmptyClusterInfoService.INSTANCE,
createShardAllocator(), createShardAllocator(),
new NodeAllocationStatsProvider(TEST_WRITE_LOAD_FORECASTER, ClusterSettings.createBuiltInClusterSettings()) new NodeAllocationStatsAndWeightsCalculator(TEST_WRITE_LOAD_FORECASTER, ClusterSettings.createBuiltInClusterSettings())
); );
assertThat( assertThat(
service.stats(), service.stats(),
@ -182,7 +182,7 @@ public class AllocationStatsServiceTests extends ESAllocationTestCase {
); );
} }
}, },
new NodeAllocationStatsProvider(TEST_WRITE_LOAD_FORECASTER, ClusterSettings.createBuiltInClusterSettings()) new NodeAllocationStatsAndWeightsCalculator(TEST_WRITE_LOAD_FORECASTER, ClusterSettings.createBuiltInClusterSettings())
); );
assertThat( assertThat(
service.stats(), service.stats(),


@ -456,18 +456,19 @@ public class DenseVectorFieldTypeTests extends FieldTypeTestCase {
); );
// Total results is k, internal k is multiplied by oversample // Total results is k, internal k is multiplied by oversample
checkRescoreQueryParameters(fieldType, 10, 200, 2.5F, null, 500, 10); checkRescoreQueryParameters(fieldType, 10, 200, randomInt(), 2.5F, null, 500, 10);
// If numCands < k, update numCands to k // If numCands < k, update numCands to k
checkRescoreQueryParameters(fieldType, 10, 20, 2.5F, null, 50, 10); checkRescoreQueryParameters(fieldType, 10, 20, randomInt(), 2.5F, null, 50, 10);
// Oversampling limits for num candidates // Oversampling limits for num candidates
checkRescoreQueryParameters(fieldType, 1000, 1000, 11.0F, null, 10000, 1000); checkRescoreQueryParameters(fieldType, 1000, 1000, randomInt(), 11.0F, null, 10000, 1000);
checkRescoreQueryParameters(fieldType, 5000, 7500, 2.5F, null, 10000, 5000); checkRescoreQueryParameters(fieldType, 5000, 7500, randomInt(), 2.5F, null, 10000, 5000);
} }
private static void checkRescoreQueryParameters( private static void checkRescoreQueryParameters(
DenseVectorFieldType fieldType, DenseVectorFieldType fieldType,
Integer k, int k,
int candidates, int candidates,
int requestSize,
float numCandsFactor, float numCandsFactor,
Integer expectedK, Integer expectedK,
int expectedCandidates, int expectedCandidates,


@ -9,8 +9,6 @@
package org.elasticsearch.search.vectors; package org.elasticsearch.search.vectors;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.apache.lucene.document.Document; import org.apache.lucene.document.Document;
import org.apache.lucene.document.KnnFloatVectorField; import org.apache.lucene.document.KnnFloatVectorField;
import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.DirectoryReader;
@ -33,11 +31,9 @@ import org.elasticsearch.test.ESTestCase;
import java.io.IOException; import java.io.IOException;
import java.io.UnsupportedEncodingException; import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collection; import java.util.Collection;
import java.util.HashSet; import java.util.HashSet;
import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.PriorityQueue; import java.util.PriorityQueue;
import java.util.stream.Collectors; import java.util.stream.Collectors;
@ -49,21 +45,11 @@ import static org.hamcrest.Matchers.greaterThan;
public class RescoreKnnVectorQueryTests extends ESTestCase { public class RescoreKnnVectorQueryTests extends ESTestCase {
public static final String FIELD_NAME = "float_vector"; public static final String FIELD_NAME = "float_vector";
private final int numDocs;
private final Integer k;
public RescoreKnnVectorQueryTests(boolean useK) {
this.numDocs = randomIntBetween(10, 100);
this.k = useK ? randomIntBetween(1, numDocs - 1) : null;
}
public void testRescoreDocs() throws Exception { public void testRescoreDocs() throws Exception {
int numDocs = randomIntBetween(10, 100);
int numDims = randomIntBetween(5, 100); int numDims = randomIntBetween(5, 100);
int k = randomIntBetween(1, numDocs - 1);
Integer adjustedK = k;
if (k == null) {
adjustedK = numDocs;
}
try (Directory d = newDirectory()) { try (Directory d = newDirectory()) {
addRandomDocuments(numDocs, d, numDims); addRandomDocuments(numDocs, d, numDims);
@ -77,7 +63,7 @@ public class RescoreKnnVectorQueryTests extends ESTestCase {
FIELD_NAME, FIELD_NAME,
queryVector, queryVector,
VectorSimilarityFunction.COSINE, VectorSimilarityFunction.COSINE,
adjustedK, k,
new MatchAllDocsQuery() new MatchAllDocsQuery()
); );
@ -86,7 +72,7 @@ public class RescoreKnnVectorQueryTests extends ESTestCase {
Map<Integer, Float> rescoredDocs = Arrays.stream(docs.scoreDocs) Map<Integer, Float> rescoredDocs = Arrays.stream(docs.scoreDocs)
.collect(Collectors.toMap(scoreDoc -> scoreDoc.doc, scoreDoc -> scoreDoc.score)); .collect(Collectors.toMap(scoreDoc -> scoreDoc.doc, scoreDoc -> scoreDoc.score));
assertThat(rescoredDocs.size(), equalTo(adjustedK)); assertThat(rescoredDocs.size(), equalTo(k));
Collection<Float> rescoredScores = new HashSet<>(rescoredDocs.values()); Collection<Float> rescoredScores = new HashSet<>(rescoredDocs.values());
@ -113,7 +99,7 @@ public class RescoreKnnVectorQueryTests extends ESTestCase {
assertThat(rescoredDocs.size(), equalTo(0)); assertThat(rescoredDocs.size(), equalTo(0));
// Check top scoring docs are contained in rescored docs // Check top scoring docs are contained in rescored docs
for (int i = 0; i < adjustedK; i++) { for (int i = 0; i < k; i++) {
Float topScore = topK.poll(); Float topScore = topK.poll();
if (rescoredScores.contains(topScore) == false) { if (rescoredScores.contains(topScore) == false) {
fail("Top score " + topScore + " not contained in rescored doc scores " + rescoredScores); fail("Top score " + topScore + " not contained in rescored doc scores " + rescoredScores);
@ -124,7 +110,9 @@ public class RescoreKnnVectorQueryTests extends ESTestCase {
} }
public void testProfiling() throws Exception { public void testProfiling() throws Exception {
int numDocs = randomIntBetween(10, 100);
int numDims = randomIntBetween(5, 100); int numDims = randomIntBetween(5, 100);
int k = randomIntBetween(1, numDocs - 1);
try (Directory d = newDirectory()) { try (Directory d = newDirectory()) {
addRandomDocuments(numDocs, d, numDims); addRandomDocuments(numDocs, d, numDims);
@ -132,13 +120,13 @@ public class RescoreKnnVectorQueryTests extends ESTestCase {
try (IndexReader reader = DirectoryReader.open(d)) { try (IndexReader reader = DirectoryReader.open(d)) {
float[] queryVector = randomVector(numDims); float[] queryVector = randomVector(numDims);
checkProfiling(queryVector, reader, new MatchAllDocsQuery()); checkProfiling(k, numDocs, queryVector, reader, new MatchAllDocsQuery());
checkProfiling(queryVector, reader, new MockQueryProfilerProvider(randomIntBetween(1, 100))); checkProfiling(k, numDocs, queryVector, reader, new MockQueryProfilerProvider(randomIntBetween(1, 100)));
} }
} }
} }
private void checkProfiling(float[] queryVector, IndexReader reader, Query innerQuery) throws IOException { private void checkProfiling(int k, int numDocs, float[] queryVector, IndexReader reader, Query innerQuery) throws IOException {
RescoreKnnVectorQuery rescoreKnnVectorQuery = new RescoreKnnVectorQuery( RescoreKnnVectorQuery rescoreKnnVectorQuery = new RescoreKnnVectorQuery(
FIELD_NAME, FIELD_NAME,
queryVector, queryVector,
@ -229,13 +217,4 @@ public class RescoreKnnVectorQueryTests extends ESTestCase {
w.forceMerge(1); w.forceMerge(1);
} }
} }
@ParametersFactory
public static Iterable<Object[]> parameters() {
List<Object[]> params = new ArrayList<>();
params.add(new Object[] { true });
params.add(new Object[] { false });
return params;
}
} }


@ -26,7 +26,7 @@ import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.FailedShard; import org.elasticsearch.cluster.routing.allocation.FailedShard;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider; import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsAndWeightsCalculator;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster; import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
@ -438,18 +438,16 @@ public abstract class ESAllocationTestCase extends ESTestCase {
} }
} }
protected static final NodeAllocationStatsProvider EMPTY_NODE_ALLOCATION_STATS = new NodeAllocationStatsProvider(
WriteLoadForecaster.DEFAULT,
createBuiltInClusterSettings()
) {
@Override
public Map<String, NodeAllocationAndClusterBalanceStats> stats(
Metadata metadata,
RoutingNodes routingNodes,
ClusterInfo clusterInfo,
@Nullable DesiredBalance desiredBalance
) {
return Map.of();
}
};
protected static final NodeAllocationStatsAndWeightsCalculator EMPTY_NODE_ALLOCATION_STATS =
new NodeAllocationStatsAndWeightsCalculator(WriteLoadForecaster.DEFAULT, createBuiltInClusterSettings()) {
@Override
public Map<String, NodeAllocationStatsAndWeight> nodesAllocationStatsAndWeights(
Metadata metadata,
RoutingNodes routingNodes,
ClusterInfo clusterInfo,
@Nullable DesiredBalance desiredBalance
) {
return Map.of();
}
};
} }


@ -119,7 +119,7 @@ public class AutoscalingMetadata implements Metadata.ClusterCustom {
@Override @Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) { public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) {
return ChunkedToXContentHelper.xContentValuesMap(POLICIES_FIELD.getPreferredName(), policies); return ChunkedToXContentHelper.xContentObjectFields(POLICIES_FIELD.getPreferredName(), policies);
} }
@Override @Override


@ -151,9 +151,9 @@ public class AutoFollowMetadata extends AbstractNamedDiffable<Metadata.ProjectCu
@Override @Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) { public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) {
return Iterators.concat( return Iterators.concat(
ChunkedToXContentHelper.xContentFragmentValuesMap(PATTERNS_FIELD.getPreferredName(), patterns), ChunkedToXContentHelper.xContentObjectFieldObjects(PATTERNS_FIELD.getPreferredName(), patterns),
ChunkedToXContentHelper.map(FOLLOWED_LEADER_INDICES_FIELD.getPreferredName(), followedLeaderIndexUUIDs), ChunkedToXContentHelper.object(FOLLOWED_LEADER_INDICES_FIELD.getPreferredName(), followedLeaderIndexUUIDs),
ChunkedToXContentHelper.map(HEADERS.getPreferredName(), headers) ChunkedToXContentHelper.object(HEADERS.getPreferredName(), headers)
); );
} }
