Merge main into multi-project

Tim Vernum 2024-12-11 16:15:02 +11:00
commit 64d5baf753
209 changed files with 7196 additions and 4549 deletions


@@ -20,6 +20,7 @@ import org.elasticsearch.gradle.internal.test.SimpleCommandLineArgumentProvider;
import org.elasticsearch.gradle.test.GradleTestPolicySetupPlugin;
import org.elasticsearch.gradle.test.SystemPropertyCommandLineArgumentProvider;
import org.gradle.api.Action;
import org.gradle.api.JavaVersion;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.Task;
@@ -112,7 +113,6 @@ public abstract class ElasticsearchTestBasePlugin implements Plugin<Project> {
test.jvmArgs(
"-Xmx" + System.getProperty("tests.heap.size", "512m"),
"-Xms" + System.getProperty("tests.heap.size", "512m"),
"-Djava.security.manager=allow",
"-Dtests.testfeatures.enabled=true",
"--add-opens=java.base/java.util=ALL-UNNAMED",
// TODO: only open these for mockito when it is modularized
@@ -127,6 +127,13 @@ public abstract class ElasticsearchTestBasePlugin implements Plugin<Project> {
);
test.getJvmArgumentProviders().add(new SimpleCommandLineArgumentProvider("-XX:HeapDumpPath=" + heapdumpDir));
test.getJvmArgumentProviders().add(() -> {
if (test.getJavaVersion().compareTo(JavaVersion.VERSION_23) <= 0) {
return List.of("-Djava.security.manager=allow");
} else {
return List.of();
}
});
String argline = System.getProperty("tests.jvm.argline");
if (argline != null) {


@@ -9,11 +9,14 @@
package org.elasticsearch.gradle.test;
import org.gradle.api.JavaVersion;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.invocation.Gradle;
import org.gradle.api.tasks.testing.Test;
import java.util.List;
public class GradleTestPolicySetupPlugin implements Plugin<Project> {
@Override
@@ -23,8 +26,13 @@ public class GradleTestPolicySetupPlugin implements Plugin<Project> {
test.systemProperty("tests.gradle", true);
test.systemProperty("tests.task", test.getPath());
// Flag is required for later Java versions since our tests use a custom security manager
test.jvmArgs("-Djava.security.manager=allow");
test.getJvmArgumentProviders().add(() -> {
if (test.getJavaVersion().compareTo(JavaVersion.VERSION_23) <= 0) {
return List.of("-Djava.security.manager=allow");
} else {
return List.of();
}
});
SystemPropertyCommandLineArgumentProvider nonInputProperties = new SystemPropertyCommandLineArgumentProvider();
// don't track these as inputs since they contain absolute paths and break cache relocatability


@@ -11,6 +11,8 @@ package org.elasticsearch.server.cli;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.core.UpdateForV9;
import org.elasticsearch.jdk.RuntimeVersionFeature;
import java.io.IOException;
import java.nio.file.Files;
@@ -137,9 +139,13 @@
return Stream.of();
}
@UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA)
private static Stream<String> maybeAllowSecurityManager() {
// Will become conditional on useEntitlements once entitlements can run without SM
return Stream.of("-Djava.security.manager=allow");
if (RuntimeVersionFeature.isSecurityManagerAvailable()) {
// Will become conditional on useEntitlements once entitlements can run without SM
return Stream.of("-Djava.security.manager=allow");
}
return Stream.of();
}
private static Stream<String> maybeAttachEntitlementAgent(boolean useEntitlements) {


@@ -0,0 +1,5 @@
pr: 114618
summary: Add a new index setting to skip recovery source when synthetic source is enabled
area: Logs
type: enhancement
issues: []


@@ -0,0 +1,6 @@
pr: 117469
summary: Handle exceptions in query phase can match
area: Search
type: bug
issues:
- 104994


@@ -0,0 +1,5 @@
pr: 118025
summary: Update sparse text embeddings API route for Inference Service
area: Inference
type: enhancement
issues: []


@@ -0,0 +1,12 @@
pr: 118104
summary: Remove old `_knn_search` tech preview API in v9
area: Vector Search
type: breaking
issues: []
breaking:
title: Remove old `_knn_search` tech preview API in v9
area: REST API
details: The original, tech-preview api for vector search, `_knn_search`, has been removed in v9. For all vector search
operations, you should utilize the `_search` endpoint.
impact: The `_knn_search` API is now inaccessible without providing a compatible-with flag for v8.
notable: false
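
For reference, the equivalent of a removed `_knn_search` request can be expressed through the `knn` option of the `_search` API. The sketch below reuses the `image_vector` field from the removed kNN search API documentation; the index and field names are illustrative only.

[source,console]
----
GET my-index/_search
{
  "knn": {
    "field": "image_vector",
    "query_vector": [0.3, 0.1, 1.2],
    "k": 10,
    "num_candidates": 100
  },
  "_source": ["name", "file_type"]
}
----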


@@ -0,0 +1,6 @@
pr: 118177
summary: Fixing bedrock event executor terminated cache issue
area: Machine Learning
type: bug
issues:
- 117916


@@ -0,0 +1,5 @@
pr: 118267
summary: Adding get migration reindex status
area: Data streams
type: enhancement
issues: []


@@ -0,0 +1,5 @@
pr: 118354
summary: Fix log message format bugs
area: Ingest Node
type: bug
issues: []


@@ -0,0 +1,5 @@
pr: 118378
summary: Opt into extra data stream resolution
area: ES|QL
type: bug
issues: []


@@ -475,7 +475,7 @@ The input is untokenized text and the result is the single term attribute emitte
- 영영칠 -> 7
- 일영영영 -> 1000
- 삼천2백2십삼 -> 3223
- 조육백만오천일 -> 1000006005001
- .2천 -> 3200
- .2만345. -> 12345.67
- 4,647.100 -> 4647.1


@@ -232,8 +232,8 @@ it will be set to the length of the first vector added to the field.
`index`::
(Optional, Boolean)
If `true`, you can search this field using the <<knn-search-api, kNN search
API>>. Defaults to `true`.
If `true`, you can search this field using the <<query-dsl-knn-query, knn query>>
or <<search-api-knn, knn in _search>>. Defaults to `true`.
[[dense-vector-similarity]]
`similarity`::


@@ -244,6 +244,25 @@ The deprecated highlighting `force_source` parameter is no longer supported.
Users should remove usages of the `force_source` parameter from their search requests.
====
[discrete]
[[breaking_90_transforms_changes]]
==== {transforms-cap} changes
[[updating_deprecated_transform_roles]]
.Updating deprecated {transform} roles (`data_frame_transforms_admin` and `data_frame_transforms_user`)
[%collapsible]
====
*Details* +
The `data_frame_transforms_admin` and `data_frame_transforms_user` {transform} roles have been deprecated.
*Impact* +
Users must update any existing {transforms} that use deprecated {transform} roles (`data_frame_transforms_admin` or `data_frame_transforms_user`) to use the new equivalent {transform} roles (`transform_admin` or `transform_user`).
To update the {transform} roles:
1. Switch to a user with the `transform_admin` role (to replace `data_frame_transforms_admin`) or the `transform_user` role (to replace `data_frame_transforms_user`).
2. Call the <<update-transform, update {transforms} API>> with that user.
====
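
As a minimal illustration of step 2 above, calling the update {transforms} API with an empty update while authenticated as the new user is enough to re-associate the {transform} with that user's roles; the transform id `my-transform` is a placeholder, and the empty request body is assumed here to be accepted as a no-op update.

[source,console]
----
POST _transform/my-transform/_update
{
}
----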
[discrete]
[[deprecated-9.0]]


@@ -1,9 +0,0 @@
[[transforms-migration-guide]]
== {transforms-cap} migration guide
This migration guide helps you upgrade your {transforms} to work with the 9.0 release. Each section outlines a breaking change and any manual steps needed to upgrade your {transforms} to be compatible with 9.0.
=== Updating deprecated {transform} roles (`data_frame_transforms_admin` and `data_frame_transforms_user`)
If you have existing {transforms} that use deprecated {transform} roles (`data_frame_transforms_admin` or `data_frame_transforms_user`) you must update them to use the new equivalent {transform} roles (`transform_admin` or `transform_user`). To update your {transform} roles:
1. Switch to a user with the `transform_admin` role (to replace `data_frame_transforms_admin`) or the `transform_user` role (to replace `data_frame_transforms_user`).
2. Call the <<update-transform, update {transforms} API>> with that user.


@@ -1942,3 +1942,8 @@ Refer to <<get-ip-location-database-api>>.
=== Delete geoip database configuration API
Refer to <<delete-ip-location-database-api>>.
[role="exclude",id="knn-search-api"]
=== Delete _knn_search API
Refer to <<search-api-knn>>.


@@ -50,8 +50,6 @@ include::search/async-search.asciidoc[]
include::search/point-in-time-api.asciidoc[]
include::search/knn-search.asciidoc[]
include::search/retriever.asciidoc[]
include::search/rrf.asciidoc[]


@@ -1,146 +0,0 @@
[[knn-search-api]]
=== kNN search API
++++
<titleabbrev>kNN search</titleabbrev>
++++
deprecated::[8.4.0,"The kNN search API has been replaced by the <<search-api-knn, `knn` option>> in the search API."]
Performs a k-nearest neighbor (kNN) search and returns the matching documents.
////
[source,console]
----
PUT my-index
{
"mappings": {
"properties": {
"image_vector": {
"type": "dense_vector",
"dims": 3,
"index": true,
"similarity": "l2_norm"
}
}
}
}
PUT my-index/_doc/1?refresh
{
"image_vector" : [0.5, 10, 6]
}
----
////
[source,console]
----
GET my-index/_knn_search
{
"knn": {
"field": "image_vector",
"query_vector": [0.3, 0.1, 1.2],
"k": 10,
"num_candidates": 100
},
"_source": ["name", "file_type"]
}
----
// TEST[continued]
// TEST[warning:The kNN search API has been replaced by the `knn` option in the search API.]
[[knn-search-api-request]]
==== {api-request-title}
`GET <target>/_knn_search`
`POST <target>/_knn_search`
[[knn-search-api-prereqs]]
==== {api-prereq-title}
* If the {es} {security-features} are enabled, you must have the `read`
<<privileges-list-indices,index privilege>> for the target data stream, index,
or alias.
[[knn-search-api-desc]]
==== {api-description-title}
The kNN search API performs a k-nearest neighbor (kNN) search on a
<<dense-vector,`dense_vector`>> field. Given a query vector, it finds the _k_
closest vectors and returns those documents as search hits.
//tag::hnsw-algorithm[]
{es} uses the https://arxiv.org/abs/1603.09320[HNSW algorithm] to support
efficient kNN search. Like most kNN algorithms, HNSW is an approximate method
that sacrifices result accuracy for improved search speed. This means the
results returned are not always the true _k_ closest neighbors.
//end::hnsw-algorithm[]
The kNN search API supports restricting the search using a filter. The search
will return the top `k` documents that also match the filter query.
[[knn-search-api-path-params]]
==== {api-path-parms-title}
`<target>`::
(Optional, string) Comma-separated list of data streams, indices, and aliases
to search. Supports wildcards (`*`). To search all data streams and indices,
use `*` or `_all`.
[role="child_attributes"]
[[knn-search-api-query-params]]
==== {api-query-parms-title}
include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=routing]
[role="child_attributes"]
[[knn-search-api-request-body]]
==== {api-request-body-title}
`filter`::
(Optional, <<query-dsl,Query DSL object>>)
include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=knn-filter]
`knn`::
(Required, object)
include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=knn]
+
.Properties of `knn` object
[%collapsible%open]
====
`field`::
(Required, string)
include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=knn-field]
`k`::
(Optional, integer)
include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=knn-k]
`num_candidates`::
(Optional, integer)
include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=knn-num-candidates]
`query_vector`::
(Required, array of floats or string)
include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=knn-query-vector]
====
include::{es-ref-dir}/search/search.asciidoc[tag=docvalue-fields-def]
include::{es-ref-dir}/search/search.asciidoc[tag=fields-param-def]
include::{es-ref-dir}/search/search.asciidoc[tag=source-filtering-def]
include::{es-ref-dir}/search/search.asciidoc[tag=stored-fields-def]
[role="child_attributes"]
[[knn-search-api-response-body]]
==== {api-response-body-title}
A kNN search response has the exact same structure as a
<<search-api-response-body, search API response>>. However, certain sections
have a meaning specific to kNN search:
* The <<search-api-response-body-score,document `_score`>> is determined by
the similarity between the query and document vector. See
<<dense-vector-similarity, `similarity`>>.
* The `hits.total` object contains the total number of nearest neighbor
candidates considered, which is `num_candidates * num_shards`. The
`hits.total.relation` will always be `eq`, indicating an exact value.


@@ -1058,8 +1058,10 @@ PUT image-index
* When using kNN search in <<modules-cross-cluster-search,{ccs}>>, the <<ccs-min-roundtrips,`ccs_minimize_roundtrips`>>
option is not supported.
* {blank}
include::{es-ref-dir}/search/knn-search.asciidoc[tag=hnsw-algorithm]
* {es} uses the https://arxiv.org/abs/1603.09320[HNSW algorithm] to support
efficient kNN search. Like most kNN algorithms, HNSW is an approximate method
that sacrifices result accuracy for improved search speed. This means the
results returned are not always the true _k_ closest neighbors.
NOTE: Approximate kNN search always uses the
<<dfs-query-then-fetch,`dfs_query_then_fetch`>> search type in order to gather


@@ -39,7 +39,7 @@ adjust memory usage in Docker Desktop by going to **Settings > Resources**.
----
docker network create elastic
----
// REVIEWED[DEC.10.24]
. Pull the {es} Docker image.
+
--
@@ -52,10 +52,11 @@ endif::[]
----
docker pull {docker-image}
----
// REVIEWED[DEC.10.24]
--
. Optional: Install
https://docs.sigstore.dev/system_config/installation/[Cosign] for your
https://docs.sigstore.dev/cosign/system_config/installation/[Cosign] for your
environment. Then use Cosign to verify the {es} image's signature.
+
[[docker-verify-signature]]
@@ -64,6 +65,7 @@ environment. Then use Cosign to verify the {es} image's signature.
wget https://artifacts.elastic.co/cosign.pub
cosign verify --key cosign.pub {docker-image}
----
// REVIEWED[DEC.10.24]
+
The `cosign` command prints the check results and the signature payload in JSON format:
+
@@ -75,6 +77,7 @@ The following checks were performed on each of these signatures:
- Existence of the claims in the transparency log was verified offline
- The signatures were verified against the specified public key
----
// REVIEWED[DEC.10.24]
. Start an {es} container.
+
@@ -82,6 +85,7 @@ The following checks were performed on each of these signatures:
----
docker run --name es01 --net elastic -p 9200:9200 -it -m 1GB {docker-image}
----
// REVIEWED[DEC.10.24]
+
TIP: Use the `-m` flag to set a memory limit for the container. This removes the
need to <<docker-set-heap-size,manually set the JVM size>>.
@@ -95,6 +99,7 @@ If you intend to use the {ml} capabilities, then start the container with this c
----
docker run --name es01 --net elastic -p 9200:9200 -it -m 6GB -e "xpack.ml.use_auto_machine_memory_percent=true" {docker-image}
----
// REVIEWED[DEC.10.24]
The command prints the `elastic` user password and an enrollment token for {kib}.
. Copy the generated `elastic` password and enrollment token. These credentials
@@ -106,6 +111,7 @@ credentials using the following commands.
docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic
docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s kibana
----
// REVIEWED[DEC.10.24]
+
We recommend storing the `elastic` password as an environment variable in your shell. Example:
+
@@ -113,6 +119,7 @@ We recommend storing the `elastic` password as an environment variable in your s
----
export ELASTIC_PASSWORD="your_password"
----
// REVIEWED[DEC.10.24]
. Copy the `http_ca.crt` SSL certificate from the container to your local machine.
+
@@ -120,6 +127,7 @@ export ELASTIC_PASSWORD="your_password"
----
docker cp es01:/usr/share/elasticsearch/config/certs/http_ca.crt .
----
// REVIEWED[DEC.10.24]
. Make a REST API call to {es} to ensure the {es} container is running.
+
@@ -128,6 +136,7 @@ docker cp es01:/usr/share/elasticsearch/config/certs/http_ca.crt .
curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200
----
// NOTCONSOLE
// REVIEWED[DEC.10.24]
===== Add more nodes
@@ -137,6 +146,7 @@ curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200
----
docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s node
----
// REVIEWED[DEC.10.24]
+
The enrollment token is valid for 30 minutes.
@@ -146,6 +156,7 @@ The enrollment token is valid for 30 minutes.
----
docker run -e ENROLLMENT_TOKEN="<token>" --name es02 --net elastic -it -m 1GB {docker-image}
----
// REVIEWED[DEC.10.24]
. Call the <<cat-nodes,cat nodes API>> to verify the node was added to the cluster.
+
@@ -154,6 +165,7 @@ docker run -e ENROLLMENT_TOKEN="<token>" --name es02 --net elastic -it -m 1GB {d
curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200/_cat/nodes
----
// NOTCONSOLE
// REVIEWED[DEC.10.24]
[[run-kibana-docker]]
===== Run {kib}
@@ -170,6 +182,7 @@ endif::[]
----
docker pull {kib-docker-image}
----
// REVIEWED[DEC.10.24]
--
. Optional: Verify the {kib} image's signature.
@@ -179,6 +192,7 @@ docker pull {kib-docker-image}
wget https://artifacts.elastic.co/cosign.pub
cosign verify --key cosign.pub {kib-docker-image}
----
// REVIEWED[DEC.10.24]
. Start a {kib} container.
+
@@ -186,6 +200,7 @@ cosign verify --key cosign.pub {kib-docker-image}
----
docker run --name kib01 --net elastic -p 5601:5601 {kib-docker-image}
----
// REVIEWED[DEC.10.24]
. When {kib} starts, it outputs a unique generated link to the terminal. To
access {kib}, open this link in a web browser.
@@ -198,6 +213,7 @@ To regenerate the token, run:
----
docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s kibana
----
// REVIEWED[DEC.10.24]
. Log in to {kib} as the `elastic` user with the password that was generated
when you started {es}.
@@ -208,6 +224,7 @@ To regenerate the password, run:
----
docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic
----
// REVIEWED[DEC.10.24]
[[remove-containers-docker]]
===== Remove containers
@@ -226,6 +243,7 @@ docker rm es02
# Remove the {kib} container
docker rm kib01
----
// REVIEWED[DEC.10.24]
===== Next steps
@@ -306,6 +324,7 @@ ES_PORT=127.0.0.1:9200
----
docker-compose up -d
----
// REVIEWED[DEC.10.24]
. After the cluster has started, open http://localhost:5601 in a web browser to
access {kib}.
@@ -321,6 +340,7 @@ is preserved and loaded when you restart the cluster with `docker-compose up`.
----
docker-compose down
----
// REVIEWED[DEC.10.24]
To delete the network, containers, and volumes when you stop the cluster,
specify the `-v` option:
@@ -329,6 +349,7 @@ specify the `-v` option:
----
docker-compose down -v
----
// REVIEWED[DEC.10.24]
===== Next steps
@@ -377,6 +398,7 @@ The `vm.max_map_count` setting must be set within the xhyve virtual machine:
--------------------------------------------
screen ~/Library/Containers/com.docker.docker/Data/vms/0/tty
--------------------------------------------
// REVIEWED[DEC.10.24]
. Press enter and use `sysctl` to configure `vm.max_map_count`:
+
@@ -494,6 +516,7 @@ To check the Docker daemon defaults for ulimits, run:
--------------------------------------------
docker run --rm {docker-image} /bin/bash -c 'ulimit -Hn && ulimit -Sn && ulimit -Hu && ulimit -Su'
--------------------------------------------
// REVIEWED[DEC.10.24]
If needed, adjust them in the Daemon or override them per container.
For example, when using `docker run`, set:
@@ -502,6 +525,7 @@ For example, when using `docker run`, set:
--------------------------------------------
--ulimit nofile=65535:65535
--------------------------------------------
// REVIEWED[DEC.10.24]
===== Disable swapping
@@ -518,6 +542,7 @@ When using `docker run`, you can specify:
----
-e "bootstrap.memory_lock=true" --ulimit memlock=-1:-1
----
// REVIEWED[DEC.10.24]
===== Randomize published ports
@@ -545,6 +570,7 @@ environment variable. For example, to use 1GB, use the following command.
----
docker run -e ES_JAVA_OPTS="-Xms1g -Xmx1g" -e ENROLLMENT_TOKEN="<token>" --name es01 -p 9200:9200 --net elastic -it {docker-image}
----
// REVIEWED[DEC.10.24]
The `ES_JAVA_OPTS` variable overrides all other JVM options.
We do not recommend using `ES_JAVA_OPTS` in production.
@@ -616,6 +642,7 @@ If you mount the password file to `/run/secrets/bootstrapPassword.txt`, specify:
--------------------------------------------
-e ELASTIC_PASSWORD_FILE=/run/secrets/bootstrapPassword.txt
--------------------------------------------
// REVIEWED[DEC.10.24]
You can override the default command for the image to pass {es} configuration
parameters as command line options. For example:


@@ -0,0 +1,21 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.jdk;
import org.elasticsearch.core.UpdateForV9;
public class RuntimeVersionFeature {
private RuntimeVersionFeature() {}
@UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // Remove once we removed all references to SecurityManager in code
public static boolean isSecurityManagerAvailable() {
return Runtime.version().feature() < 24;
}
}


@@ -9,6 +9,8 @@
package org.elasticsearch.nativeaccess.jdk;
import org.elasticsearch.logging.LogManager;
import org.elasticsearch.logging.Logger;
import org.elasticsearch.nativeaccess.VectorSimilarityFunctions;
import org.elasticsearch.nativeaccess.lib.LoaderHelper;
import org.elasticsearch.nativeaccess.lib.VectorLibrary;
@@ -25,6 +27,8 @@ import static org.elasticsearch.nativeaccess.jdk.LinkerHelper.downcallHandle;
public final class JdkVectorLibrary implements VectorLibrary {
static final Logger logger = LogManager.getLogger(JdkVectorLibrary.class);
static final MethodHandle dot7u$mh;
static final MethodHandle sqr7u$mh;
@@ -36,6 +40,7 @@ public final class JdkVectorLibrary implements VectorLibrary {
try {
int caps = (int) vecCaps$mh.invokeExact();
logger.info("vec_caps=" + caps);
if (caps != 0) {
if (caps == 2) {
dot7u$mh = downcallHandle(


@@ -28,4 +28,5 @@ tasks.named('forbiddenApisMain').configure {
tasks.named("jarHell").configure { enabled = false }
tasks.named("testTestingConventions").configure {
baseClass 'junit.framework.TestCase'
baseClass 'org.junit.Assert'
}


@@ -9,27 +9,43 @@
package org.elasticsearch.secure_sm;
import junit.framework.TestCase;
import com.carrotsearch.randomizedtesting.JUnit3MethodProvider;
import com.carrotsearch.randomizedtesting.RandomizedRunner;
import com.carrotsearch.randomizedtesting.RandomizedTest;
import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders;
import org.elasticsearch.jdk.RuntimeVersionFeature;
import org.junit.BeforeClass;
import org.junit.runner.RunWith;
import java.security.Permission;
import java.security.Policy;
import java.security.ProtectionDomain;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
/** Simple tests for SecureSM */
public class SecureSMTests extends TestCase {
static {
@TestMethodProviders({ JUnit3MethodProvider.class })
@RunWith(RandomizedRunner.class)
public class SecureSMTests extends org.junit.Assert {
@BeforeClass
public static void initialize() {
RandomizedTest.assumeFalse(
"SecurityManager has been permanently removed in JDK 24",
RuntimeVersionFeature.isSecurityManagerAvailable() == false
);
// install a mock security policy:
// AllPermission to source code
// ThreadPermission not granted anywhere else
final ProtectionDomain sourceCode = SecureSM.class.getProtectionDomain();
final var sourceCode = Set.of(SecureSM.class.getProtectionDomain(), RandomizedRunner.class.getProtectionDomain());
Policy.setPolicy(new Policy() {
@Override
public boolean implies(ProtectionDomain domain, Permission permission) {
if (domain == sourceCode) {
if (sourceCode.contains(domain)) {
return true;
} else if (permission instanceof ThreadPermission) {
return false;


@@ -124,17 +124,13 @@ public class AutoDateHistogramAggregationBuilder extends ValuesSourceAggregation
public AutoDateHistogramAggregationBuilder(StreamInput in) throws IOException {
super(in);
numBuckets = in.readVInt();
if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) {
minimumIntervalExpression = in.readOptionalString();
}
minimumIntervalExpression = in.readOptionalString();
}
@Override
protected void innerWriteTo(StreamOutput out) throws IOException {
out.writeVInt(numBuckets);
if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) {
out.writeOptionalString(minimumIntervalExpression);
}
out.writeOptionalString(minimumIntervalExpression);
}
protected AutoDateHistogramAggregationBuilder(


@@ -259,6 +259,6 @@ public class DerivativePipelineAggregationBuilder extends AbstractPipelineAggreg
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersions.V_7_4_0;
return TransportVersions.ZERO;
}
}


@@ -1025,7 +1025,7 @@ class S3BlobContainer extends AbstractBlobContainer {
// should be no other processes interacting with the repository.
logger.warn(
Strings.format(
"failed to clean up multipart upload [{}] of blob [{}][{}][{}]",
"failed to clean up multipart upload [%s] of blob [%s][%s][%s]",
abortMultipartUploadRequest.getUploadId(),
blobStore.getRepositoryMetadata().name(),
abortMultipartUploadRequest.getBucketName(),


@@ -274,32 +274,50 @@ tests:
- class: org.elasticsearch.datastreams.DataStreamsClientYamlTestSuiteIT
method: test {p0=data_stream/120_data_streams_stats/Multiple data stream}
issue: https://github.com/elastic/elasticsearch/issues/118217
- class: org.elasticsearch.xpack.security.operator.OperatorPrivilegesIT
method: testEveryActionIsEitherOperatorOnlyOrNonOperator
issue: https://github.com/elastic/elasticsearch/issues/118220
- class: org.elasticsearch.validation.DotPrefixClientYamlTestSuiteIT
issue: https://github.com/elastic/elasticsearch/issues/118224
- class: org.elasticsearch.packaging.test.ArchiveTests
method: test60StartAndStop
issue: https://github.com/elastic/elasticsearch/issues/118216
- class: org.elasticsearch.xpack.test.rest.XPackRestIT
method: test {p0=migrate/10_reindex/Test Reindex With Bad Data Stream Name}
issue: https://github.com/elastic/elasticsearch/issues/118272
- class: org.elasticsearch.xpack.test.rest.XPackRestIT
method: test {p0=migrate/10_reindex/Test Reindex With Unsupported Mode}
issue: https://github.com/elastic/elasticsearch/issues/118273
- class: org.elasticsearch.xpack.test.rest.XPackRestIT
method: test {p0=migrate/10_reindex/Test Reindex With Nonexistent Data Stream}
issue: https://github.com/elastic/elasticsearch/issues/118274
- class: org.elasticsearch.index.codec.vectors.es818.ES818HnswBinaryQuantizedVectorsFormatTests
method: testSingleVectorCase
issue: https://github.com/elastic/elasticsearch/issues/118306
- class: org.elasticsearch.action.search.SearchQueryThenFetchAsyncActionTests
method: testBottomFieldSort
issue: https://github.com/elastic/elasticsearch/issues/118214
- class: org.elasticsearch.xpack.esql.action.CrossClustersEnrichIT
method: testTopNThenEnrichRemote
issue: https://github.com/elastic/elasticsearch/issues/118307
- class: org.elasticsearch.xpack.remotecluster.CrossClusterEsqlRCS1UnavailableRemotesIT
method: testEsqlRcs1UnavailableRemoteScenarios
issue: https://github.com/elastic/elasticsearch/issues/118350
- class: org.elasticsearch.xpack.searchablesnapshots.RetrySearchIntegTests
method: testSearcherId
issue: https://github.com/elastic/elasticsearch/issues/118374
- class: org.elasticsearch.docker.test.DockerYmlTestSuiteIT
method: test {p0=/10_info/Info}
issue: https://github.com/elastic/elasticsearch/issues/118394
- class: org.elasticsearch.docker.test.DockerYmlTestSuiteIT
method: test {p0=/11_nodes/Additional disk information}
issue: https://github.com/elastic/elasticsearch/issues/118395
- class: org.elasticsearch.docker.test.DockerYmlTestSuiteIT
method: test {p0=/11_nodes/Test cat nodes output with full_id set}
issue: https://github.com/elastic/elasticsearch/issues/118396
- class: org.elasticsearch.docker.test.DockerYmlTestSuiteIT
method: test {p0=/11_nodes/Test cat nodes output}
issue: https://github.com/elastic/elasticsearch/issues/118397
- class: org.elasticsearch.xpack.test.rest.XPackRestIT
method: test {p0=migrate/20_reindex_status/Test get reindex status with nonexistent task id}
issue: https://github.com/elastic/elasticsearch/issues/118401
- class: org.elasticsearch.xpack.test.rest.XPackRestIT
method: test {p0=migrate/10_reindex/Test Reindex With Nonexistent Data Stream}
issue: https://github.com/elastic/elasticsearch/issues/118274
- class: org.elasticsearch.xpack.test.rest.XPackRestIT
method: test {p0=migrate/10_reindex/Test Reindex With Bad Data Stream Name}
issue: https://github.com/elastic/elasticsearch/issues/118272
- class: org.elasticsearch.xpack.test.rest.XPackRestIT
method: test {p0=migrate/10_reindex/Test Reindex With Unsupported Mode}
issue: https://github.com/elastic/elasticsearch/issues/118273
- class: org.elasticsearch.xpack.inference.InferenceCrudIT
method: testUnifiedCompletionInference
issue: https://github.com/elastic/elasticsearch/issues/118405
# Examples:
#


@@ -263,7 +263,7 @@ public class FullClusterRestartDownsampleIT extends ParameterizedFullClusterRest
if (asMap.size() == 1) {
return (String) asMap.keySet().toArray()[0];
}
logger.warn("--> No matching rollup name for path [%s]", endpoint);
logger.warn("--> No matching rollup name for path [{}]", endpoint);
return null;
}


@@ -238,7 +238,7 @@ public class DownsampleIT extends AbstractRollingUpgradeTestCase {
if (asMap.size() == 1) {
return (String) asMap.keySet().toArray()[0];
}
logger.warn("--> No matching rollup name for path [%s]", endpoint);
logger.warn("--> No matching rollup name for path [{}]", endpoint);
return null;
}


@@ -0,0 +1,31 @@
{
"migrate.get_reindex_status":{
"documentation":{
"url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex.html",
"description":"This API returns the status of a migration reindex attempt for a data stream or index"
},
"stability":"experimental",
"visibility":"private",
"headers":{
"accept": [ "application/json"],
"content_type": ["application/json"]
},
"url":{
"paths":[
{
"path":"/_migration/reindex/{index}/_status",
"methods":[
"GET"
],
"parts":{
"index":{
"type":"string",
"description":"The index or data stream name"
}
}
}
]
}
}
}
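
For illustration, a call to the endpoint described by this specification might look like the following; `my-data-stream` stands in for whichever data stream or index was submitted for migration reindexing.

[source,console]
----
GET /_migration/reindex/my-data-stream/_status
----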


@@ -215,8 +215,11 @@ setup:
---
"kNN search in _knn_search endpoint":
- skip:
features: [ "allowed_warnings" ]
features: [ "allowed_warnings", "headers" ]
- do:
headers:
Content-Type: "application/vnd.elasticsearch+json;compatible-with=8"
Accept: "application/vnd.elasticsearch+json;compatible-with=8"
allowed_warnings:
- "The kNN search API has been replaced by the `knn` option in the search API."
knn_search:
@@ -240,8 +243,11 @@
- requires:
cluster_features: "gte_v8.2.0"
reason: 'kNN with filtering added in 8.2'
test_runner_features: [ "allowed_warnings" ]
test_runner_features: [ "allowed_warnings", "headers" ]
- do:
headers:
Content-Type: "application/vnd.elasticsearch+json;compatible-with=8"
Accept: "application/vnd.elasticsearch+json;compatible-with=8"
allowed_warnings:
- "The kNN search API has been replaced by the `knn` option in the search API."
knn_search:
@@ -262,6 +268,9 @@ setup:
- match: { hits.hits.0.fields.name.0: "rabbit.jpg" }
- do:
headers:
Content-Type: "application/vnd.elasticsearch+json;compatible-with=8"
Accept: "application/vnd.elasticsearch+json;compatible-with=8"
allowed_warnings:
- "The kNN search API has been replaced by the `knn` option in the search API."
knn_search:


@@ -55,6 +55,9 @@ setup:
reason: 'dense_vector field usage was added in 8.1'
test_runner_features: ["allowed_warnings"]
- do:
headers:
Content-Type: "application/vnd.elasticsearch+json;compatible-with=8"
Accept: "application/vnd.elasticsearch+json;compatible-with=8"
allowed_warnings:
- "The kNN search API has been replaced by the `knn` option in the search API."
knn_search:


@@ -15,6 +15,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDeci
import org.elasticsearch.common.ValidationException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.IndexVersions;
import org.elasticsearch.index.query.TermsQueryBuilder;
import org.elasticsearch.index.seqno.SeqNoStats;
import org.elasticsearch.test.ESIntegTestCase;
@@ -26,6 +27,7 @@ import java.util.List;
import static org.elasticsearch.action.admin.indices.create.ShrinkIndexIT.assertNoResizeSourceIndexSettings;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
@@ -143,6 +145,51 @@ public class CloneIndexIT extends ESIntegTestCase {
assertThat(error.getMessage(), containsString("can't change setting [index.mapping.source.mode] during resize"));
}
public void testResizeChangeRecoveryUseSyntheticSource() {
prepareCreate("source").setSettings(
indexSettings(between(1, 5), 0).put("index.mode", "logsdb")
.put(
"index.version.created",
IndexVersionUtils.randomVersionBetween(
random(),
IndexVersions.USE_SYNTHETIC_SOURCE_FOR_RECOVERY,
IndexVersion.current()
)
)
).setMapping("@timestamp", "type=date", "host.name", "type=keyword").get();
updateIndexSettings(Settings.builder().put("index.blocks.write", true), "source");
IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () -> {
indicesAdmin().prepareResizeIndex("source", "target")
.setResizeType(ResizeType.CLONE)
.setSettings(
Settings.builder()
.put(
"index.version.created",
IndexVersionUtils.randomVersionBetween(
random(),
IndexVersions.USE_SYNTHETIC_SOURCE_FOR_RECOVERY,
IndexVersion.current()
)
)
.put("index.recovery.use_synthetic_source", true)
.put("index.mode", "logsdb")
.putNull("index.blocks.write")
.build()
)
.get();
});
// The index.recovery.use_synthetic_source setting requires either index.mode or index.mapping.source.mode
// to be present in the settings. Since these are all unmodifiable settings with a non-deterministic evaluation
// order, any of them may trigger a failure first.
assertThat(
error.getMessage(),
anyOf(
containsString("can't change setting [index.mode] during resize"),
containsString("can't change setting [index.recovery.use_synthetic_source] during resize")
)
);
}
public void testResizeChangeIndexSorts() {
prepareCreate("source").setSettings(indexSettings(between(1, 5), 0))
.setMapping("@timestamp", "type=date", "host.name", "type=keyword")


@@ -336,7 +336,7 @@ public class RetentionLeaseIT extends ESIntegTestCase {
.getShardOrNull(new ShardId(resolveIndex("index"), 0));
final int length = randomIntBetween(1, 8);
final Map<String, RetentionLease> currentRetentionLeases = new LinkedHashMap<>();
logger.info("adding retention [{}}] leases", length);
logger.info("adding retention [{}] leases", length);
for (int i = 0; i < length; i++) {
final String id = randomValueOtherThanMany(currentRetentionLeases.keySet()::contains, () -> randomAlphaOfLength(8));
final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE);


@@ -714,7 +714,15 @@ public class IndexShardIT extends ESSingleNodeTestCase {
}
IndexShard shard = indexService.getShard(0);
try (
Translog.Snapshot luceneSnapshot = shard.newChangesSnapshot("test", 0, numOps - 1, true, randomBoolean(), randomBoolean());
Translog.Snapshot luceneSnapshot = shard.newChangesSnapshot(
"test",
0,
numOps - 1,
true,
randomBoolean(),
randomBoolean(),
randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes())
);
Translog.Snapshot translogSnapshot = getTranslog(shard).newSnapshot()
) {
List<Translog.Operation> opsFromLucene = TestTranslog.drainSnapshot(luceneSnapshot, true);


@@ -156,7 +156,6 @@ import static org.elasticsearch.index.MergePolicyConfig.INDEX_MERGE_ENABLED;
import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED;
import static org.elasticsearch.indices.IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING;
import static org.elasticsearch.node.NodeRoleSettings.NODE_ROLES_SETTING;
import static org.elasticsearch.node.RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.empty;
@@ -257,7 +256,7 @@ public class IndexRecoveryIT extends AbstractIndexRecoveryIntegTestCase {
public Settings.Builder createRecoverySettingsChunkPerSecond(long chunkSizeBytes) {
return Settings.builder()
// Set the chunk size in bytes
.put(CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkSizeBytes, ByteSizeUnit.BYTES))
.put(RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.getKey(), new ByteSizeValue(chunkSizeBytes, ByteSizeUnit.BYTES))
// Set one chunk of bytes per second.
.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), chunkSizeBytes, ByteSizeUnit.BYTES);
}
@@ -280,7 +279,7 @@ public class IndexRecoveryIT extends AbstractIndexRecoveryIntegTestCase {
Settings.builder()
// 200mb is an arbitrary number intended to be large enough to avoid more throttling.
.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "200mb")
.put(CHUNK_SIZE_SETTING.getKey(), RecoverySettings.DEFAULT_CHUNK_SIZE)
.put(RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.getKey(), RecoverySettings.DEFAULT_CHUNK_SIZE)
);
}


@@ -24,7 +24,7 @@ import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest;
import org.elasticsearch.indices.recovery.RecoveryFilesInfoRequest;
import org.elasticsearch.node.RecoverySettingsChunkSizePlugin;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.transport.MockTransportService;
@@ -41,7 +41,6 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;
import static org.elasticsearch.node.RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
@@ -52,7 +51,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Arrays.asList(MockTransportService.TestPlugin.class, RecoverySettingsChunkSizePlugin.class);
return Arrays.asList(MockTransportService.TestPlugin.class);
}
/**
@@ -63,7 +62,11 @@
*/
public void testCancelRecoveryAndResume() throws Exception {
updateClusterSettings(
Settings.builder().put(CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES))
Settings.builder()
.put(
RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.getKey(),
new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES)
)
);
NodesStatsResponse nodeStats = clusterAdmin().prepareNodesStats().get();


@@ -812,6 +812,24 @@ public class RestoreSnapshotIT extends AbstractSnapshotIntegTestCase {
assertThat(error.getMessage(), containsString("cannot modify setting [index.mapping.source.mode] on restore"));
}
public void testRestoreChangeRecoveryUseSyntheticSource() {
Client client = client();
createRepository("test-repo", "fs");
String indexName = "test-idx";
assertAcked(client.admin().indices().prepareCreate(indexName).setSettings(Settings.builder().put(indexSettings())));
createSnapshot("test-repo", "test-snap", Collections.singletonList(indexName));
cluster().wipeIndices(indexName);
var error = expectThrows(SnapshotRestoreException.class, () -> {
client.admin()
.cluster()
.prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap")
.setIndexSettings(Settings.builder().put("index.recovery.use_synthetic_source", true))
.setWaitForCompletion(true)
.get();
});
assertThat(error.getMessage(), containsString("cannot modify setting [index.recovery.use_synthetic_source] on restore"));
}
public void testRestoreChangeIndexSorts() {
Client client = client();
createRepository("test-repo", "fs");


@@ -524,6 +524,15 @@ public class SnapshotShutdownIT extends AbstractSnapshotIntegTestCase {
"Pause signals have been set for all shard snapshots on data node [" + nodeForRemovalId + "]"
)
);
mockLog.addExpectation(
new MockLog.SeenEventExpectation(
"SnapshotShutdownProgressTracker index shard snapshot status messages",
SnapshotShutdownProgressTracker.class.getCanonicalName(),
Level.INFO,
// Expect the shard snapshot to stall in data file upload, since we've blocked the data node file upload to the blob store.
"statusDescription='enqueued file snapshot tasks: threads running concurrent file uploads'"
)
);
putShutdownForRemovalMetadata(nodeForRemoval, clusterService);
@@ -583,6 +592,14 @@ public class SnapshotShutdownIT extends AbstractSnapshotIntegTestCase {
"Current active shard snapshot stats on data node [" + nodeForRemovalId + "]*Paused [" + numShards + "]"
)
);
mockLog.addExpectation(
new MockLog.SeenEventExpectation(
"SnapshotShutdownProgressTracker index shard snapshot messages",
SnapshotShutdownProgressTracker.class.getCanonicalName(),
Level.INFO,
"statusDescription='finished: master notification attempt complete'"
)
);
// Release the master node to respond
snapshotStatusUpdateLatch.countDown();


@ -21,6 +21,7 @@ import org.elasticsearch.core.PathUtils;
import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.SuppressForbidden;
import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexModule;
import org.elasticsearch.jdk.RuntimeVersionFeature;
import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.process.ProcessProbe; import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.nativeaccess.NativeAccess; import org.elasticsearch.nativeaccess.NativeAccess;
@ -722,6 +723,9 @@ final class BootstrapChecks {
} }
boolean isAllPermissionGranted() { boolean isAllPermissionGranted() {
if (RuntimeVersionFeature.isSecurityManagerAvailable() == false) {
return false;
}
final SecurityManager sm = System.getSecurityManager(); final SecurityManager sm = System.getSecurityManager();
assert sm != null; assert sm != null;
try { try {
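isAllPermissionGranted() now bails out early on runtimes where no Security Manager can exist. As a rough illustration only, here is a minimal sketch of what such a feature check could look like; the real org.elasticsearch.jdk.RuntimeVersionFeature is not part of this diff, and the JDK 24 cut-off (where JEP 486 permanently disables the Security Manager) is an assumption.

// Hypothetical sketch only; not the actual RuntimeVersionFeature implementation.
public final class RuntimeVersionFeatureSketch {
    private static final int FIRST_VERSION_WITHOUT_SECURITY_MANAGER = 24; // assumption based on JEP 486

    public static boolean isSecurityManagerAvailable() {
        // Runtime.version().feature() returns the JDK feature release number, e.g. 21, 23, 24.
        return Runtime.version().feature() < FIRST_VERSION_WITHOUT_SECURITY_MANAGER;
    }

    private RuntimeVersionFeatureSketch() {}
}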

View file

@ -35,6 +35,7 @@ import org.elasticsearch.entitlement.bootstrap.EntitlementBootstrap;
import org.elasticsearch.env.Environment; import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.jdk.JarHell; import org.elasticsearch.jdk.JarHell;
import org.elasticsearch.jdk.RuntimeVersionFeature;
import org.elasticsearch.monitor.jvm.HotThreads; import org.elasticsearch.monitor.jvm.HotThreads;
import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.os.OsProbe; import org.elasticsearch.monitor.os.OsProbe;
@ -43,6 +44,8 @@ import org.elasticsearch.nativeaccess.NativeAccess;
import org.elasticsearch.node.Node; import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeValidationException; import org.elasticsearch.node.NodeValidationException;
import org.elasticsearch.plugins.PluginsLoader; import org.elasticsearch.plugins.PluginsLoader;
import org.elasticsearch.rest.MethodHandlers;
import org.elasticsearch.transport.RequestHandlerRegistry;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
@ -113,12 +116,14 @@ class Elasticsearch {
* the presence of a security manager or lack thereof act as if there is a security manager present (e.g., DNS cache policy). * the presence of a security manager or lack thereof act as if there is a security manager present (e.g., DNS cache policy).
* This forces such policies to take effect immediately. * This forces such policies to take effect immediately.
*/ */
org.elasticsearch.bootstrap.Security.setSecurityManager(new SecurityManager() { if (RuntimeVersionFeature.isSecurityManagerAvailable()) {
@Override org.elasticsearch.bootstrap.Security.setSecurityManager(new SecurityManager() {
public void checkPermission(Permission perm) { @Override
// grant all permissions so that we can later set the security manager to the one that we want public void checkPermission(Permission perm) {
} // grant all permissions so that we can later set the security manager to the one that we want
}); }
});
}
LogConfigurator.registerErrorListener(); LogConfigurator.registerErrorListener();
BootstrapInfo.init(); BootstrapInfo.init();
@ -198,7 +203,11 @@ class Elasticsearch {
SubscribableListener.class, SubscribableListener.class,
RunOnce.class, RunOnce.class,
// We eagerly initialize to work around log4j permissions & JDK-8309727 // We eagerly initialize to work around log4j permissions & JDK-8309727
VectorUtil.class VectorUtil.class,
// RequestHandlerRegistry and MethodHandlers classes do nontrivial static initialization which should always succeed, but load
// it now (before SM) to be sure
RequestHandlerRegistry.class,
MethodHandlers.class
); );
// load the plugin Java modules and layers now for use in entitlements // load the plugin Java modules and layers now for use in entitlements
@ -215,7 +224,7 @@ class Elasticsearch {
.toList(); .toList();
EntitlementBootstrap.bootstrap(pluginData, pluginsResolver::resolveClassToPluginName); EntitlementBootstrap.bootstrap(pluginData, pluginsResolver::resolveClassToPluginName);
} else { } else if (RuntimeVersionFeature.isSecurityManagerAvailable()) {
// install SM after natives, shutdown hooks, etc. // install SM after natives, shutdown hooks, etc.
LogManager.getLogger(Elasticsearch.class).info("Bootstrapping java SecurityManager"); LogManager.getLogger(Elasticsearch.class).info("Bootstrapping java SecurityManager");
org.elasticsearch.bootstrap.Security.configure( org.elasticsearch.bootstrap.Security.configure(
@ -223,6 +232,8 @@ class Elasticsearch {
SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(args.nodeSettings()), SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(args.nodeSettings()),
args.pidFile() args.pidFile()
); );
} else {
LogManager.getLogger(Elasticsearch.class).warn("Bootstrapping without any protection");
} }
} }
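Summarizing the new bootstrap flow (a condensed sketch, not the literal source; argument plumbing is abbreviated and the entitlements condition is shown as a plain boolean): entitlements take precedence, the legacy SecurityManager is installed only where the runtime still supports it, and otherwise the node starts with a warning.

// Condensed sketch of the three-way branch; names are taken from this change.
if (useEntitlements) {
    EntitlementBootstrap.bootstrap(pluginData, pluginsResolver::resolveClassToPluginName);
} else if (RuntimeVersionFeature.isSecurityManagerAvailable()) {
    // legacy path: install the SecurityManager after natives, shutdown hooks, etc.
    org.elasticsearch.bootstrap.Security.configure(environment, filterBadDefaults, args.pidFile()); // arguments abbreviated
} else {
    LogManager.getLogger(Elasticsearch.class).warn("Bootstrapping without any protection");
}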

View file

@ -93,7 +93,7 @@ public final class RepositoryCleanupInProgress extends AbstractNamedDiffable<Clu
@Override @Override
public TransportVersion getMinimalSupportedVersion() { public TransportVersion getMinimalSupportedVersion() {
return TransportVersions.V_7_4_0; return TransportVersions.ZERO;
} }
public record Entry(String repository, long repositoryStateId) implements Writeable, RepositoryOperation { public record Entry(String repository, long repositoryStateId) implements Writeable, RepositoryOperation {

View file

@ -1648,6 +1648,7 @@ public class MetadataCreateIndexService {
private static final Set<String> UNMODIFIABLE_SETTINGS_DURING_RESIZE = Set.of( private static final Set<String> UNMODIFIABLE_SETTINGS_DURING_RESIZE = Set.of(
IndexSettings.MODE.getKey(), IndexSettings.MODE.getKey(),
SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(),
IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(),
IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(), IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(),
IndexSortConfig.INDEX_SORT_ORDER_SETTING.getKey(), IndexSortConfig.INDEX_SORT_ORDER_SETTING.getKey(),
IndexSortConfig.INDEX_SORT_MODE_SETTING.getKey(), IndexSortConfig.INDEX_SORT_MODE_SETTING.getKey(),

View file

@ -257,6 +257,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
RecoverySettings.INDICES_RECOVERY_USE_SNAPSHOTS_SETTING, RecoverySettings.INDICES_RECOVERY_USE_SNAPSHOTS_SETTING,
RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS,
RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE,
RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE,
RecoverySettings.NODE_BANDWIDTH_RECOVERY_FACTOR_READ_SETTING, RecoverySettings.NODE_BANDWIDTH_RECOVERY_FACTOR_READ_SETTING,
RecoverySettings.NODE_BANDWIDTH_RECOVERY_FACTOR_WRITE_SETTING, RecoverySettings.NODE_BANDWIDTH_RECOVERY_FACTOR_WRITE_SETTING,
RecoverySettings.NODE_BANDWIDTH_RECOVERY_OPERATOR_FACTOR_SETTING, RecoverySettings.NODE_BANDWIDTH_RECOVERY_OPERATOR_FACTOR_SETTING,
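Registering RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE here makes the chunk size a recognized cluster setting, so it can be updated at runtime (assuming it is declared dynamic, which the integration test earlier in this commit relies on). A minimal sketch in the same test-helper style, with an assumed value:

// Lower the recovery chunk size cluster-wide; 128 KB is an arbitrary example value.
updateClusterSettings(
    Settings.builder()
        .put(RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.getKey(), new ByteSizeValue(128, ByteSizeUnit.KB))
);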

View file

@ -188,6 +188,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_WRITE_SETTING, IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_WRITE_SETTING,
IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_READ_SETTING, IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_READ_SETTING,
SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING, SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING,
IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING,
// validate that built-in similarities don't get redefined // validate that built-in similarities don't get redefined
Setting.groupSetting("index.similarity.", (s) -> { Setting.groupSetting("index.similarity.", (s) -> {

View file

@ -38,6 +38,7 @@ import java.time.Instant;
import java.util.Collections; import java.util.Collections;
import java.util.Iterator; import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Locale;
import java.util.Map; import java.util.Map;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.function.Consumer; import java.util.function.Consumer;
@ -51,6 +52,7 @@ import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_IGNORE_
import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING; import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING;
import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING; import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING;
import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING; import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING;
import static org.elasticsearch.index.mapper.SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING;
/** /**
* This class encapsulates all index level settings and handles settings updates. * This class encapsulates all index level settings and handles settings updates.
@ -653,6 +655,62 @@ public final class IndexSettings {
Property.Final Property.Final
); );
public static final Setting<Boolean> RECOVERY_USE_SYNTHETIC_SOURCE_SETTING = Setting.boolSetting(
"index.recovery.use_synthetic_source",
false,
new Setting.Validator<>() {
@Override
public void validate(Boolean value) {}
@Override
public void validate(Boolean enabled, Map<Setting<?>, Object> settings) {
if (enabled == false) {
return;
}
// Verify if synthetic source is enabled on the index; fail if it is not
var indexMode = (IndexMode) settings.get(MODE);
if (indexMode.defaultSourceMode() != SourceFieldMapper.Mode.SYNTHETIC) {
var sourceMode = (SourceFieldMapper.Mode) settings.get(INDEX_MAPPER_SOURCE_MODE_SETTING);
if (sourceMode != SourceFieldMapper.Mode.SYNTHETIC) {
throw new IllegalArgumentException(
String.format(
Locale.ROOT,
"The setting [%s] is only permitted when [%s] is set to [%s]. Current mode: [%s].",
RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(),
INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(),
SourceFieldMapper.Mode.SYNTHETIC.name(),
sourceMode.name()
)
);
}
}
// Verify that all nodes can handle this setting
var version = (IndexVersion) settings.get(SETTING_INDEX_VERSION_CREATED);
if (version.before(IndexVersions.USE_SYNTHETIC_SOURCE_FOR_RECOVERY)) {
throw new IllegalArgumentException(
String.format(
Locale.ROOT,
"The setting [%s] is unavailable on this cluster because some nodes are running older "
+ "versions that do not support it. Please upgrade all nodes to the latest version "
+ "and try again.",
RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey()
)
);
}
}
@Override
public Iterator<Setting<?>> settings() {
List<Setting<?>> res = List.of(INDEX_MAPPER_SOURCE_MODE_SETTING, SETTING_INDEX_VERSION_CREATED, MODE);
return res.iterator();
}
},
Property.IndexScope,
Property.Final
);
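A minimal sketch of index settings that satisfy this validator (index name and client usage are assumed, mirroring the test style elsewhere in this commit): the index must already use synthetic source, either through an index mode whose default source mode is synthetic or via index.mapping.source.mode.

// Hypothetical example; both keys are the setting names used in this change.
Settings settings = Settings.builder()
    .put("index.mapping.source.mode", "synthetic")      // needed when the index mode does not default to synthetic source
    .put("index.recovery.use_synthetic_source", true)   // the final, index-scoped setting defined above
    .build();
client.admin().indices().prepareCreate("test-idx").setSettings(settings).get();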
/** /**
* Returns <code>true</code> if TSDB encoding is enabled. The default is <code>true</code> * Returns <code>true</code> if TSDB encoding is enabled. The default is <code>true</code>
*/ */
@ -824,6 +882,7 @@ public final class IndexSettings {
private volatile boolean skipIgnoredSourceRead; private volatile boolean skipIgnoredSourceRead;
private final SourceFieldMapper.Mode indexMappingSourceMode; private final SourceFieldMapper.Mode indexMappingSourceMode;
private final boolean recoverySourceEnabled; private final boolean recoverySourceEnabled;
private final boolean recoverySourceSyntheticEnabled;
/** /**
* The maximum number of refresh listeners allows on this shard. * The maximum number of refresh listeners allows on this shard.
@ -984,8 +1043,9 @@ public final class IndexSettings {
es87TSDBCodecEnabled = scopedSettings.get(TIME_SERIES_ES87TSDB_CODEC_ENABLED_SETTING); es87TSDBCodecEnabled = scopedSettings.get(TIME_SERIES_ES87TSDB_CODEC_ENABLED_SETTING);
skipIgnoredSourceWrite = scopedSettings.get(IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_WRITE_SETTING); skipIgnoredSourceWrite = scopedSettings.get(IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_WRITE_SETTING);
skipIgnoredSourceRead = scopedSettings.get(IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_READ_SETTING); skipIgnoredSourceRead = scopedSettings.get(IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_READ_SETTING);
indexMappingSourceMode = scopedSettings.get(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING); indexMappingSourceMode = scopedSettings.get(INDEX_MAPPER_SOURCE_MODE_SETTING);
recoverySourceEnabled = RecoverySettings.INDICES_RECOVERY_SOURCE_ENABLED_SETTING.get(nodeSettings); recoverySourceEnabled = RecoverySettings.INDICES_RECOVERY_SOURCE_ENABLED_SETTING.get(nodeSettings);
recoverySourceSyntheticEnabled = scopedSettings.get(RECOVERY_USE_SYNTHETIC_SOURCE_SETTING);
scopedSettings.addSettingsUpdateConsumer( scopedSettings.addSettingsUpdateConsumer(
MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING,
@ -1677,6 +1737,13 @@ public final class IndexSettings {
return recoverySourceEnabled; return recoverySourceEnabled;
} }
/**
* @return Whether recovery source should always be bypassed in favor of using synthetic source.
*/
public boolean isRecoverySourceSyntheticEnabled() {
return recoverySourceSyntheticEnabled;
}
/** /**
* The bounds for {@code @timestamp} on this index or * The bounds for {@code @timestamp} on this index or
* {@code null} if there are no bounds. * {@code null} if there are no bounds.

View file

@ -136,6 +136,7 @@ public class IndexVersions {
public static final IndexVersion LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT = def(9_001_00_0, Version.LUCENE_10_0_0); public static final IndexVersion LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT = def(9_001_00_0, Version.LUCENE_10_0_0);
public static final IndexVersion TIME_BASED_K_ORDERED_DOC_ID = def(9_002_00_0, Version.LUCENE_10_0_0); public static final IndexVersion TIME_BASED_K_ORDERED_DOC_ID = def(9_002_00_0, Version.LUCENE_10_0_0);
public static final IndexVersion DEPRECATE_SOURCE_MODE_MAPPER = def(9_003_00_0, Version.LUCENE_10_0_0); public static final IndexVersion DEPRECATE_SOURCE_MODE_MAPPER = def(9_003_00_0, Version.LUCENE_10_0_0);
public static final IndexVersion USE_SYNTHETIC_SOURCE_FOR_RECOVERY = def(9_004_00_0, Version.LUCENE_10_0_0);
/* /*
* STOP! READ THIS FIRST! No, really, * STOP! READ THIS FIRST! No, really,
* ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _

View file

@ -24,6 +24,7 @@ final class CombinedDocValues {
private final NumericDocValues primaryTermDV; private final NumericDocValues primaryTermDV;
private final NumericDocValues tombstoneDV; private final NumericDocValues tombstoneDV;
private final NumericDocValues recoverySource; private final NumericDocValues recoverySource;
private final NumericDocValues recoverySourceSize;
CombinedDocValues(LeafReader leafReader) throws IOException { CombinedDocValues(LeafReader leafReader) throws IOException {
this.versionDV = Objects.requireNonNull(leafReader.getNumericDocValues(VersionFieldMapper.NAME), "VersionDV is missing"); this.versionDV = Objects.requireNonNull(leafReader.getNumericDocValues(VersionFieldMapper.NAME), "VersionDV is missing");
@ -34,6 +35,7 @@ final class CombinedDocValues {
); );
this.tombstoneDV = leafReader.getNumericDocValues(SeqNoFieldMapper.TOMBSTONE_NAME); this.tombstoneDV = leafReader.getNumericDocValues(SeqNoFieldMapper.TOMBSTONE_NAME);
this.recoverySource = leafReader.getNumericDocValues(SourceFieldMapper.RECOVERY_SOURCE_NAME); this.recoverySource = leafReader.getNumericDocValues(SourceFieldMapper.RECOVERY_SOURCE_NAME);
this.recoverySourceSize = leafReader.getNumericDocValues(SourceFieldMapper.RECOVERY_SOURCE_SIZE_NAME);
} }
long docVersion(int segmentDocId) throws IOException { long docVersion(int segmentDocId) throws IOException {
@ -79,4 +81,12 @@ final class CombinedDocValues {
assert recoverySource.docID() < segmentDocId; assert recoverySource.docID() < segmentDocId;
return recoverySource.advanceExact(segmentDocId); return recoverySource.advanceExact(segmentDocId);
} }
long recoverySourceSize(int segmentDocId) throws IOException {
if (recoverySourceSize == null) {
return -1;
}
assert recoverySourceSize.docID() < segmentDocId;
return recoverySourceSize.advanceExact(segmentDocId) ? recoverySourceSize.longValue() : -1;
}
} }

View file

@ -937,14 +937,15 @@ public abstract class Engine implements Closeable {
* @param source the source of the request * @param source the source of the request
* @param fromSeqNo the start sequence number (inclusive) * @param fromSeqNo the start sequence number (inclusive)
* @param toSeqNo the end sequence number (inclusive) * @param toSeqNo the end sequence number (inclusive)
* @see #newChangesSnapshot(String, long, long, boolean, boolean, boolean) * @see #newChangesSnapshot(String, long, long, boolean, boolean, boolean, long)
*/ */
public abstract int countChanges(String source, long fromSeqNo, long toSeqNo) throws IOException; public abstract int countChanges(String source, long fromSeqNo, long toSeqNo) throws IOException;
/** /**
* Creates a new history snapshot from Lucene for reading operations whose seqno in the requesting seqno range (both inclusive). * @deprecated This method is deprecated and will be removed once #114618 is applied to the serverless repository.
* This feature requires soft-deletes enabled. If soft-deletes are disabled, this method will throw an {@link IllegalStateException}. * @see #newChangesSnapshot(String, long, long, boolean, boolean, boolean, long)
*/ */
@Deprecated
public abstract Translog.Snapshot newChangesSnapshot( public abstract Translog.Snapshot newChangesSnapshot(
String source, String source,
long fromSeqNo, long fromSeqNo,
@ -954,6 +955,23 @@ public abstract class Engine implements Closeable {
boolean accessStats boolean accessStats
) throws IOException; ) throws IOException;
/**
* Creates a new history snapshot from Lucene for reading operations whose seqno is in the requested seqno range (both inclusive).
* This feature requires soft-deletes enabled. If soft-deletes are disabled, this method will throw an {@link IllegalStateException}.
*/
public Translog.Snapshot newChangesSnapshot(
String source,
long fromSeqNo,
long toSeqNo,
boolean requiredFullRange,
boolean singleConsumer,
boolean accessStats,
long maxChunkSize
) throws IOException {
// TODO: Remove this default implementation once the deprecated newChangesSnapshot is removed
return newChangesSnapshot(source, fromSeqNo, toSeqNo, requiredFullRange, singleConsumer, accessStats);
}
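A usage sketch for the new overload (the engine reference, source label, and sequence-number bounds are assumed): callers iterate the snapshot and must close it, and passing -1 for maxChunkSize keeps the implementation default, as the delegating old-signature method and ReadOnlyEngine.countChanges below do.

// Hypothetical caller of the new overload.
try (Translog.Snapshot snapshot = engine.newChangesSnapshot("peer-recovery", fromSeqNo, toSeqNo, true, true, true, -1)) {
    Translog.Operation op;
    while ((op = snapshot.next()) != null) {
        // replay or transfer the operation (omitted)
    }
}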
/** /**
* Checks if this engine has every operations since {@code startingSeqNo}(inclusive) in its history (either Lucene or translog) * Checks if this engine has every operations since {@code startingSeqNo}(inclusive) in its history (either Lucene or translog)
*/ */

View file

@ -2709,7 +2709,10 @@ public class InternalEngine extends Engine {
// always configure soft-deletes field so an engine with soft-deletes disabled can open a Lucene index with soft-deletes. // always configure soft-deletes field so an engine with soft-deletes disabled can open a Lucene index with soft-deletes.
iwc.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD); iwc.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD);
mergePolicy = new RecoverySourcePruneMergePolicy( mergePolicy = new RecoverySourcePruneMergePolicy(
SourceFieldMapper.RECOVERY_SOURCE_NAME, engineConfig.getIndexSettings().isRecoverySourceSyntheticEnabled() ? null : SourceFieldMapper.RECOVERY_SOURCE_NAME,
engineConfig.getIndexSettings().isRecoverySourceSyntheticEnabled()
? SourceFieldMapper.RECOVERY_SOURCE_SIZE_NAME
: SourceFieldMapper.RECOVERY_SOURCE_NAME,
engineConfig.getIndexSettings().getMode() == IndexMode.TIME_SERIES, engineConfig.getIndexSettings().getMode() == IndexMode.TIME_SERIES,
softDeletesPolicy::getRetentionQuery, softDeletesPolicy::getRetentionQuery,
new SoftDeletesRetentionMergePolicy( new SoftDeletesRetentionMergePolicy(
@ -3141,6 +3144,19 @@ public class InternalEngine extends Engine {
boolean requiredFullRange, boolean requiredFullRange,
boolean singleConsumer, boolean singleConsumer,
boolean accessStats boolean accessStats
) throws IOException {
return newChangesSnapshot(source, fromSeqNo, toSeqNo, requiredFullRange, singleConsumer, accessStats, -1);
}
@Override
public Translog.Snapshot newChangesSnapshot(
String source,
long fromSeqNo,
long toSeqNo,
boolean requiredFullRange,
boolean singleConsumer,
boolean accessStats,
long maxChunkSize
) throws IOException { ) throws IOException {
if (enableRecoverySource == false) { if (enableRecoverySource == false) {
throw new IllegalStateException( throw new IllegalStateException(
@ -3153,16 +3169,31 @@ public class InternalEngine extends Engine {
refreshIfNeeded(source, toSeqNo); refreshIfNeeded(source, toSeqNo);
Searcher searcher = acquireSearcher(source, SearcherScope.INTERNAL); Searcher searcher = acquireSearcher(source, SearcherScope.INTERNAL);
try { try {
LuceneChangesSnapshot snapshot = new LuceneChangesSnapshot( final Translog.Snapshot snapshot;
searcher, if (engineConfig.getIndexSettings().isRecoverySourceSyntheticEnabled()) {
LuceneChangesSnapshot.DEFAULT_BATCH_SIZE, snapshot = new LuceneSyntheticSourceChangesSnapshot(
fromSeqNo, engineConfig.getMapperService().mappingLookup(),
toSeqNo, searcher,
requiredFullRange, SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE,
singleConsumer, maxChunkSize,
accessStats, fromSeqNo,
config().getIndexSettings().getIndexVersionCreated() toSeqNo,
); requiredFullRange,
accessStats,
config().getIndexSettings().getIndexVersionCreated()
);
} else {
snapshot = new LuceneChangesSnapshot(
searcher,
SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE,
fromSeqNo,
toSeqNo,
requiredFullRange,
singleConsumer,
accessStats,
config().getIndexSettings().getIndexVersionCreated()
);
}
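Condensed, the snapshot selection above reads roughly as follows (a sketch with the surrounding method plumbing omitted; names and constructor arguments are taken from this change):

final Translog.Snapshot snapshot;
if (engineConfig.getIndexSettings().isRecoverySourceSyntheticEnabled()) {
    // synthetic recovery source: rebuild _source from doc values and stored fields
    snapshot = new LuceneSyntheticSourceChangesSnapshot(
        engineConfig.getMapperService().mappingLookup(), searcher,
        SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE, maxChunkSize,
        fromSeqNo, toSeqNo, requiredFullRange, accessStats,
        config().getIndexSettings().getIndexVersionCreated());
} else {
    // stored recovery source: existing behaviour
    snapshot = new LuceneChangesSnapshot(
        searcher, SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE,
        fromSeqNo, toSeqNo, requiredFullRange, singleConsumer, accessStats,
        config().getIndexSettings().getIndexVersionCreated());
}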
searcher = null; searcher = null;
return snapshot; return snapshot;
} catch (Exception e) { } catch (Exception e) {

View file

@ -10,61 +10,33 @@
package org.elasticsearch.index.engine; package org.elasticsearch.index.engine;
import org.apache.lucene.codecs.StoredFieldsReader; import org.apache.lucene.codecs.StoredFieldsReader;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopFieldCollectorManager;
import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.ArrayUtil;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.index.SequentialStoredFieldsLeafReader; import org.elasticsearch.common.lucene.index.SequentialStoredFieldsLeafReader;
import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.core.Assertions;
import org.elasticsearch.core.IOUtils;
import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.fieldvisitor.FieldsVisitor; import org.elasticsearch.index.fieldvisitor.FieldsVisitor;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.transport.Transports; import org.elasticsearch.transport.Transports;
import java.io.Closeable;
import java.io.IOException; import java.io.IOException;
import java.util.Comparator; import java.util.Comparator;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
/** /**
* A {@link Translog.Snapshot} from changes in a Lucene index * A {@link Translog.Snapshot} from changes in a Lucene index
*/ */
final class LuceneChangesSnapshot implements Translog.Snapshot { public final class LuceneChangesSnapshot extends SearchBasedChangesSnapshot {
static final int DEFAULT_BATCH_SIZE = 1024;
private final int searchBatchSize;
private final long fromSeqNo, toSeqNo;
private long lastSeenSeqNo; private long lastSeenSeqNo;
private int skippedOperations; private int skippedOperations;
private final boolean requiredFullRange;
private final boolean singleConsumer; private final boolean singleConsumer;
private final IndexSearcher indexSearcher;
private int docIndex = 0; private int docIndex = 0;
private final boolean accessStats; private int maxDocIndex;
private final int totalHits;
private ScoreDoc[] scoreDocs;
private final ParallelArray parallelArray; private final ParallelArray parallelArray;
private final Closeable onClose;
private final IndexVersion indexVersionCreated;
private int storedFieldsReaderOrd = -1; private int storedFieldsReaderOrd = -1;
private StoredFieldsReader storedFieldsReader = null; private StoredFieldsReader storedFieldsReader = null;
@ -83,7 +55,7 @@ final class LuceneChangesSnapshot implements Translog.Snapshot {
* @param accessStats true if the stats of the snapshot can be accessed via {@link #totalOperations()} * @param accessStats true if the stats of the snapshot can be accessed via {@link #totalOperations()}
* @param indexVersionCreated the version on which this index was created * @param indexVersionCreated the version on which this index was created
*/ */
LuceneChangesSnapshot( public LuceneChangesSnapshot(
Engine.Searcher engineSearcher, Engine.Searcher engineSearcher,
int searchBatchSize, int searchBatchSize,
long fromSeqNo, long fromSeqNo,
@ -93,50 +65,26 @@ final class LuceneChangesSnapshot implements Translog.Snapshot {
boolean accessStats, boolean accessStats,
IndexVersion indexVersionCreated IndexVersion indexVersionCreated
) throws IOException { ) throws IOException {
if (fromSeqNo < 0 || toSeqNo < 0 || fromSeqNo > toSeqNo) { super(engineSearcher, searchBatchSize, fromSeqNo, toSeqNo, requiredFullRange, accessStats, indexVersionCreated);
throw new IllegalArgumentException("Invalid range; from_seqno [" + fromSeqNo + "], to_seqno [" + toSeqNo + "]"); this.creationThread = Assertions.ENABLED ? Thread.currentThread() : null;
}
if (searchBatchSize <= 0) {
throw new IllegalArgumentException("Search_batch_size must be positive [" + searchBatchSize + "]");
}
final AtomicBoolean closed = new AtomicBoolean();
this.onClose = () -> {
if (closed.compareAndSet(false, true)) {
IOUtils.close(engineSearcher);
}
};
final long requestingSize = (toSeqNo - fromSeqNo) == Long.MAX_VALUE ? Long.MAX_VALUE : (toSeqNo - fromSeqNo + 1L);
this.creationThread = Thread.currentThread();
this.searchBatchSize = requestingSize < searchBatchSize ? Math.toIntExact(requestingSize) : searchBatchSize;
this.fromSeqNo = fromSeqNo;
this.toSeqNo = toSeqNo;
this.lastSeenSeqNo = fromSeqNo - 1;
this.requiredFullRange = requiredFullRange;
this.singleConsumer = singleConsumer; this.singleConsumer = singleConsumer;
this.indexSearcher = newIndexSearcher(engineSearcher);
this.indexSearcher.setQueryCache(null);
this.accessStats = accessStats;
this.parallelArray = new ParallelArray(this.searchBatchSize); this.parallelArray = new ParallelArray(this.searchBatchSize);
this.indexVersionCreated = indexVersionCreated; this.lastSeenSeqNo = fromSeqNo - 1;
final TopDocs topDocs = searchOperations(null, accessStats); final TopDocs topDocs = nextTopDocs();
this.totalHits = Math.toIntExact(topDocs.totalHits.value()); this.maxDocIndex = topDocs.scoreDocs.length;
this.scoreDocs = topDocs.scoreDocs; fillParallelArray(topDocs.scoreDocs, parallelArray);
fillParallelArray(scoreDocs, parallelArray);
} }
@Override @Override
public void close() throws IOException { public void close() throws IOException {
assert assertAccessingThread(); assert assertAccessingThread();
onClose.close(); super.close();
} }
@Override @Override
public int totalOperations() { public int totalOperations() {
assert assertAccessingThread(); assert assertAccessingThread();
if (accessStats == false) { return super.totalOperations();
throw new IllegalStateException("Access stats of a snapshot created with [access_stats] is false");
}
return totalHits;
} }
@Override @Override
@ -146,7 +94,7 @@ final class LuceneChangesSnapshot implements Translog.Snapshot {
} }
@Override @Override
public Translog.Operation next() throws IOException { protected Translog.Operation nextOperation() throws IOException {
assert assertAccessingThread(); assert assertAccessingThread();
Translog.Operation op = null; Translog.Operation op = null;
for (int idx = nextDocIndex(); idx != -1; idx = nextDocIndex()) { for (int idx = nextDocIndex(); idx != -1; idx = nextDocIndex()) {
@ -155,12 +103,6 @@ final class LuceneChangesSnapshot implements Translog.Snapshot {
break; break;
} }
} }
if (requiredFullRange) {
rangeCheck(op);
}
if (op != null) {
lastSeenSeqNo = op.seqNo();
}
return op; return op;
} }
@ -171,48 +113,15 @@ final class LuceneChangesSnapshot implements Translog.Snapshot {
return true; return true;
} }
private void rangeCheck(Translog.Operation op) {
if (op == null) {
if (lastSeenSeqNo < toSeqNo) {
throw new MissingHistoryOperationsException(
"Not all operations between from_seqno ["
+ fromSeqNo
+ "] "
+ "and to_seqno ["
+ toSeqNo
+ "] found; prematurely terminated last_seen_seqno ["
+ lastSeenSeqNo
+ "]"
);
}
} else {
final long expectedSeqNo = lastSeenSeqNo + 1;
if (op.seqNo() != expectedSeqNo) {
throw new MissingHistoryOperationsException(
"Not all operations between from_seqno ["
+ fromSeqNo
+ "] "
+ "and to_seqno ["
+ toSeqNo
+ "] found; expected seqno ["
+ expectedSeqNo
+ "]; found ["
+ op
+ "]"
);
}
}
}
private int nextDocIndex() throws IOException { private int nextDocIndex() throws IOException {
// we have processed all docs in the current search - fetch the next batch // we have processed all docs in the current search - fetch the next batch
if (docIndex == scoreDocs.length && docIndex > 0) { if (docIndex == maxDocIndex && docIndex > 0) {
final ScoreDoc prev = scoreDocs[scoreDocs.length - 1]; var scoreDocs = nextTopDocs().scoreDocs;
scoreDocs = searchOperations((FieldDoc) prev, false).scoreDocs;
fillParallelArray(scoreDocs, parallelArray); fillParallelArray(scoreDocs, parallelArray);
docIndex = 0; docIndex = 0;
maxDocIndex = scoreDocs.length;
} }
if (docIndex < scoreDocs.length) { if (docIndex < maxDocIndex) {
int idx = docIndex; int idx = docIndex;
docIndex++; docIndex++;
return idx; return idx;
@ -237,14 +146,13 @@ final class LuceneChangesSnapshot implements Translog.Snapshot {
} }
int docBase = -1; int docBase = -1;
int maxDoc = 0; int maxDoc = 0;
List<LeafReaderContext> leaves = indexSearcher.getIndexReader().leaves();
int readerIndex = 0; int readerIndex = 0;
CombinedDocValues combinedDocValues = null; CombinedDocValues combinedDocValues = null;
LeafReaderContext leaf = null; LeafReaderContext leaf = null;
for (ScoreDoc scoreDoc : scoreDocs) { for (ScoreDoc scoreDoc : scoreDocs) {
if (scoreDoc.doc >= docBase + maxDoc) { if (scoreDoc.doc >= docBase + maxDoc) {
do { do {
leaf = leaves.get(readerIndex++); leaf = leaves().get(readerIndex++);
docBase = leaf.docBase; docBase = leaf.docBase;
maxDoc = leaf.reader().maxDoc(); maxDoc = leaf.reader().maxDoc();
} while (scoreDoc.doc >= docBase + maxDoc); } while (scoreDoc.doc >= docBase + maxDoc);
@ -253,6 +161,7 @@ final class LuceneChangesSnapshot implements Translog.Snapshot {
final int segmentDocID = scoreDoc.doc - docBase; final int segmentDocID = scoreDoc.doc - docBase;
final int index = scoreDoc.shardIndex; final int index = scoreDoc.shardIndex;
parallelArray.leafReaderContexts[index] = leaf; parallelArray.leafReaderContexts[index] = leaf;
parallelArray.docID[index] = scoreDoc.doc;
parallelArray.seqNo[index] = combinedDocValues.docSeqNo(segmentDocID); parallelArray.seqNo[index] = combinedDocValues.docSeqNo(segmentDocID);
parallelArray.primaryTerm[index] = combinedDocValues.docPrimaryTerm(segmentDocID); parallelArray.primaryTerm[index] = combinedDocValues.docPrimaryTerm(segmentDocID);
parallelArray.version[index] = combinedDocValues.docVersion(segmentDocID); parallelArray.version[index] = combinedDocValues.docVersion(segmentDocID);
@ -275,16 +184,6 @@ final class LuceneChangesSnapshot implements Translog.Snapshot {
return true; return true;
} }
private static IndexSearcher newIndexSearcher(Engine.Searcher engineSearcher) throws IOException {
return new IndexSearcher(Lucene.wrapAllDocsLive(engineSearcher.getDirectoryReader()));
}
private static Query rangeQuery(long fromSeqNo, long toSeqNo, IndexVersion indexVersionCreated) {
return new BooleanQuery.Builder().add(LongPoint.newRangeQuery(SeqNoFieldMapper.NAME, fromSeqNo, toSeqNo), BooleanClause.Occur.MUST)
.add(Queries.newNonNestedFilter(indexVersionCreated), BooleanClause.Occur.MUST) // exclude non-root nested documents
.build();
}
static int countOperations(Engine.Searcher engineSearcher, long fromSeqNo, long toSeqNo, IndexVersion indexVersionCreated) static int countOperations(Engine.Searcher engineSearcher, long fromSeqNo, long toSeqNo, IndexVersion indexVersionCreated)
throws IOException { throws IOException {
if (fromSeqNo < 0 || toSeqNo < 0 || fromSeqNo > toSeqNo) { if (fromSeqNo < 0 || toSeqNo < 0 || fromSeqNo > toSeqNo) {
@ -293,23 +192,9 @@ final class LuceneChangesSnapshot implements Translog.Snapshot {
return newIndexSearcher(engineSearcher).count(rangeQuery(fromSeqNo, toSeqNo, indexVersionCreated)); return newIndexSearcher(engineSearcher).count(rangeQuery(fromSeqNo, toSeqNo, indexVersionCreated));
} }
private TopDocs searchOperations(FieldDoc after, boolean accurateTotalHits) throws IOException {
final Query rangeQuery = rangeQuery(Math.max(fromSeqNo, lastSeenSeqNo), toSeqNo, indexVersionCreated);
assert accurateTotalHits == false || after == null : "accurate total hits is required by the first batch only";
final SortField sortBySeqNo = new SortField(SeqNoFieldMapper.NAME, SortField.Type.LONG);
TopFieldCollectorManager topFieldCollectorManager = new TopFieldCollectorManager(
new Sort(sortBySeqNo),
searchBatchSize,
after,
accurateTotalHits ? Integer.MAX_VALUE : 0,
false
);
return indexSearcher.search(rangeQuery, topFieldCollectorManager);
}
private Translog.Operation readDocAsOp(int docIndex) throws IOException { private Translog.Operation readDocAsOp(int docIndex) throws IOException {
final LeafReaderContext leaf = parallelArray.leafReaderContexts[docIndex]; final LeafReaderContext leaf = parallelArray.leafReaderContexts[docIndex];
final int segmentDocID = scoreDocs[docIndex].doc - leaf.docBase; final int segmentDocID = parallelArray.docID[docIndex] - leaf.docBase;
final long primaryTerm = parallelArray.primaryTerm[docIndex]; final long primaryTerm = parallelArray.primaryTerm[docIndex];
assert primaryTerm > 0 : "nested child document must be excluded"; assert primaryTerm > 0 : "nested child document must be excluded";
final long seqNo = parallelArray.seqNo[docIndex]; final long seqNo = parallelArray.seqNo[docIndex];
@ -385,19 +270,13 @@ final class LuceneChangesSnapshot implements Translog.Snapshot {
+ "], op [" + "], op ["
+ op + op
+ "]"; + "]";
lastSeenSeqNo = op.seqNo();
return op; return op;
} }
private static boolean assertDocSoftDeleted(LeafReader leafReader, int segmentDocId) throws IOException {
final NumericDocValues ndv = leafReader.getNumericDocValues(Lucene.SOFT_DELETES_FIELD);
if (ndv == null || ndv.advanceExact(segmentDocId) == false) {
throw new IllegalStateException("DocValues for field [" + Lucene.SOFT_DELETES_FIELD + "] is not found");
}
return ndv.longValue() == 1;
}
private static final class ParallelArray { private static final class ParallelArray {
final LeafReaderContext[] leafReaderContexts; final LeafReaderContext[] leafReaderContexts;
final int[] docID;
final long[] version; final long[] version;
final long[] seqNo; final long[] seqNo;
final long[] primaryTerm; final long[] primaryTerm;
@ -406,6 +285,7 @@ final class LuceneChangesSnapshot implements Translog.Snapshot {
boolean useSequentialStoredFieldsReader = false; boolean useSequentialStoredFieldsReader = false;
ParallelArray(int size) { ParallelArray(int size) {
docID = new int[size];
version = new long[size]; version = new long[size];
seqNo = new long[size]; seqNo = new long[size];
primaryTerm = new long[size]; primaryTerm = new long[size];

View file

@ -0,0 +1,244 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.index.engine;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.util.ArrayUtil;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.fieldvisitor.LeafStoredFieldLoader;
import org.elasticsearch.index.fieldvisitor.StoredFieldLoader;
import org.elasticsearch.index.mapper.MappingLookup;
import org.elasticsearch.index.mapper.SourceFieldMetrics;
import org.elasticsearch.index.mapper.SourceLoader;
import org.elasticsearch.index.translog.Translog;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Deque;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
/**
* A {@link SearchBasedChangesSnapshot} that utilizes a synthetic field loader to rebuild the recovery source.
* This snapshot is activated when {@link IndexSettings#RECOVERY_USE_SYNTHETIC_SOURCE_SETTING}
* is enabled on the underlying index.
*
* The {@code maxMemorySizeInBytes} parameter limits the total size of uncompressed _sources
* loaded into memory during batch retrieval.
*/
public class LuceneSyntheticSourceChangesSnapshot extends SearchBasedChangesSnapshot {
private final long maxMemorySizeInBytes;
private final StoredFieldLoader storedFieldLoader;
private final SourceLoader sourceLoader;
private int skippedOperations;
private long lastSeenSeqNo;
private record SearchRecord(FieldDoc doc, boolean isTombstone, long seqNo, long primaryTerm, long version, long size) {
int index() {
return doc.shardIndex;
}
int docID() {
return doc.doc;
}
boolean hasRecoverySourceSize() {
return size != -1;
}
}
private final Deque<SearchRecord> pendingDocs = new LinkedList<>();
private final Deque<Translog.Operation> operationQueue = new LinkedList<>();
public LuceneSyntheticSourceChangesSnapshot(
MappingLookup mappingLookup,
Engine.Searcher engineSearcher,
int searchBatchSize,
long maxMemorySizeInBytes,
long fromSeqNo,
long toSeqNo,
boolean requiredFullRange,
boolean accessStats,
IndexVersion indexVersionCreated
) throws IOException {
super(engineSearcher, searchBatchSize, fromSeqNo, toSeqNo, requiredFullRange, accessStats, indexVersionCreated);
assert mappingLookup.isSourceSynthetic();
// ensure we can buffer at least one document
this.maxMemorySizeInBytes = maxMemorySizeInBytes > 0 ? maxMemorySizeInBytes : 1;
this.sourceLoader = mappingLookup.newSourceLoader(SourceFieldMetrics.NOOP);
Set<String> storedFields = sourceLoader.requiredStoredFields();
assert mappingLookup.isSourceSynthetic() : "synthetic source must be enabled for proper functionality.";
this.storedFieldLoader = StoredFieldLoader.create(false, storedFields);
this.lastSeenSeqNo = fromSeqNo - 1;
}
@Override
public int skippedOperations() {
return skippedOperations;
}
@Override
protected Translog.Operation nextOperation() throws IOException {
while (true) {
if (operationQueue.isEmpty()) {
loadNextBatch();
}
if (operationQueue.isEmpty()) {
return null;
}
var op = operationQueue.pollFirst();
if (op.seqNo() == lastSeenSeqNo) {
skippedOperations++;
continue;
}
lastSeenSeqNo = op.seqNo();
return op;
}
}
private void loadNextBatch() throws IOException {
List<SearchRecord> documentsToLoad = new ArrayList<>();
long accumulatedSize = 0;
while (accumulatedSize < maxMemorySizeInBytes) {
if (pendingDocs.isEmpty()) {
ScoreDoc[] topDocs = nextTopDocs().scoreDocs;
if (topDocs.length == 0) {
break;
}
pendingDocs.addAll(Arrays.asList(transformScoreDocsToRecords(topDocs)));
}
SearchRecord document = pendingDocs.pollFirst();
document.doc().shardIndex = documentsToLoad.size();
documentsToLoad.add(document);
accumulatedSize += document.size();
}
for (var op : loadDocuments(documentsToLoad)) {
if (op == null) {
skippedOperations++;
continue;
}
operationQueue.add(op);
}
}
private SearchRecord[] transformScoreDocsToRecords(ScoreDoc[] scoreDocs) throws IOException {
ArrayUtil.introSort(scoreDocs, Comparator.comparingInt(doc -> doc.doc));
SearchRecord[] documentRecords = new SearchRecord[scoreDocs.length];
CombinedDocValues combinedDocValues = null;
int docBase = -1;
int maxDoc = 0;
int readerIndex = 0;
LeafReaderContext leafReaderContext;
for (int i = 0; i < scoreDocs.length; i++) {
ScoreDoc scoreDoc = scoreDocs[i];
if (scoreDoc.doc >= docBase + maxDoc) {
do {
leafReaderContext = leaves().get(readerIndex++);
docBase = leafReaderContext.docBase;
maxDoc = leafReaderContext.reader().maxDoc();
} while (scoreDoc.doc >= docBase + maxDoc);
combinedDocValues = new CombinedDocValues(leafReaderContext.reader());
}
int segmentDocID = scoreDoc.doc - docBase;
int index = scoreDoc.shardIndex;
var primaryTerm = combinedDocValues.docPrimaryTerm(segmentDocID);
assert primaryTerm > 0 : "nested child document must be excluded";
documentRecords[index] = new SearchRecord(
(FieldDoc) scoreDoc,
combinedDocValues.isTombstone(segmentDocID),
combinedDocValues.docSeqNo(segmentDocID),
primaryTerm,
combinedDocValues.docVersion(segmentDocID),
combinedDocValues.recoverySourceSize(segmentDocID)
);
}
return documentRecords;
}
private Translog.Operation[] loadDocuments(List<SearchRecord> documentRecords) throws IOException {
documentRecords.sort(Comparator.comparingInt(doc -> doc.docID()));
Translog.Operation[] operations = new Translog.Operation[documentRecords.size()];
int docBase = -1;
int maxDoc = 0;
int readerIndex = 0;
LeafReaderContext leafReaderContext = null;
LeafStoredFieldLoader leafFieldLoader = null;
SourceLoader.Leaf leafSourceLoader = null;
for (int i = 0; i < documentRecords.size(); i++) {
SearchRecord docRecord = documentRecords.get(i);
if (docRecord.docID() >= docBase + maxDoc) {
do {
leafReaderContext = leaves().get(readerIndex++);
docBase = leafReaderContext.docBase;
maxDoc = leafReaderContext.reader().maxDoc();
} while (docRecord.docID() >= docBase + maxDoc);
leafFieldLoader = storedFieldLoader.getLoader(leafReaderContext, null);
leafSourceLoader = sourceLoader.leaf(leafReaderContext.reader(), null);
}
int segmentDocID = docRecord.docID() - docBase;
leafFieldLoader.advanceTo(segmentDocID);
operations[docRecord.index()] = createOperation(docRecord, leafFieldLoader, leafSourceLoader, segmentDocID, leafReaderContext);
}
return operations;
}
private Translog.Operation createOperation(
SearchRecord docRecord,
LeafStoredFieldLoader fieldLoader,
SourceLoader.Leaf sourceLoader,
int segmentDocID,
LeafReaderContext context
) throws IOException {
if (docRecord.isTombstone() && fieldLoader.id() == null) {
assert docRecord.version() == 1L : "Noop tombstone should have version 1L; actual version [" + docRecord.version() + "]";
assert assertDocSoftDeleted(context.reader(), segmentDocID) : "Noop but soft_deletes field is not set [" + docRecord + "]";
return new Translog.NoOp(docRecord.seqNo(), docRecord.primaryTerm(), "null");
} else if (docRecord.isTombstone()) {
assert assertDocSoftDeleted(context.reader(), segmentDocID) : "Delete op but soft_deletes field is not set [" + docRecord + "]";
return new Translog.Delete(fieldLoader.id(), docRecord.seqNo(), docRecord.primaryTerm(), docRecord.version());
} else {
if (docRecord.hasRecoverySourceSize() == false) {
// TODO: Callers should ask for the range for which source should be retained. Thus we should always
// check for the existence of source once we make peer-recovery send ops after the local checkpoint.
if (requiredFullRange) {
throw new MissingHistoryOperationsException(
"source not found for seqno=" + docRecord.seqNo() + " from_seqno=" + fromSeqNo + " to_seqno=" + toSeqNo
);
} else {
skippedOperations++;
return null;
}
}
BytesReference source = sourceLoader.source(fieldLoader, segmentDocID).internalSourceRef();
return new Translog.Index(
fieldLoader.id(),
docRecord.seqNo(),
docRecord.primaryTerm(),
docRecord.version(),
source,
fieldLoader.routing(),
-1 // autogenerated timestamp
);
}
}
}
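The batch-loading strategy can be stated in isolation: records are drained from the pending queue until the accumulated recovery-source size reaches maxMemorySizeInBytes, and because that budget is clamped to at least one byte, at least one record is always admitted, so a single oversized source cannot stall the snapshot. A standalone sketch of the pattern (generic types; refilling the pending queue from the next search batch is omitted):

import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.function.ToLongFunction;

final class SizeBudgetedBatching {
    // Drain elements until the size budget is reached; at least one element is always taken.
    static <T> List<T> nextBatch(Deque<T> pending, ToLongFunction<T> sizeOf, long budget) {
        List<T> batch = new ArrayList<>();
        long used = 0;
        while (used < budget && pending.isEmpty() == false) {
            T next = pending.pollFirst();
            batch.add(next);
            used += sizeOf.applyAsLong(next);
        }
        return batch;
    }
}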

View file

@ -356,7 +356,7 @@ public class ReadOnlyEngine extends Engine {
@Override @Override
public int countChanges(String source, long fromSeqNo, long toSeqNo) throws IOException { public int countChanges(String source, long fromSeqNo, long toSeqNo) throws IOException {
try (Translog.Snapshot snapshot = newChangesSnapshot(source, fromSeqNo, toSeqNo, false, true, true)) { try (Translog.Snapshot snapshot = newChangesSnapshot(source, fromSeqNo, toSeqNo, false, true, true, -1)) {
return snapshot.totalOperations(); return snapshot.totalOperations();
} }
} }
@ -369,6 +369,19 @@ public class ReadOnlyEngine extends Engine {
boolean requiredFullRange, boolean requiredFullRange,
boolean singleConsumer, boolean singleConsumer,
boolean accessStats boolean accessStats
) throws IOException {
return Translog.Snapshot.EMPTY;
}
@Override
public Translog.Snapshot newChangesSnapshot(
String source,
long fromSeqNo,
long toSeqNo,
boolean requiredFullRange,
boolean singleConsumer,
boolean accessStats,
long maxChunkSize
) { ) {
return Translog.Snapshot.EMPTY; return Translog.Snapshot.EMPTY;
} }

View file

@ -33,17 +33,18 @@ import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight; import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.BitSetIterator; import org.apache.lucene.util.BitSetIterator;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.search.internal.FilterStoredFieldVisitor; import org.elasticsearch.search.internal.FilterStoredFieldVisitor;
import java.io.IOException; import java.io.IOException;
import java.util.Arrays; import java.util.Arrays;
import java.util.Objects;
import java.util.function.Supplier; import java.util.function.Supplier;
final class RecoverySourcePruneMergePolicy extends OneMergeWrappingMergePolicy { final class RecoverySourcePruneMergePolicy extends OneMergeWrappingMergePolicy {
RecoverySourcePruneMergePolicy( RecoverySourcePruneMergePolicy(
String recoverySourceField, @Nullable String pruneStoredFieldName,
String pruneNumericDVFieldName,
boolean pruneIdField, boolean pruneIdField,
Supplier<Query> retainSourceQuerySupplier, Supplier<Query> retainSourceQuerySupplier,
MergePolicy in MergePolicy in
@ -52,18 +53,19 @@ final class RecoverySourcePruneMergePolicy extends OneMergeWrappingMergePolicy {
@Override @Override
public CodecReader wrapForMerge(CodecReader reader) throws IOException { public CodecReader wrapForMerge(CodecReader reader) throws IOException {
CodecReader wrapped = toWrap.wrapForMerge(reader); CodecReader wrapped = toWrap.wrapForMerge(reader);
return wrapReader(recoverySourceField, pruneIdField, wrapped, retainSourceQuerySupplier); return wrapReader(pruneStoredFieldName, pruneNumericDVFieldName, pruneIdField, wrapped, retainSourceQuerySupplier);
} }
}); });
} }
private static CodecReader wrapReader( private static CodecReader wrapReader(
String recoverySourceField, String pruneStoredFieldName,
String pruneNumericDVFieldName,
boolean pruneIdField, boolean pruneIdField,
CodecReader reader, CodecReader reader,
Supplier<Query> retainSourceQuerySupplier Supplier<Query> retainSourceQuerySupplier
) throws IOException { ) throws IOException {
NumericDocValues recoverySource = reader.getNumericDocValues(recoverySourceField); NumericDocValues recoverySource = reader.getNumericDocValues(pruneNumericDVFieldName);
if (recoverySource == null || recoverySource.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) { if (recoverySource == null || recoverySource.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
return reader; // early terminate - nothing to do here since none of the docs has a recovery source anymore. return reader; // early terminate - nothing to do here since none of the docs has a recovery source anymore.
} }
@ -78,21 +80,35 @@ final class RecoverySourcePruneMergePolicy extends OneMergeWrappingMergePolicy {
if (recoverySourceToKeep.cardinality() == reader.maxDoc()) { if (recoverySourceToKeep.cardinality() == reader.maxDoc()) {
return reader; // keep all source return reader; // keep all source
} }
return new SourcePruningFilterCodecReader(recoverySourceField, pruneIdField, reader, recoverySourceToKeep); return new SourcePruningFilterCodecReader(
pruneStoredFieldName,
pruneNumericDVFieldName,
pruneIdField,
reader,
recoverySourceToKeep
);
} else { } else {
return new SourcePruningFilterCodecReader(recoverySourceField, pruneIdField, reader, null); return new SourcePruningFilterCodecReader(pruneStoredFieldName, pruneNumericDVFieldName, pruneIdField, reader, null);
} }
} }
private static class SourcePruningFilterCodecReader extends FilterCodecReader { private static class SourcePruningFilterCodecReader extends FilterCodecReader {
private final BitSet recoverySourceToKeep; private final BitSet recoverySourceToKeep;
private final String recoverySourceField; private final String pruneStoredFieldName;
private final String pruneNumericDVFieldName;
private final boolean pruneIdField; private final boolean pruneIdField;
SourcePruningFilterCodecReader(String recoverySourceField, boolean pruneIdField, CodecReader reader, BitSet recoverySourceToKeep) { SourcePruningFilterCodecReader(
@Nullable String pruneStoredFieldName,
String pruneNumericDVFieldName,
boolean pruneIdField,
CodecReader reader,
BitSet recoverySourceToKeep
) {
super(reader); super(reader);
this.recoverySourceField = recoverySourceField; this.pruneStoredFieldName = pruneStoredFieldName;
this.recoverySourceToKeep = recoverySourceToKeep; this.recoverySourceToKeep = recoverySourceToKeep;
this.pruneNumericDVFieldName = pruneNumericDVFieldName;
this.pruneIdField = pruneIdField; this.pruneIdField = pruneIdField;
} }
@ -103,8 +119,8 @@ final class RecoverySourcePruneMergePolicy extends OneMergeWrappingMergePolicy {
@Override @Override
public NumericDocValues getNumeric(FieldInfo field) throws IOException { public NumericDocValues getNumeric(FieldInfo field) throws IOException {
NumericDocValues numeric = super.getNumeric(field); NumericDocValues numeric = super.getNumeric(field);
if (recoverySourceField.equals(field.name)) { if (field.name.equals(pruneNumericDVFieldName)) {
assert numeric != null : recoverySourceField + " must have numeric DV but was null"; assert numeric != null : pruneNumericDVFieldName + " must have numeric DV but was null";
final DocIdSetIterator intersection; final DocIdSetIterator intersection;
if (recoverySourceToKeep == null) { if (recoverySourceToKeep == null) {
// we can't return null here lucenes DocIdMerger expects an instance // we can't return null here lucenes DocIdMerger expects an instance
@ -139,10 +155,14 @@ final class RecoverySourcePruneMergePolicy extends OneMergeWrappingMergePolicy {
@Override @Override
public StoredFieldsReader getFieldsReader() { public StoredFieldsReader getFieldsReader() {
if (pruneStoredFieldName == null && pruneIdField == false) {
// nothing to prune, we can use the original fields reader
return super.getFieldsReader();
}
return new RecoverySourcePruningStoredFieldsReader( return new RecoverySourcePruningStoredFieldsReader(
super.getFieldsReader(), super.getFieldsReader(),
recoverySourceToKeep, recoverySourceToKeep,
recoverySourceField, pruneStoredFieldName,
pruneIdField pruneIdField
); );
} }
@ -241,12 +261,13 @@ final class RecoverySourcePruneMergePolicy extends OneMergeWrappingMergePolicy {
RecoverySourcePruningStoredFieldsReader( RecoverySourcePruningStoredFieldsReader(
StoredFieldsReader in, StoredFieldsReader in,
BitSet recoverySourceToKeep, BitSet recoverySourceToKeep,
String recoverySourceField, @Nullable String recoverySourceField,
boolean pruneIdField boolean pruneIdField
) { ) {
super(in); super(in);
assert recoverySourceField != null || pruneIdField : "nothing to prune";
this.recoverySourceToKeep = recoverySourceToKeep; this.recoverySourceToKeep = recoverySourceToKeep;
this.recoverySourceField = Objects.requireNonNull(recoverySourceField); this.recoverySourceField = recoverySourceField;
this.pruneIdField = pruneIdField; this.pruneIdField = pruneIdField;
} }
@ -258,7 +279,7 @@ final class RecoverySourcePruneMergePolicy extends OneMergeWrappingMergePolicy {
super.document(docID, new FilterStoredFieldVisitor(visitor) { super.document(docID, new FilterStoredFieldVisitor(visitor) {
@Override @Override
public Status needsField(FieldInfo fieldInfo) throws IOException { public Status needsField(FieldInfo fieldInfo) throws IOException {
if (recoverySourceField.equals(fieldInfo.name)) { if (fieldInfo.name.equals(recoverySourceField)) {
return Status.NO; return Status.NO;
} }
if (pruneIdField && IdFieldMapper.NAME.equals(fieldInfo.name)) { if (pruneIdField && IdFieldMapper.NAME.equals(fieldInfo.name)) {

View file

@ -0,0 +1,233 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.index.engine;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopFieldCollectorManager;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.core.IOUtils;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.index.translog.Translog;
import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Abstract class that provides a snapshot mechanism to retrieve operations from a live Lucene index
* within a specified range of sequence numbers. Subclasses are expected to define the
* method to fetch the next batch of operations.
*/
public abstract class SearchBasedChangesSnapshot implements Translog.Snapshot, Closeable {
public static final int DEFAULT_BATCH_SIZE = 1024;
private final IndexVersion indexVersionCreated;
private final IndexSearcher indexSearcher;
private final Closeable onClose;
protected final long fromSeqNo, toSeqNo;
protected final boolean requiredFullRange;
protected final int searchBatchSize;
private final boolean accessStats;
private final int totalHits;
private FieldDoc afterDoc;
private long lastSeenSeqNo;
/**
* Constructs a new snapshot for fetching changes within a sequence number range.
*
* @param engineSearcher Engine searcher instance.
* @param searchBatchSize Number of documents to retrieve per batch.
* @param fromSeqNo Starting sequence number.
* @param toSeqNo Ending sequence number.
* @param requiredFullRange Whether the full range is required.
* @param accessStats If true, enable access statistics for counting total operations.
* @param indexVersionCreated Version of the index when it was created.
*/
protected SearchBasedChangesSnapshot(
Engine.Searcher engineSearcher,
int searchBatchSize,
long fromSeqNo,
long toSeqNo,
boolean requiredFullRange,
boolean accessStats,
IndexVersion indexVersionCreated
) throws IOException {
if (fromSeqNo < 0 || toSeqNo < 0 || fromSeqNo > toSeqNo) {
throw new IllegalArgumentException("Invalid range; from_seqno [" + fromSeqNo + "], to_seqno [" + toSeqNo + "]");
}
if (searchBatchSize <= 0) {
throw new IllegalArgumentException("Search_batch_size must be positive [" + searchBatchSize + "]");
}
final AtomicBoolean closed = new AtomicBoolean();
this.onClose = () -> {
if (closed.compareAndSet(false, true)) {
IOUtils.close(engineSearcher);
}
};
this.indexVersionCreated = indexVersionCreated;
this.fromSeqNo = fromSeqNo;
this.toSeqNo = toSeqNo;
this.lastSeenSeqNo = fromSeqNo - 1;
this.requiredFullRange = requiredFullRange;
this.indexSearcher = newIndexSearcher(engineSearcher);
this.indexSearcher.setQueryCache(null);
long requestingSize = (toSeqNo - fromSeqNo == Long.MAX_VALUE) ? Long.MAX_VALUE : (toSeqNo - fromSeqNo + 1L);
this.searchBatchSize = (int) Math.min(requestingSize, searchBatchSize);
this.accessStats = accessStats;
this.totalHits = accessStats ? indexSearcher.count(rangeQuery(fromSeqNo, toSeqNo, indexVersionCreated)) : -1;
}
/**
* Abstract method for retrieving the next operation. Should be implemented by subclasses.
*
* @return The next Translog.Operation in the snapshot.
* @throws IOException If an I/O error occurs.
*/
protected abstract Translog.Operation nextOperation() throws IOException;
/**
* Returns the list of index leaf reader contexts.
*
* @return List of LeafReaderContext.
*/
public List<LeafReaderContext> leaves() {
return indexSearcher.getIndexReader().leaves();
}
@Override
public int totalOperations() {
if (accessStats == false) {
throw new IllegalStateException("Access stats of a snapshot created with [access_stats] is false");
}
return totalHits;
}
@Override
public final Translog.Operation next() throws IOException {
Translog.Operation op = nextOperation();
if (requiredFullRange) {
verifyRange(op);
}
if (op != null) {
assert fromSeqNo <= op.seqNo() && op.seqNo() <= toSeqNo && lastSeenSeqNo < op.seqNo()
: "Unexpected operation; last_seen_seqno ["
+ lastSeenSeqNo
+ "], from_seqno ["
+ fromSeqNo
+ "], to_seqno ["
+ toSeqNo
+ "], op ["
+ op
+ "]";
lastSeenSeqNo = op.seqNo();
}
return op;
}
@Override
public void close() throws IOException {
onClose.close();
}
/**
* Retrieves the next batch of top documents based on the sequence range.
*
* @return TopDocs instance containing the documents in the current batch.
*/
protected TopDocs nextTopDocs() throws IOException {
Query rangeQuery = rangeQuery(Math.max(fromSeqNo, lastSeenSeqNo), toSeqNo, indexVersionCreated);
SortField sortBySeqNo = new SortField(SeqNoFieldMapper.NAME, SortField.Type.LONG);
TopFieldCollectorManager collectorManager = new TopFieldCollectorManager(
new Sort(sortBySeqNo),
searchBatchSize,
afterDoc,
0,
false
);
TopDocs results = indexSearcher.search(rangeQuery, collectorManager);
if (results.scoreDocs.length > 0) {
afterDoc = (FieldDoc) results.scoreDocs[results.scoreDocs.length - 1];
}
for (int i = 0; i < results.scoreDocs.length; i++) {
results.scoreDocs[i].shardIndex = i;
}
return results;
}
static IndexSearcher newIndexSearcher(Engine.Searcher engineSearcher) throws IOException {
return new IndexSearcher(Lucene.wrapAllDocsLive(engineSearcher.getDirectoryReader()));
}
static Query rangeQuery(long fromSeqNo, long toSeqNo, IndexVersion indexVersionCreated) {
return new BooleanQuery.Builder().add(LongPoint.newRangeQuery(SeqNoFieldMapper.NAME, fromSeqNo, toSeqNo), BooleanClause.Occur.MUST)
.add(Queries.newNonNestedFilter(indexVersionCreated), BooleanClause.Occur.MUST)
.build();
}
private void verifyRange(Translog.Operation op) {
if (op == null && lastSeenSeqNo < toSeqNo) {
throw new MissingHistoryOperationsException(
"Not all operations between from_seqno ["
+ fromSeqNo
+ "] "
+ "and to_seqno ["
+ toSeqNo
+ "] found; prematurely terminated last_seen_seqno ["
+ lastSeenSeqNo
+ "]"
);
} else if (op != null && op.seqNo() != lastSeenSeqNo + 1) {
throw new MissingHistoryOperationsException(
"Not all operations between from_seqno ["
+ fromSeqNo
+ "] "
+ "and to_seqno ["
+ toSeqNo
+ "] found; expected seqno ["
+ lastSeenSeqNo
+ 1
+ "]; found ["
+ op
+ "]"
);
}
}
protected static boolean assertDocSoftDeleted(LeafReader leafReader, int segmentDocId) throws IOException {
NumericDocValues docValues = leafReader.getNumericDocValues(Lucene.SOFT_DELETES_FIELD);
if (docValues == null || docValues.advanceExact(segmentDocId) == false) {
throw new IllegalStateException("DocValues for field [" + Lucene.SOFT_DELETES_FIELD + "] is not found");
}
return docValues.longValue() == 1;
}
}
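The class above defines the generic machinery: page through the live index in seq_no order via nextTopDocs(), while next() enforces the range and contiguity checks. To make that iteration contract concrete, here is a minimal plain-Java sketch of the same batching-and-contiguity idea over an in-memory map; the class name and the String payloads are stand-ins, not the Lucene-backed implementation.

```java
import java.util.Collections;
import java.util.Iterator;
import java.util.NavigableMap;
import java.util.TreeMap;

/** Minimal sketch of the seq_no-range snapshot contract: batched, ordered, contiguity-checked. */
final class InMemoryChangesSnapshot {
    private final NavigableMap<Long, String> ops; // seq_no -> operation payload (stand-in for Translog.Operation)
    private final long toSeqNo;
    private final int batchSize;
    private long lastSeenSeqNo;
    private Iterator<Long> currentBatch = Collections.emptyIterator();

    InMemoryChangesSnapshot(NavigableMap<Long, String> ops, long fromSeqNo, long toSeqNo, int batchSize) {
        if (fromSeqNo < 0 || toSeqNo < fromSeqNo || batchSize <= 0) {
            throw new IllegalArgumentException("invalid range or batch size");
        }
        this.ops = ops;
        this.toSeqNo = toSeqNo;
        this.batchSize = batchSize;
        this.lastSeenSeqNo = fromSeqNo - 1;
    }

    /** Returns the next operation in seq_no order, or null when the range is exhausted; throws on a gap. */
    String next() {
        if (currentBatch.hasNext() == false) {
            // "search" for the next batch, analogous to nextTopDocs(): everything after the last seen seq_no
            currentBatch = ops.subMap(lastSeenSeqNo + 1, true, toSeqNo, true)
                .keySet().stream().limit(batchSize).toList().iterator();
        }
        if (currentBatch.hasNext() == false) {
            if (lastSeenSeqNo < toSeqNo) {
                throw new IllegalStateException("missing operations after seq_no [" + lastSeenSeqNo + "]");
            }
            return null;
        }
        long seqNo = currentBatch.next();
        if (seqNo != lastSeenSeqNo + 1) {
            throw new IllegalStateException("expected seq_no [" + (lastSeenSeqNo + 1) + "] but found [" + seqNo + "]");
        }
        lastSeenSeqNo = seqNo;
        return ops.get(seqNo);
    }

    public static void main(String[] args) {
        NavigableMap<Long, String> ops = new TreeMap<>();
        for (long i = 0; i <= 5; i++) {
            ops.put(i, "op-" + i);
        }
        InMemoryChangesSnapshot snapshot = new InMemoryChangesSnapshot(ops, 2, 5, 2);
        for (String op; (op = snapshot.next()) != null;) {
            System.out.println(op); // op-2 .. op-5, fetched two at a time
        }
    }
}
```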

View file

@ -47,5 +47,4 @@ public interface LeafStoredFieldLoader {
* @return stored fields for the current document * @return stored fields for the current document
*/ */
Map<String, List<Object>> storedFields(); Map<String, List<Object>> storedFields();
} }

View file

@ -30,6 +30,7 @@ public class DocumentMapper {
private final MapperMetrics mapperMetrics; private final MapperMetrics mapperMetrics;
private final IndexVersion indexVersion; private final IndexVersion indexVersion;
private final Logger logger; private final Logger logger;
private final String indexName;
/** /**
* Create a new {@link DocumentMapper} that holds empty mappings. * Create a new {@link DocumentMapper} that holds empty mappings.
@ -67,6 +68,7 @@ public class DocumentMapper {
this.mapperMetrics = mapperMetrics; this.mapperMetrics = mapperMetrics;
this.indexVersion = version; this.indexVersion = version;
this.logger = Loggers.getLogger(getClass(), indexName); this.logger = Loggers.getLogger(getClass(), indexName);
this.indexName = indexName;
assert mapping.toCompressedXContent().equals(source) || isSyntheticSourceMalformed(source, version) assert mapping.toCompressedXContent().equals(source) || isSyntheticSourceMalformed(source, version)
: "provided source [" + source + "] differs from mapping [" + mapping.toCompressedXContent() + "]"; : "provided source [" + source + "] differs from mapping [" + mapping.toCompressedXContent() + "]";
@ -74,9 +76,9 @@ public class DocumentMapper {
private void maybeLog(Exception ex) { private void maybeLog(Exception ex) {
if (logger.isDebugEnabled()) { if (logger.isDebugEnabled()) {
logger.debug("Error while parsing document: " + ex.getMessage(), ex); logger.debug("Error while parsing document for index [" + indexName + "]: " + ex.getMessage(), ex);
} else if (IntervalThrottler.DOCUMENT_PARSING_FAILURE.accept()) { } else if (IntervalThrottler.DOCUMENT_PARSING_FAILURE.accept()) {
logger.info("Error while parsing document: " + ex.getMessage(), ex); logger.info("Error while parsing document for index [" + indexName + "]: " + ex.getMessage(), ex);
} }
} }
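Beyond adding the index name to the message, the interesting part of maybeLog is the throttle: parse failures go to DEBUG when that level is enabled, and otherwise to INFO only when the interval throttler accepts, so a misbehaving client cannot flood the log. A minimal sketch of that accept-at-most-once-per-interval idea in plain Java follows; IntervalThrottlerSketch and its interval are placeholders, not the IntervalThrottler class used above.

```java
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

/** Accepts at most one event per interval; callers skip the noisy log line when accept() returns false. */
final class IntervalThrottlerSketch {
    private final long intervalNanos;
    private final AtomicLong nextAllowedNanos;

    IntervalThrottlerSketch(long interval, TimeUnit unit) {
        this.intervalNanos = unit.toNanos(interval);
        this.nextAllowedNanos = new AtomicLong(System.nanoTime());
    }

    boolean accept() {
        long now = System.nanoTime();
        long next = nextAllowedNanos.get();
        // Overflow-safe nanoTime comparison; only one racing thread wins the CAS and gets to log.
        return now - next >= 0 && nextAllowedNanos.compareAndSet(next, now + intervalNanos);
    }

    public static void main(String[] args) throws InterruptedException {
        IntervalThrottlerSketch throttler = new IntervalThrottlerSketch(100, TimeUnit.MILLISECONDS);
        System.out.println(throttler.accept()); // true
        System.out.println(throttler.accept()); // false, still inside the interval
        Thread.sleep(150);
        System.out.println(throttler.accept()); // true again
    }
}
```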

View file

@ -60,6 +60,8 @@ public class SourceFieldMapper extends MetadataFieldMapper {
public static final String NAME = "_source"; public static final String NAME = "_source";
public static final String RECOVERY_SOURCE_NAME = "_recovery_source"; public static final String RECOVERY_SOURCE_NAME = "_recovery_source";
public static final String RECOVERY_SOURCE_SIZE_NAME = "_recovery_source_size";
public static final String CONTENT_TYPE = "_source"; public static final String CONTENT_TYPE = "_source";
public static final String LOSSY_PARAMETERS_ALLOWED_SETTING_NAME = "index.lossy.source-mapping-parameters"; public static final String LOSSY_PARAMETERS_ALLOWED_SETTING_NAME = "index.lossy.source-mapping-parameters";
@ -413,8 +415,19 @@ public class SourceFieldMapper extends MetadataFieldMapper {
if (enableRecoverySource && originalSource != null && adaptedSource != originalSource) { if (enableRecoverySource && originalSource != null && adaptedSource != originalSource) {
// if we omitted source or modified it we add the _recovery_source to ensure we have it for ops based recovery // if we omitted source or modified it we add the _recovery_source to ensure we have it for ops based recovery
BytesRef ref = originalSource.toBytesRef(); BytesRef ref = originalSource.toBytesRef();
context.doc().add(new StoredField(RECOVERY_SOURCE_NAME, ref.bytes, ref.offset, ref.length)); if (context.indexSettings().isRecoverySourceSyntheticEnabled()) {
context.doc().add(new NumericDocValuesField(RECOVERY_SOURCE_NAME, 1)); assert isSynthetic() : "recovery source should not be disabled on non-synthetic source";
/**
* We use synthetic source for recovery, so we omit the recovery source.
* Instead, we record only the size of the uncompressed source.
* This size is used in {@link LuceneSyntheticSourceChangesSnapshot} to control memory
* usage during the recovery process when loading a batch of synthetic sources.
*/
context.doc().add(new NumericDocValuesField(RECOVERY_SOURCE_SIZE_NAME, ref.length));
} else {
context.doc().add(new StoredField(RECOVERY_SOURCE_NAME, ref.bytes, ref.offset, ref.length));
context.doc().add(new NumericDocValuesField(RECOVERY_SOURCE_NAME, 1));
}
} }
} }
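The comment above explains the trade-off: with synthetic recovery source the full _recovery_source stored field is omitted and only the uncompressed size is kept as a doc value, later used as a memory budget when source batches are rebuilt during recovery. Below is a short sketch of the two resulting Lucene field layouts, assuming Lucene is on the classpath; the helper class, method, and boolean flag are illustrative, while the field names mirror the constants above.

```java
import org.apache.lucene.document.Document;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.util.BytesRef;

final class RecoverySourceFieldsSketch {

    /** Mirrors the branch above: full stored recovery source vs. size-only doc value for synthetic recovery. */
    static void addRecoverySourceFields(Document doc, BytesRef originalSource, boolean syntheticRecoverySource) {
        if (syntheticRecoverySource) {
            // synthetic recovery source: keep only the uncompressed size, used to budget recovery batches
            doc.add(new NumericDocValuesField("_recovery_source_size", originalSource.length));
        } else {
            // classic behaviour: store the full original source plus a doc-values marker used for pruning
            doc.add(new StoredField("_recovery_source", originalSource.bytes, originalSource.offset, originalSource.length));
            doc.add(new NumericDocValuesField("_recovery_source", 1));
        }
    }

    public static void main(String[] args) {
        Document doc = new Document();
        addRecoverySourceFields(doc, new BytesRef("{\"field\":\"value\"}"), true);
        System.out.println(doc.getFields()); // only the size doc value is present
    }
}
```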

View file

@ -2600,7 +2600,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
* @param source the source of the request * @param source the source of the request
* @param fromSeqNo the start sequence number (inclusive) * @param fromSeqNo the start sequence number (inclusive)
* @param toSeqNo the end sequence number (inclusive) * @param toSeqNo the end sequence number (inclusive)
* @see #newChangesSnapshot(String, long, long, boolean, boolean, boolean) * @see #newChangesSnapshot(String, long, long, boolean, boolean, boolean, long)
*/ */
public int countChanges(String source, long fromSeqNo, long toSeqNo) throws IOException { public int countChanges(String source, long fromSeqNo, long toSeqNo) throws IOException {
return getEngine().countChanges(source, fromSeqNo, toSeqNo); return getEngine().countChanges(source, fromSeqNo, toSeqNo);
@ -2619,6 +2619,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
* @param singleConsumer true if the snapshot is accessed by only the thread that creates the snapshot. In this case, the * @param singleConsumer true if the snapshot is accessed by only the thread that creates the snapshot. In this case, the
* snapshot can enable some optimizations to improve the performance. * snapshot can enable some optimizations to improve the performance.
* @param accessStats true if the stats of the snapshot is accessed via {@link Translog.Snapshot#totalOperations()} * @param accessStats true if the stats of the snapshot is accessed via {@link Translog.Snapshot#totalOperations()}
* @param maxChunkSize The maximum allowable size, in bytes, for buffering source documents during recovery.
*/ */
public Translog.Snapshot newChangesSnapshot( public Translog.Snapshot newChangesSnapshot(
String source, String source,
@ -2626,9 +2627,10 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
long toSeqNo, long toSeqNo,
boolean requiredFullRange, boolean requiredFullRange,
boolean singleConsumer, boolean singleConsumer,
boolean accessStats boolean accessStats,
long maxChunkSize
) throws IOException { ) throws IOException {
return getEngine().newChangesSnapshot(source, fromSeqNo, toSeqNo, requiredFullRange, singleConsumer, accessStats); return getEngine().newChangesSnapshot(source, fromSeqNo, toSeqNo, requiredFullRange, singleConsumer, accessStats, maxChunkSize);
} }
public List<Segment> segments() { public List<Segment> segments() {

View file

@ -81,7 +81,7 @@ public class PrimaryReplicaSyncer {
// Wrap translog snapshot to make it synchronized as it is accessed by different threads through SnapshotSender. // Wrap translog snapshot to make it synchronized as it is accessed by different threads through SnapshotSender.
// Even though those calls are not concurrent, snapshot.next() uses non-synchronized state and is not multi-thread-compatible // Even though those calls are not concurrent, snapshot.next() uses non-synchronized state and is not multi-thread-compatible
// Also fail the resync early if the shard is shutting down // Also fail the resync early if the shard is shutting down
snapshot = indexShard.newChangesSnapshot("resync", startingSeqNo, Long.MAX_VALUE, false, false, true); snapshot = indexShard.newChangesSnapshot("resync", startingSeqNo, Long.MAX_VALUE, false, false, true, chunkSize.getBytes());
final Translog.Snapshot originalSnapshot = snapshot; final Translog.Snapshot originalSnapshot = snapshot;
final Translog.Snapshot wrappedSnapshot = new Translog.Snapshot() { final Translog.Snapshot wrappedSnapshot = new Translog.Snapshot() {
@Override @Override

View file

@ -98,6 +98,7 @@ public class IndexShardSnapshotStatus {
private long processedSize; private long processedSize;
private String failure; private String failure;
private final SubscribableListener<AbortStatus> abortListeners = new SubscribableListener<>(); private final SubscribableListener<AbortStatus> abortListeners = new SubscribableListener<>();
private volatile String statusDescription;
private IndexShardSnapshotStatus( private IndexShardSnapshotStatus(
final Stage stage, final Stage stage,
@ -110,7 +111,8 @@ public class IndexShardSnapshotStatus {
final long totalSize, final long totalSize,
final long processedSize, final long processedSize,
final String failure, final String failure,
final ShardGeneration generation final ShardGeneration generation,
final String statusDescription
) { ) {
this.stage = new AtomicReference<>(Objects.requireNonNull(stage)); this.stage = new AtomicReference<>(Objects.requireNonNull(stage));
this.generation = new AtomicReference<>(generation); this.generation = new AtomicReference<>(generation);
@ -124,6 +126,7 @@ public class IndexShardSnapshotStatus {
this.processedSize = processedSize; this.processedSize = processedSize;
this.incrementalSize = incrementalSize; this.incrementalSize = incrementalSize;
this.failure = failure; this.failure = failure;
updateStatusDescription(statusDescription);
} }
public synchronized Copy moveToStarted( public synchronized Copy moveToStarted(
@ -272,6 +275,15 @@ public class IndexShardSnapshotStatus {
processedSize += totalSize; processedSize += totalSize;
} }
/**
* Updates the string explanation for what the snapshot is actively doing right now.
*/
public void updateStatusDescription(String statusString) {
assert statusString != null;
assert statusString.isEmpty() == false;
this.statusDescription = statusString;
}
/** /**
* Returns a copy of the current {@link IndexShardSnapshotStatus}. This method is * Returns a copy of the current {@link IndexShardSnapshotStatus}. This method is
* intended to be used when a coherent state of {@link IndexShardSnapshotStatus} is needed. * intended to be used when a coherent state of {@link IndexShardSnapshotStatus} is needed.
@ -289,12 +301,13 @@ public class IndexShardSnapshotStatus {
incrementalSize, incrementalSize,
totalSize, totalSize,
processedSize, processedSize,
failure failure,
statusDescription
); );
} }
public static IndexShardSnapshotStatus newInitializing(ShardGeneration generation) { public static IndexShardSnapshotStatus newInitializing(ShardGeneration generation) {
return new IndexShardSnapshotStatus(Stage.INIT, 0L, 0L, 0, 0, 0, 0, 0, 0, null, generation); return new IndexShardSnapshotStatus(Stage.INIT, 0L, 0L, 0, 0, 0, 0, 0, 0, null, generation, "initializing");
} }
public static IndexShardSnapshotStatus.Copy newFailed(final String failure) { public static IndexShardSnapshotStatus.Copy newFailed(final String failure) {
@ -302,7 +315,7 @@ public class IndexShardSnapshotStatus {
if (failure == null) { if (failure == null) {
throw new IllegalArgumentException("A failure description is required for a failed IndexShardSnapshotStatus"); throw new IllegalArgumentException("A failure description is required for a failed IndexShardSnapshotStatus");
} }
return new IndexShardSnapshotStatus(Stage.FAILURE, 0L, 0L, 0, 0, 0, 0, 0, 0, failure, null).asCopy(); return new IndexShardSnapshotStatus(Stage.FAILURE, 0L, 0L, 0, 0, 0, 0, 0, 0, failure, null, "initialized as failed").asCopy();
} }
public static IndexShardSnapshotStatus.Copy newDone( public static IndexShardSnapshotStatus.Copy newDone(
@ -326,7 +339,8 @@ public class IndexShardSnapshotStatus {
size, size,
incrementalSize, incrementalSize,
null, null,
generation generation,
"initialized as done"
).asCopy(); ).asCopy();
} }
@ -345,6 +359,7 @@ public class IndexShardSnapshotStatus {
private final long processedSize; private final long processedSize;
private final long incrementalSize; private final long incrementalSize;
private final String failure; private final String failure;
private final String statusDescription;
public Copy( public Copy(
final Stage stage, final Stage stage,
@ -356,7 +371,8 @@ public class IndexShardSnapshotStatus {
final long incrementalSize, final long incrementalSize,
final long totalSize, final long totalSize,
final long processedSize, final long processedSize,
final String failure final String failure,
final String statusDescription
) { ) {
this.stage = stage; this.stage = stage;
this.startTime = startTime; this.startTime = startTime;
@ -368,6 +384,7 @@ public class IndexShardSnapshotStatus {
this.processedSize = processedSize; this.processedSize = processedSize;
this.incrementalSize = incrementalSize; this.incrementalSize = incrementalSize;
this.failure = failure; this.failure = failure;
this.statusDescription = statusDescription;
} }
public Stage getStage() { public Stage getStage() {
@ -410,6 +427,10 @@ public class IndexShardSnapshotStatus {
return failure; return failure;
} }
public String getStatusDescription() {
return statusDescription;
}
@Override @Override
public String toString() { public String toString() {
return "index shard snapshot status (" return "index shard snapshot status ("
@ -433,6 +454,8 @@ public class IndexShardSnapshotStatus {
+ processedSize + processedSize
+ ", failure='" + ", failure='"
+ failure + failure
+ "', statusDescription='"
+ statusDescription
+ '\'' + '\''
+ ')'; + ')';
} }
@ -461,6 +484,8 @@ public class IndexShardSnapshotStatus {
+ processedSize + processedSize
+ ", failure='" + ", failure='"
+ failure + failure
+ "', statusDescription='"
+ statusDescription
+ '\'' + '\''
+ ')'; + ')';
} }
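Every stage transition now records a human-readable statusDescription, so a stuck shard snapshot can be diagnosed from its status output rather than from thread dumps. Here is a compact plain-Java sketch of that pattern; StagedTaskStatus and its stage strings are placeholders, not the IndexShardSnapshotStatus API.

```java
import java.util.concurrent.atomic.AtomicReference;

/** Tracks a free-form description of the current stage so a stuck task can be diagnosed from logs or status APIs. */
final class StagedTaskStatus {
    private final AtomicReference<String> description = new AtomicReference<>("initializing");

    void updateDescription(String status) {
        assert status != null && status.isEmpty() == false;
        description.set(status);
    }

    @Override
    public String toString() {
        return "task status (description='" + description.get() + "')";
    }

    public static void main(String[] args) {
        StagedTaskStatus status = new StagedTaskStatus();
        status.updateDescription("queued in snapshot task runner");
        status.updateDescription("listing blob prefixes");
        status.updateDescription("uploading files");
        System.out.println(status); // prints the most recent stage description
    }
}
```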

View file

@ -399,6 +399,18 @@ public class RecoverySettings {
public static final ByteSizeValue DEFAULT_CHUNK_SIZE = new ByteSizeValue(512, ByteSizeUnit.KB); public static final ByteSizeValue DEFAULT_CHUNK_SIZE = new ByteSizeValue(512, ByteSizeUnit.KB);
/**
* The maximum allowable size, in bytes, for buffering source documents during recovery.
*/
public static final Setting<ByteSizeValue> INDICES_RECOVERY_CHUNK_SIZE = Setting.byteSizeSetting(
"indices.recovery.chunk_size",
DEFAULT_CHUNK_SIZE,
ByteSizeValue.ZERO,
ByteSizeValue.ofBytes(Integer.MAX_VALUE),
Property.NodeScope,
Property.Dynamic
);
private volatile ByteSizeValue maxBytesPerSec; private volatile ByteSizeValue maxBytesPerSec;
private volatile int maxConcurrentFileChunks; private volatile int maxConcurrentFileChunks;
private volatile int maxConcurrentOperations; private volatile int maxConcurrentOperations;
@ -417,7 +429,7 @@ public class RecoverySettings {
private final AdjustableSemaphore maxSnapshotFileDownloadsPerNodeSemaphore; private final AdjustableSemaphore maxSnapshotFileDownloadsPerNodeSemaphore;
private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE; private volatile ByteSizeValue chunkSize;
private final ByteSizeValue availableNetworkBandwidth; private final ByteSizeValue availableNetworkBandwidth;
private final ByteSizeValue availableDiskReadBandwidth; private final ByteSizeValue availableDiskReadBandwidth;
@ -444,6 +456,7 @@ public class RecoverySettings {
this.availableNetworkBandwidth = NODE_BANDWIDTH_RECOVERY_NETWORK_SETTING.get(settings); this.availableNetworkBandwidth = NODE_BANDWIDTH_RECOVERY_NETWORK_SETTING.get(settings);
this.availableDiskReadBandwidth = NODE_BANDWIDTH_RECOVERY_DISK_READ_SETTING.get(settings); this.availableDiskReadBandwidth = NODE_BANDWIDTH_RECOVERY_DISK_READ_SETTING.get(settings);
this.availableDiskWriteBandwidth = NODE_BANDWIDTH_RECOVERY_DISK_WRITE_SETTING.get(settings); this.availableDiskWriteBandwidth = NODE_BANDWIDTH_RECOVERY_DISK_WRITE_SETTING.get(settings);
this.chunkSize = INDICES_RECOVERY_CHUNK_SIZE.get(settings);
validateNodeBandwidthRecoverySettings(settings); validateNodeBandwidthRecoverySettings(settings);
this.nodeBandwidthSettingsExist = hasNodeBandwidthRecoverySettings(settings); this.nodeBandwidthSettingsExist = hasNodeBandwidthRecoverySettings(settings);
computeMaxBytesPerSec(settings); computeMaxBytesPerSec(settings);
@ -493,6 +506,7 @@ public class RecoverySettings {
CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING,
this::setMaxConcurrentIncomingRecoveries this::setMaxConcurrentIncomingRecoveries
); );
clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_CHUNK_SIZE, this::setChunkSize);
} }
private void computeMaxBytesPerSec(Settings settings) { private void computeMaxBytesPerSec(Settings settings) {
@ -597,7 +611,7 @@ public class RecoverySettings {
return chunkSize; return chunkSize;
} }
public void setChunkSize(ByteSizeValue chunkSize) { // only settable for tests public void setChunkSize(ByteSizeValue chunkSize) {
if (chunkSize.bytesAsInt() <= 0) { if (chunkSize.bytesAsInt() <= 0) {
throw new IllegalArgumentException("chunkSize must be > 0"); throw new IllegalArgumentException("chunkSize must be > 0");
} }
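indices.recovery.chunk_size is now a real node-scoped, dynamic setting (bounded to at most Integer.MAX_VALUE bytes) rather than a test-only knob, and updated values are pushed to setChunkSize through the settings-update consumer registered above. Below is a small sketch of reading the setting through the constant defined in this hunk; it assumes the Elasticsearch server classes are on the classpath and that the import paths match their usual locations.

```java
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.indices.recovery.RecoverySettings;

/** Reads the new dynamic recovery chunk-size setting. */
final class RecoveryChunkSizeExample {
    public static void main(String[] args) {
        // Falls back to the 512kb default when the key is absent.
        ByteSizeValue defaultValue = RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.get(Settings.EMPTY);

        // Explicit value; anything outside the [0, Integer.MAX_VALUE] byte bounds would be rejected.
        Settings settings = Settings.builder().put("indices.recovery.chunk_size", "256kb").build();
        ByteSizeValue configured = RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.get(settings);

        System.out.println(defaultValue + " -> " + configured); // 512kb -> 256kb
    }
}
```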

View file

@ -324,7 +324,8 @@ public class RecoverySourceHandler {
Long.MAX_VALUE, Long.MAX_VALUE,
false, false,
false, false,
true true,
chunkSizeInBytes
); );
resources.add(phase2Snapshot); resources.add(phase2Snapshot);
retentionLock.close(); retentionLock.close();

View file

@ -3186,6 +3186,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
@Override @Override
public void snapshotShard(SnapshotShardContext context) { public void snapshotShard(SnapshotShardContext context) {
context.status().updateStatusDescription("queued in snapshot task runner");
shardSnapshotTaskRunner.enqueueShardSnapshot(context); shardSnapshotTaskRunner.enqueueShardSnapshot(context);
} }
@ -3198,6 +3199,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
final ShardId shardId = store.shardId(); final ShardId shardId = store.shardId();
final SnapshotId snapshotId = context.snapshotId(); final SnapshotId snapshotId = context.snapshotId();
final IndexShardSnapshotStatus snapshotStatus = context.status(); final IndexShardSnapshotStatus snapshotStatus = context.status();
snapshotStatus.updateStatusDescription("snapshot task runner: setting up shard snapshot");
final long startTime = threadPool.absoluteTimeInMillis(); final long startTime = threadPool.absoluteTimeInMillis();
try { try {
final ShardGeneration generation = snapshotStatus.generation(); final ShardGeneration generation = snapshotStatus.generation();
@ -3206,6 +3208,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
final Set<String> blobs; final Set<String> blobs;
if (generation == null) { if (generation == null) {
snapshotStatus.ensureNotAborted(); snapshotStatus.ensureNotAborted();
snapshotStatus.updateStatusDescription("snapshot task runner: listing blob prefixes");
try { try {
blobs = shardContainer.listBlobsByPrefix(OperationPurpose.SNAPSHOT_METADATA, SNAPSHOT_INDEX_PREFIX).keySet(); blobs = shardContainer.listBlobsByPrefix(OperationPurpose.SNAPSHOT_METADATA, SNAPSHOT_INDEX_PREFIX).keySet();
} catch (IOException e) { } catch (IOException e) {
@ -3216,6 +3219,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
} }
snapshotStatus.ensureNotAborted(); snapshotStatus.ensureNotAborted();
snapshotStatus.updateStatusDescription("snapshot task runner: loading snapshot blobs");
Tuple<BlobStoreIndexShardSnapshots, ShardGeneration> tuple = buildBlobStoreIndexShardSnapshots( Tuple<BlobStoreIndexShardSnapshots, ShardGeneration> tuple = buildBlobStoreIndexShardSnapshots(
context.indexId(), context.indexId(),
shardId.id(), shardId.id(),
@ -3316,6 +3320,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
indexCommitPointFiles = filesFromSegmentInfos; indexCommitPointFiles = filesFromSegmentInfos;
} }
snapshotStatus.updateStatusDescription("snapshot task runner: starting shard snapshot");
snapshotStatus.moveToStarted( snapshotStatus.moveToStarted(
startTime, startTime,
indexIncrementalFileCount, indexIncrementalFileCount,
@ -3342,6 +3347,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
BlobStoreIndexShardSnapshot.FileInfo.SERIALIZE_WRITER_UUID, BlobStoreIndexShardSnapshot.FileInfo.SERIALIZE_WRITER_UUID,
Boolean.toString(writeFileInfoWriterUUID) Boolean.toString(writeFileInfoWriterUUID)
); );
snapshotStatus.updateStatusDescription("snapshot task runner: updating blob store with new shard generation");
INDEX_SHARD_SNAPSHOTS_FORMAT.write( INDEX_SHARD_SNAPSHOTS_FORMAT.write(
updatedBlobStoreIndexShardSnapshots, updatedBlobStoreIndexShardSnapshots,
shardContainer, shardContainer,
@ -3387,6 +3393,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
BlobStoreIndexShardSnapshot.FileInfo.SERIALIZE_WRITER_UUID, BlobStoreIndexShardSnapshot.FileInfo.SERIALIZE_WRITER_UUID,
Boolean.toString(writeFileInfoWriterUUID) Boolean.toString(writeFileInfoWriterUUID)
); );
snapshotStatus.updateStatusDescription("no shard generations: writing new index-${N} file");
writeShardIndexBlobAtomic(shardContainer, newGen, updatedBlobStoreIndexShardSnapshots, serializationParams); writeShardIndexBlobAtomic(shardContainer, newGen, updatedBlobStoreIndexShardSnapshots, serializationParams);
} catch (IOException e) { } catch (IOException e) {
throw new IndexShardSnapshotFailedException( throw new IndexShardSnapshotFailedException(
@ -3401,6 +3408,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
} }
snapshotStatus.addProcessedFiles(finalFilesInShardMetadataCount, finalFilesInShardMetadataSize); snapshotStatus.addProcessedFiles(finalFilesInShardMetadataCount, finalFilesInShardMetadataSize);
try { try {
snapshotStatus.updateStatusDescription("no shard generations: deleting blobs");
deleteFromContainer(OperationPurpose.SNAPSHOT_METADATA, shardContainer, blobsToDelete.iterator()); deleteFromContainer(OperationPurpose.SNAPSHOT_METADATA, shardContainer, blobsToDelete.iterator());
} catch (IOException e) { } catch (IOException e) {
logger.warn( logger.warn(
@ -3414,6 +3422,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
// filesToSnapshot will be emptied while snapshotting the file. We make a copy here for cleanup purpose in case of failure. // filesToSnapshot will be emptied while snapshotting the file. We make a copy here for cleanup purpose in case of failure.
final AtomicReference<List<FileInfo>> fileToCleanUp = new AtomicReference<>(List.copyOf(filesToSnapshot)); final AtomicReference<List<FileInfo>> fileToCleanUp = new AtomicReference<>(List.copyOf(filesToSnapshot));
final ActionListener<Collection<Void>> allFilesUploadedListener = ActionListener.assertOnce(ActionListener.wrap(ignore -> { final ActionListener<Collection<Void>> allFilesUploadedListener = ActionListener.assertOnce(ActionListener.wrap(ignore -> {
snapshotStatus.updateStatusDescription("all files uploaded: finalizing");
final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(); final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize();
// now create and write the commit point // now create and write the commit point
@ -3435,6 +3444,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
BlobStoreIndexShardSnapshot.FileInfo.SERIALIZE_WRITER_UUID, BlobStoreIndexShardSnapshot.FileInfo.SERIALIZE_WRITER_UUID,
Boolean.toString(writeFileInfoWriterUUID) Boolean.toString(writeFileInfoWriterUUID)
); );
snapshotStatus.updateStatusDescription("all files uploaded: writing to index shard file");
INDEX_SHARD_SNAPSHOT_FORMAT.write( INDEX_SHARD_SNAPSHOT_FORMAT.write(
blobStoreIndexShardSnapshot, blobStoreIndexShardSnapshot,
shardContainer, shardContainer,
@ -3451,10 +3461,12 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
ByteSizeValue.ofBytes(blobStoreIndexShardSnapshot.totalSize()), ByteSizeValue.ofBytes(blobStoreIndexShardSnapshot.totalSize()),
getSegmentInfoFileCount(blobStoreIndexShardSnapshot.indexFiles()) getSegmentInfoFileCount(blobStoreIndexShardSnapshot.indexFiles())
); );
snapshotStatus.updateStatusDescription("all files uploaded: done");
snapshotStatus.moveToDone(threadPool.absoluteTimeInMillis(), shardSnapshotResult); snapshotStatus.moveToDone(threadPool.absoluteTimeInMillis(), shardSnapshotResult);
context.onResponse(shardSnapshotResult); context.onResponse(shardSnapshotResult);
}, e -> { }, e -> {
try { try {
snapshotStatus.updateStatusDescription("all files uploaded: cleaning up data files, exception while finalizing: " + e);
shardContainer.deleteBlobsIgnoringIfNotExists( shardContainer.deleteBlobsIgnoringIfNotExists(
OperationPurpose.SNAPSHOT_DATA, OperationPurpose.SNAPSHOT_DATA,
Iterators.flatMap(fileToCleanUp.get().iterator(), f -> Iterators.forRange(0, f.numberOfParts(), f::partName)) Iterators.flatMap(fileToCleanUp.get().iterator(), f -> Iterators.forRange(0, f.numberOfParts(), f::partName))
@ -3484,12 +3496,10 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
// A normally running shard snapshot should be in stage INIT or STARTED. And we know it's not in PAUSING or ABORTED because // A normally running shard snapshot should be in stage INIT or STARTED. And we know it's not in PAUSING or ABORTED because
// the ensureNotAborted() call above did not throw. The remaining options don't make sense, if they ever happen. // the ensureNotAborted() call above did not throw. The remaining options don't make sense, if they ever happen.
logger.error( logger.error(
() -> Strings.format( "Shard snapshot found an unexpected state. ShardId [{}], SnapshotID [{}], Stage [{}]",
"Shard snapshot found an unexpected state. ShardId [{}], SnapshotID [{}], Stage [{}]", shardId,
shardId, snapshotId,
snapshotId, shardSnapshotStage
shardSnapshotStage
)
); );
assert false; assert false;
} }
@ -3519,6 +3529,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
) { ) {
final int noOfFilesToSnapshot = filesToSnapshot.size(); final int noOfFilesToSnapshot = filesToSnapshot.size();
final ActionListener<Void> filesListener = fileQueueListener(filesToSnapshot, noOfFilesToSnapshot, allFilesUploadedListener); final ActionListener<Void> filesListener = fileQueueListener(filesToSnapshot, noOfFilesToSnapshot, allFilesUploadedListener);
context.status().updateStatusDescription("enqueued file snapshot tasks: threads running concurrent file uploads");
for (int i = 0; i < noOfFilesToSnapshot; i++) { for (int i = 0; i < noOfFilesToSnapshot; i++) {
shardSnapshotTaskRunner.enqueueFileSnapshot(context, filesToSnapshot::poll, filesListener); shardSnapshotTaskRunner.enqueueFileSnapshot(context, filesToSnapshot::poll, filesListener);
} }

View file

@ -22,13 +22,13 @@ import java.util.Set;
/** /**
* Encapsulate multiple handlers for the same path, allowing different handlers for different HTTP verbs and versions. * Encapsulate multiple handlers for the same path, allowing different handlers for different HTTP verbs and versions.
*/ */
final class MethodHandlers { public final class MethodHandlers {
private final String path; private final String path;
private final Map<RestRequest.Method, Map<RestApiVersion, RestHandler>> methodHandlers; private final Map<RestRequest.Method, Map<RestApiVersion, RestHandler>> methodHandlers;
@SuppressWarnings("unused") // only accessed via #STATS_TRACKER_HANDLE, lazy initialized because instances consume non-trivial heap @SuppressWarnings("unused") // only accessed via #STATS_TRACKER_HANDLE, lazy initialized because instances consume non-trivial heap
private volatile HttpRouteStatsTracker statsTracker; private HttpRouteStatsTracker statsTracker;
private static final VarHandle STATS_TRACKER_HANDLE; private static final VarHandle STATS_TRACKER_HANDLE;
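Dropping volatile is safe here because every read and write of statsTracker goes through STATS_TRACKER_HANDLE with explicit memory-ordering semantics, and the tracker is only allocated on first use since each instance costs non-trivial heap. Below is a generic sketch of that lazy-initialization-via-VarHandle idiom in plain Java; LazyTrackerHolder and the Object stand-in are illustrative, not the MethodHandlers code.

```java
import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;

/** Lazily allocates an expensive object, publishing it safely without declaring the field volatile. */
final class LazyTrackerHolder {
    @SuppressWarnings("unused") // only accessed via TRACKER_HANDLE
    private Object tracker;

    private static final VarHandle TRACKER_HANDLE;
    static {
        try {
            TRACKER_HANDLE = MethodHandles.lookup().findVarHandle(LazyTrackerHolder.class, "tracker", Object.class);
        } catch (ReflectiveOperationException e) {
            throw new ExceptionInInitializerError(e);
        }
    }

    Object trackerOrAllocate() {
        Object existing = TRACKER_HANDLE.getAcquire(this);
        if (existing != null) {
            return existing;
        }
        Object fresh = new Object(); // stand-in for the expensive stats tracker
        if (TRACKER_HANDLE.compareAndSet(this, null, fresh)) {
            return fresh;
        }
        return TRACKER_HANDLE.getAcquire(this); // another thread won the race; use its instance
    }

    public static void main(String[] args) {
        LazyTrackerHolder holder = new LazyTrackerHolder();
        System.out.println(holder.trackerOrAllocate() == holder.trackerOrAllocate()); // true: allocated once
    }
}
```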

View file

@ -11,7 +11,8 @@ package org.elasticsearch.rest.action.search;
import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.core.RestApiVersion;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestCancellableNodeClient; import org.elasticsearch.rest.action.RestCancellableNodeClient;
@ -34,19 +35,12 @@ public class RestKnnSearchAction extends BaseRestHandler {
public RestKnnSearchAction() {} public RestKnnSearchAction() {}
@UpdateForV9(owner = UpdateForV9.Owner.SEARCH_RELEVANCE)
// these routes were ".deprecated" in RestApiVersion.V_8 which will require use of REST API compatibility headers to access
// this API in v9. It is unclear if this was intentional for v9, and the code has been updated to ".deprecateAndKeep" which will
// continue to emit deprecations warnings but will not require any special headers to access the API in v9.
// Please review and update the code and tests as needed. The original code remains commented out below for reference.
@Override @Override
@UpdateForV10(owner = UpdateForV10.Owner.SEARCH_RELEVANCE)
public List<Route> routes() { public List<Route> routes() {
return List.of( return List.of(
// Route.builder(GET, "{index}/_knn_search").deprecated(DEPRECATION_MESSAGE, RestApiVersion.V_8).build(), Route.builder(GET, "{index}/_knn_search").deprecatedForRemoval(DEPRECATION_MESSAGE, RestApiVersion.V_8).build(),
// Route.builder(POST, "{index}/_knn_search").deprecated(DEPRECATION_MESSAGE, RestApiVersion.V_8).build() Route.builder(POST, "{index}/_knn_search").deprecatedForRemoval(DEPRECATION_MESSAGE, RestApiVersion.V_8).build()
Route.builder(GET, "{index}/_knn_search").deprecateAndKeep(DEPRECATION_MESSAGE).build(),
Route.builder(POST, "{index}/_knn_search").deprecateAndKeep(DEPRECATION_MESSAGE).build()
); );
} }

View file

@ -147,6 +147,8 @@ import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicLong;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.LongSupplier; import java.util.function.LongSupplier;
import java.util.function.Supplier; import java.util.function.Supplier;
@ -549,16 +551,17 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
// check if we can shortcut the query phase entirely. // check if we can shortcut the query phase entirely.
if (orig.canReturnNullResponseIfMatchNoDocs()) { if (orig.canReturnNullResponseIfMatchNoDocs()) {
assert orig.scroll() == null; assert orig.scroll() == null;
final CanMatchShardResponse canMatchResp; ShardSearchRequest clone = new ShardSearchRequest(orig);
try { CanMatchContext canMatchContext = new CanMatchContext(
ShardSearchRequest clone = new ShardSearchRequest(orig); clone,
canMatchResp = canMatch(clone, false); indicesService::indexServiceSafe,
} catch (Exception exc) { this::findReaderContext,
l.onFailure(exc); defaultKeepAlive,
return; maxKeepAlive
} );
CanMatchShardResponse canMatchResp = canMatch(canMatchContext, false);
if (canMatchResp.canMatch() == false) { if (canMatchResp.canMatch() == false) {
l.onResponse(QuerySearchResult.nullInstance()); listener.onResponse(QuerySearchResult.nullInstance());
return; return;
} }
} }
@ -1191,10 +1194,14 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
} }
private long getKeepAlive(ShardSearchRequest request) { private long getKeepAlive(ShardSearchRequest request) {
return getKeepAlive(request, defaultKeepAlive, maxKeepAlive);
}
private static long getKeepAlive(ShardSearchRequest request, long defaultKeepAlive, long maxKeepAlive) {
if (request.scroll() != null) { if (request.scroll() != null) {
return getScrollKeepAlive(request.scroll()); return getScrollKeepAlive(request.scroll(), defaultKeepAlive, maxKeepAlive);
} else if (request.keepAlive() != null) { } else if (request.keepAlive() != null) {
checkKeepAliveLimit(request.keepAlive().millis()); checkKeepAliveLimit(request.keepAlive().millis(), maxKeepAlive);
return request.keepAlive().getMillis(); return request.keepAlive().getMillis();
} else { } else {
return request.readerId() == null ? defaultKeepAlive : -1; return request.readerId() == null ? defaultKeepAlive : -1;
@ -1202,14 +1209,22 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
} }
private long getScrollKeepAlive(Scroll scroll) { private long getScrollKeepAlive(Scroll scroll) {
return getScrollKeepAlive(scroll, defaultKeepAlive, maxKeepAlive);
}
private static long getScrollKeepAlive(Scroll scroll, long defaultKeepAlive, long maxKeepAlive) {
if (scroll != null && scroll.keepAlive() != null) { if (scroll != null && scroll.keepAlive() != null) {
checkKeepAliveLimit(scroll.keepAlive().millis()); checkKeepAliveLimit(scroll.keepAlive().millis(), maxKeepAlive);
return scroll.keepAlive().getMillis(); return scroll.keepAlive().getMillis();
} }
return defaultKeepAlive; return defaultKeepAlive;
} }
private void checkKeepAliveLimit(long keepAlive) { private void checkKeepAliveLimit(long keepAlive) {
checkKeepAliveLimit(keepAlive, maxKeepAlive);
}
private static void checkKeepAliveLimit(long keepAlive, long maxKeepAlive) {
if (keepAlive > maxKeepAlive) { if (keepAlive > maxKeepAlive) {
throw new IllegalArgumentException( throw new IllegalArgumentException(
"Keep alive for request (" "Keep alive for request ("
@ -1620,6 +1635,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
final List<CanMatchNodeResponse.ResponseOrFailure> responses = new ArrayList<>(shardLevelRequests.size()); final List<CanMatchNodeResponse.ResponseOrFailure> responses = new ArrayList<>(shardLevelRequests.size());
for (var shardLevelRequest : shardLevelRequests) { for (var shardLevelRequest : shardLevelRequests) {
try { try {
// TODO remove the exception handling as it's now in canMatch itself
responses.add(new CanMatchNodeResponse.ResponseOrFailure(canMatch(request.createShardSearchRequest(shardLevelRequest)))); responses.add(new CanMatchNodeResponse.ResponseOrFailure(canMatch(request.createShardSearchRequest(shardLevelRequest))));
} catch (Exception e) { } catch (Exception e) {
responses.add(new CanMatchNodeResponse.ResponseOrFailure(e)); responses.add(new CanMatchNodeResponse.ResponseOrFailure(e));
@ -1631,82 +1647,145 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
/** /**
* This method uses a lightweight searcher without wrapping (i.e., not open a full reader on frozen indices) to rewrite the query * This method uses a lightweight searcher without wrapping (i.e., not open a full reader on frozen indices) to rewrite the query
* to check if the query can match any documents. This method can have false positives while if it returns {@code false} the query * to check if the query can match any documents. This method can have false positives while if it returns {@code false} the query
* won't match any documents on the current shard. * won't match any documents on the current shard. Exceptions are handled within the method, and never re-thrown.
*/ */
public CanMatchShardResponse canMatch(ShardSearchRequest request) throws IOException { public CanMatchShardResponse canMatch(ShardSearchRequest request) {
return canMatch(request, true); CanMatchContext canMatchContext = new CanMatchContext(
request,
indicesService::indexServiceSafe,
this::findReaderContext,
defaultKeepAlive,
maxKeepAlive
);
return canMatch(canMatchContext, true);
} }
private CanMatchShardResponse canMatch(ShardSearchRequest request, boolean checkRefreshPending) throws IOException { static class CanMatchContext {
assert request.searchType() == SearchType.QUERY_THEN_FETCH : "unexpected search type: " + request.searchType(); private final ShardSearchRequest request;
private final Function<Index, IndexService> indexServiceLookup;
private final BiFunction<ShardSearchContextId, TransportRequest, ReaderContext> findReaderContext;
private final long defaultKeepAlive;
private final long maxKeepAlive;
private IndexService indexService;
CanMatchContext(
ShardSearchRequest request,
Function<Index, IndexService> indexServiceLookup,
BiFunction<ShardSearchContextId, TransportRequest, ReaderContext> findReaderContext,
long defaultKeepAlive,
long maxKeepAlive
) {
this.request = request;
this.indexServiceLookup = indexServiceLookup;
this.findReaderContext = findReaderContext;
this.defaultKeepAlive = defaultKeepAlive;
this.maxKeepAlive = maxKeepAlive;
}
long getKeepAlive() {
return SearchService.getKeepAlive(request, defaultKeepAlive, maxKeepAlive);
}
ReaderContext findReaderContext() {
return findReaderContext.apply(request.readerId(), request);
}
QueryRewriteContext getQueryRewriteContext(IndexService indexService) {
return indexService.newQueryRewriteContext(request::nowInMillis, request.getRuntimeMappings(), request.getClusterAlias());
}
SearchExecutionContext getSearchExecutionContext(Engine.Searcher searcher) {
return getIndexService().newSearchExecutionContext(
request.shardId().id(),
0,
searcher,
request::nowInMillis,
request.getClusterAlias(),
request.getRuntimeMappings()
);
}
IndexShard getShard() {
return getIndexService().getShard(request.shardId().getId());
}
IndexService getIndexService() {
if (this.indexService == null) {
this.indexService = indexServiceLookup.apply(request.shardId().getIndex());
}
return this.indexService;
}
}
static CanMatchShardResponse canMatch(CanMatchContext canMatchContext, boolean checkRefreshPending) {
assert canMatchContext.request.searchType() == SearchType.QUERY_THEN_FETCH
: "unexpected search type: " + canMatchContext.request.searchType();
Releasable releasable = null; Releasable releasable = null;
try { try {
IndexService indexService; IndexService indexService;
final boolean hasRefreshPending; final boolean hasRefreshPending;
final Engine.Searcher canMatchSearcher; final Engine.Searcher canMatchSearcher;
if (request.readerId() != null) { if (canMatchContext.request.readerId() != null) {
hasRefreshPending = false; hasRefreshPending = false;
ReaderContext readerContext; ReaderContext readerContext;
Engine.Searcher searcher; Engine.Searcher searcher;
try { try {
readerContext = findReaderContext(request.readerId(), request); readerContext = canMatchContext.findReaderContext();
releasable = readerContext.markAsUsed(getKeepAlive(request)); releasable = readerContext.markAsUsed(canMatchContext.getKeepAlive());
indexService = readerContext.indexService(); indexService = readerContext.indexService();
if (canMatchAfterRewrite(request, indexService) == false) { QueryRewriteContext queryRewriteContext = canMatchContext.getQueryRewriteContext(indexService);
if (queryStillMatchesAfterRewrite(canMatchContext.request, queryRewriteContext) == false) {
return new CanMatchShardResponse(false, null); return new CanMatchShardResponse(false, null);
} }
searcher = readerContext.acquireSearcher(Engine.CAN_MATCH_SEARCH_SOURCE); searcher = readerContext.acquireSearcher(Engine.CAN_MATCH_SEARCH_SOURCE);
} catch (SearchContextMissingException e) { } catch (SearchContextMissingException e) {
final String searcherId = request.readerId().getSearcherId(); final String searcherId = canMatchContext.request.readerId().getSearcherId();
if (searcherId == null) { if (searcherId == null) {
throw e; return new CanMatchShardResponse(true, null);
} }
indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); if (queryStillMatchesAfterRewrite(
if (canMatchAfterRewrite(request, indexService) == false) { canMatchContext.request,
canMatchContext.getQueryRewriteContext(canMatchContext.getIndexService())
) == false) {
return new CanMatchShardResponse(false, null); return new CanMatchShardResponse(false, null);
} }
IndexShard indexShard = indexService.getShard(request.shardId().getId()); final Engine.SearcherSupplier searcherSupplier = canMatchContext.getShard().acquireSearcherSupplier();
final Engine.SearcherSupplier searcherSupplier = indexShard.acquireSearcherSupplier();
if (searcherId.equals(searcherSupplier.getSearcherId()) == false) { if (searcherId.equals(searcherSupplier.getSearcherId()) == false) {
searcherSupplier.close(); searcherSupplier.close();
throw e; return new CanMatchShardResponse(true, null);
} }
releasable = searcherSupplier; releasable = searcherSupplier;
searcher = searcherSupplier.acquireSearcher(Engine.CAN_MATCH_SEARCH_SOURCE); searcher = searcherSupplier.acquireSearcher(Engine.CAN_MATCH_SEARCH_SOURCE);
} }
canMatchSearcher = searcher; canMatchSearcher = searcher;
} else { } else {
indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); if (queryStillMatchesAfterRewrite(
if (canMatchAfterRewrite(request, indexService) == false) { canMatchContext.request,
canMatchContext.getQueryRewriteContext(canMatchContext.getIndexService())
) == false) {
return new CanMatchShardResponse(false, null); return new CanMatchShardResponse(false, null);
} }
IndexShard indexShard = indexService.getShard(request.shardId().getId()); boolean needsWaitForRefresh = canMatchContext.request.waitForCheckpoint() != UNASSIGNED_SEQ_NO;
boolean needsWaitForRefresh = request.waitForCheckpoint() != UNASSIGNED_SEQ_NO;
// If this request wait_for_refresh behavior, it is safest to assume a refresh is pending. Theoretically, // If this request wait_for_refresh behavior, it is safest to assume a refresh is pending. Theoretically,
// this can be improved in the future by manually checking that the requested checkpoint has already been refresh. // this can be improved in the future by manually checking that the requested checkpoint has already been refresh.
// However, this will request modifying the engine to surface that information. // However, this will request modifying the engine to surface that information.
IndexShard indexShard = canMatchContext.getShard();
hasRefreshPending = needsWaitForRefresh || (indexShard.hasRefreshPending() && checkRefreshPending); hasRefreshPending = needsWaitForRefresh || (indexShard.hasRefreshPending() && checkRefreshPending);
canMatchSearcher = indexShard.acquireSearcher(Engine.CAN_MATCH_SEARCH_SOURCE); canMatchSearcher = indexShard.acquireSearcher(Engine.CAN_MATCH_SEARCH_SOURCE);
} }
try (canMatchSearcher) { try (canMatchSearcher) {
SearchExecutionContext context = indexService.newSearchExecutionContext( SearchExecutionContext context = canMatchContext.getSearchExecutionContext(canMatchSearcher);
request.shardId().id(), final boolean canMatch = queryStillMatchesAfterRewrite(canMatchContext.request, context);
0,
canMatchSearcher,
request::nowInMillis,
request.getClusterAlias(),
request.getRuntimeMappings()
);
final boolean canMatch = queryStillMatchesAfterRewrite(request, context);
final MinAndMax<?> minMax;
if (canMatch || hasRefreshPending) { if (canMatch || hasRefreshPending) {
FieldSortBuilder sortBuilder = FieldSortBuilder.getPrimaryFieldSortOrNull(request.source()); FieldSortBuilder sortBuilder = FieldSortBuilder.getPrimaryFieldSortOrNull(canMatchContext.request.source());
minMax = sortBuilder != null ? FieldSortBuilder.getMinMaxOrNull(context, sortBuilder) : null; final MinAndMax<?> minMax = sortBuilder != null ? FieldSortBuilder.getMinMaxOrNull(context, sortBuilder) : null;
} else { return new CanMatchShardResponse(true, minMax);
minMax = null;
} }
return new CanMatchShardResponse(canMatch || hasRefreshPending, minMax); return new CanMatchShardResponse(false, null);
} }
} catch (Exception e) {
return new CanMatchShardResponse(true, null);
} finally { } finally {
Releasables.close(releasable); Releasables.close(releasable);
} }
@ -1719,15 +1798,6 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
* {@link MatchNoneQueryBuilder}. This allows us to avoid extra work for example making the shard search active and waiting for * {@link MatchNoneQueryBuilder}. This allows us to avoid extra work for example making the shard search active and waiting for
* refreshes. * refreshes.
*/ */
private static boolean canMatchAfterRewrite(final ShardSearchRequest request, final IndexService indexService) throws IOException {
final QueryRewriteContext queryRewriteContext = indexService.newQueryRewriteContext(
request::nowInMillis,
request.getRuntimeMappings(),
request.getClusterAlias()
);
return queryStillMatchesAfterRewrite(request, queryRewriteContext);
}
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
public static boolean queryStillMatchesAfterRewrite(ShardSearchRequest request, QueryRewriteContext context) throws IOException { public static boolean queryStillMatchesAfterRewrite(ShardSearchRequest request, QueryRewriteContext context) throws IOException {
Rewriteable.rewrite(request.getRewriteable(), context, false); Rewriteable.rewrite(request.getRewriteable(), context, false);
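The refactored canMatch is deliberately fail-open: a thrown exception or a missing searcher no longer propagates, it simply yields "can match", because wrongly skipping a shard would silently drop hits while a false positive only costs one unnecessary query. Here is a small plain-Java sketch of that fail-open pre-filter idea; the generic types and the simulated shards are placeholders, not the SearchService types.

```java
import java.util.List;
import java.util.function.Predicate;

/** Fail-open pre-filter: a 'false' answer must be certain, so any error degrades to 'true' (keep the shard). */
final class CanMatchSketch {

    static <T> boolean canMatch(T shard, Predicate<T> cheapRewriteCheck) {
        try {
            return cheapRewriteCheck.test(shard);
        } catch (Exception e) {
            // A broken or missing reader context must not eliminate the shard from the search.
            return true;
        }
    }

    public static void main(String[] args) {
        List<String> shards = List.of("shard-with-matches", "shard-without-matches", "shard-that-throws");
        Predicate<String> check = shard -> {
            if (shard.equals("shard-that-throws")) {
                throw new IllegalStateException("simulated missing search context");
            }
            return shard.equals("shard-with-matches");
        };
        for (String shard : shards) {
            System.out.println(shard + " -> " + canMatch(shard, check));
            // shard-with-matches -> true, shard-without-matches -> false, shard-that-throws -> true
        }
    }
}
```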

View file

@ -240,6 +240,6 @@ public class RareTermsAggregationBuilder extends ValuesSourceAggregationBuilder<
@Override @Override
public TransportVersion getMinimalSupportedVersion() { public TransportVersion getMinimalSupportedVersion() {
return TransportVersions.V_7_3_0; return TransportVersions.ZERO;
} }
} }

View file

@ -370,6 +370,6 @@ public class SignificantTermsAggregationBuilder extends ValuesSourceAggregationB
@Override @Override
public TransportVersion getMinimalSupportedVersion() { public TransportVersion getMinimalSupportedVersion() {
return TransportVersions.V_7_3_0; return TransportVersions.ZERO;
} }
} }

View file

@ -377,6 +377,6 @@ public class SignificantTextAggregationBuilder extends AbstractAggregationBuilde
@Override @Override
public TransportVersion getMinimalSupportedVersion() { public TransportVersion getMinimalSupportedVersion() {
return TransportVersions.V_7_3_0; return TransportVersions.ZERO;
} }
} }

View file

@ -124,6 +124,6 @@ public class CumulativeSumPipelineAggregationBuilder extends AbstractPipelineAgg
@Override @Override
public TransportVersion getMinimalSupportedVersion() { public TransportVersion getMinimalSupportedVersion() {
return TransportVersions.V_7_4_0; return TransportVersions.ZERO;
} }
} }

View file

@ -161,6 +161,7 @@ public final class RestoreService implements ClusterStateApplier {
SETTING_HISTORY_UUID, SETTING_HISTORY_UUID,
IndexSettings.MODE.getKey(), IndexSettings.MODE.getKey(),
SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(),
IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(),
IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(), IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(),
IndexSortConfig.INDEX_SORT_ORDER_SETTING.getKey(), IndexSortConfig.INDEX_SORT_ORDER_SETTING.getKey(),
IndexSortConfig.INDEX_SORT_MODE_SETTING.getKey(), IndexSortConfig.INDEX_SORT_MODE_SETTING.getKey(),

View file

@ -61,6 +61,7 @@ import java.util.HashMap;
import java.util.Iterator; import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.function.Supplier;
import static java.util.Collections.emptyMap; import static java.util.Collections.emptyMap;
import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.core.Strings.format;
@ -108,6 +109,7 @@ public final class SnapshotShardsService extends AbstractLifecycleComponent impl
this.threadPool = transportService.getThreadPool(); this.threadPool = transportService.getThreadPool();
this.snapshotShutdownProgressTracker = new SnapshotShutdownProgressTracker( this.snapshotShutdownProgressTracker = new SnapshotShutdownProgressTracker(
() -> clusterService.state().nodes().getLocalNodeId(), () -> clusterService.state().nodes().getLocalNodeId(),
(callerLogger) -> logIndexShardSnapshotStatuses(callerLogger),
clusterService.getClusterSettings(), clusterService.getClusterSettings(),
threadPool threadPool
); );
@ -234,6 +236,14 @@ public final class SnapshotShardsService extends AbstractLifecycleComponent impl
} }
} }
private void logIndexShardSnapshotStatuses(Logger callerLogger) {
for (var snapshotStatuses : shardSnapshots.values()) {
for (var shardSnapshot : snapshotStatuses.entrySet()) {
callerLogger.info(Strings.format("ShardId %s, %s", shardSnapshot.getKey(), shardSnapshot.getValue()));
}
}
}
/** /**
* Returns status of shards that are snapshotted on the node and belong to the given snapshot * Returns status of shards that are snapshotted on the node and belong to the given snapshot
* <p> * <p>
@ -321,7 +331,8 @@ public final class SnapshotShardsService extends AbstractLifecycleComponent impl
sid, sid,
ShardState.FAILED, ShardState.FAILED,
shard.getValue().reason(), shard.getValue().reason(),
shard.getValue().generation() shard.getValue().generation(),
() -> null
); );
} }
} else { } else {
@ -372,6 +383,7 @@ public final class SnapshotShardsService extends AbstractLifecycleComponent impl
+ snapshotStatus.generation() + snapshotStatus.generation()
+ "] for snapshot with old-format compatibility"; + "] for snapshot with old-format compatibility";
shardSnapshotTasks.add(newShardSnapshotTask(shardId, snapshot, indexId, snapshotStatus, entry.version(), entry.startTime())); shardSnapshotTasks.add(newShardSnapshotTask(shardId, snapshot, indexId, snapshotStatus, entry.version(), entry.startTime()));
snapshotStatus.updateStatusDescription("shard snapshot scheduled to start");
} }
threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> shardSnapshotTasks.forEach(Runnable::run)); threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> shardSnapshotTasks.forEach(Runnable::run));
@ -383,6 +395,7 @@ public final class SnapshotShardsService extends AbstractLifecycleComponent impl
for (final Map.Entry<ShardId, ShardSnapshotStatus> shardEntry : entry.shards().entrySet()) { for (final Map.Entry<ShardId, ShardSnapshotStatus> shardEntry : entry.shards().entrySet()) {
final ShardId shardId = shardEntry.getKey(); final ShardId shardId = shardEntry.getKey();
final ShardSnapshotStatus masterShardSnapshotStatus = shardEntry.getValue(); final ShardSnapshotStatus masterShardSnapshotStatus = shardEntry.getValue();
IndexShardSnapshotStatus indexShardSnapshotStatus = localShardSnapshots.get(shardId);
if (masterShardSnapshotStatus.state() != ShardState.INIT) { if (masterShardSnapshotStatus.state() != ShardState.INIT) {
// shard snapshot not currently scheduled by master // shard snapshot not currently scheduled by master
@ -402,7 +415,11 @@ public final class SnapshotShardsService extends AbstractLifecycleComponent impl
shardId, shardId,
ShardState.PAUSED_FOR_NODE_REMOVAL, ShardState.PAUSED_FOR_NODE_REMOVAL,
"paused", "paused",
masterShardSnapshotStatus.generation() masterShardSnapshotStatus.generation(),
() -> {
indexShardSnapshotStatus.updateStatusDescription("finished: master notification attempt complete");
return null;
}
); );
} else { } else {
// shard snapshot currently running, mark for pause // shard snapshot currently running, mark for pause
@ -419,9 +436,16 @@ public final class SnapshotShardsService extends AbstractLifecycleComponent impl
final IndexVersion entryVersion, final IndexVersion entryVersion,
final long entryStartTime final long entryStartTime
) { ) {
Supplier<Void> postMasterNotificationAction = () -> {
snapshotStatus.updateStatusDescription("finished: master notification attempt complete");
return null;
};
// Listener that runs on completion of the shard snapshot: it will notify the master node of success or failure.
ActionListener<ShardSnapshotResult> snapshotResultListener = new ActionListener<>() { ActionListener<ShardSnapshotResult> snapshotResultListener = new ActionListener<>() {
@Override @Override
public void onResponse(ShardSnapshotResult shardSnapshotResult) { public void onResponse(ShardSnapshotResult shardSnapshotResult) {
snapshotStatus.updateStatusDescription("snapshot succeeded: proceeding to notify master of success");
final ShardGeneration newGeneration = shardSnapshotResult.getGeneration(); final ShardGeneration newGeneration = shardSnapshotResult.getGeneration();
assert newGeneration != null; assert newGeneration != null;
assert newGeneration.equals(snapshotStatus.generation()); assert newGeneration.equals(snapshotStatus.generation());
@@ -436,11 +460,13 @@ public final class SnapshotShardsService extends AbstractLifecycleComponent impl
snapshotStatus.generation() snapshotStatus.generation()
); );
} }
notifySuccessfulSnapshotShard(snapshot, shardId, shardSnapshotResult);
notifySuccessfulSnapshotShard(snapshot, shardId, shardSnapshotResult, postMasterNotificationAction);
} }
@Override @Override
public void onFailure(Exception e) { public void onFailure(Exception e) {
snapshotStatus.updateStatusDescription("failed with exception '" + e + ": proceeding to notify master of failure");
final String failure; final String failure;
final Stage nextStage; final Stage nextStage;
if (e instanceof AbortedSnapshotException) { if (e instanceof AbortedSnapshotException) {
@@ -457,7 +483,14 @@ public final class SnapshotShardsService extends AbstractLifecycleComponent impl
logger.warn(() -> format("[%s][%s] failed to snapshot shard", shardId, snapshot), e); logger.warn(() -> format("[%s][%s] failed to snapshot shard", shardId, snapshot), e);
} }
final var shardState = snapshotStatus.moveToUnsuccessful(nextStage, failure, threadPool.absoluteTimeInMillis()); final var shardState = snapshotStatus.moveToUnsuccessful(nextStage, failure, threadPool.absoluteTimeInMillis());
notifyUnsuccessfulSnapshotShard(snapshot, shardId, shardState, failure, snapshotStatus.generation()); notifyUnsuccessfulSnapshotShard(
snapshot,
shardId,
shardState,
failure,
snapshotStatus.generation(),
postMasterNotificationAction
);
} }
}; };
@@ -508,6 +541,7 @@ public final class SnapshotShardsService extends AbstractLifecycleComponent impl
ActionListener<ShardSnapshotResult> resultListener ActionListener<ShardSnapshotResult> resultListener
) { ) {
ActionListener.run(resultListener, listener -> { ActionListener.run(resultListener, listener -> {
snapshotStatus.updateStatusDescription("has started");
snapshotStatus.ensureNotAborted(); snapshotStatus.ensureNotAborted();
final IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id()); final IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id());
if (indexShard.routingEntry().primary() == false) { if (indexShard.routingEntry().primary() == false) {
@@ -527,7 +561,9 @@ public final class SnapshotShardsService extends AbstractLifecycleComponent impl
final Repository repository = repositoriesService.repository(snapshot.getRepository()); final Repository repository = repositoriesService.repository(snapshot.getRepository());
SnapshotIndexCommit snapshotIndexCommit = null; SnapshotIndexCommit snapshotIndexCommit = null;
try { try {
snapshotStatus.updateStatusDescription("acquiring commit reference from IndexShard: triggers a shard flush");
snapshotIndexCommit = new SnapshotIndexCommit(indexShard.acquireIndexCommitForSnapshot()); snapshotIndexCommit = new SnapshotIndexCommit(indexShard.acquireIndexCommitForSnapshot());
snapshotStatus.updateStatusDescription("commit reference acquired, proceeding with snapshot");
final var shardStateId = getShardStateId(indexShard, snapshotIndexCommit.indexCommit()); // not aborted so indexCommit() ok final var shardStateId = getShardStateId(indexShard, snapshotIndexCommit.indexCommit()); // not aborted so indexCommit() ok
snapshotStatus.addAbortListener(makeAbortListener(indexShard.shardId(), snapshot, snapshotIndexCommit)); snapshotStatus.addAbortListener(makeAbortListener(indexShard.shardId(), snapshot, snapshotIndexCommit));
snapshotStatus.ensureNotAborted(); snapshotStatus.ensureNotAborted();
@@ -652,8 +688,12 @@ public final class SnapshotShardsService extends AbstractLifecycleComponent impl
snapshot.snapshot(), snapshot.snapshot(),
shardId shardId
); );
notifySuccessfulSnapshotShard(snapshot.snapshot(), shardId, localShard.getValue().getShardSnapshotResult()); notifySuccessfulSnapshotShard(
snapshot.snapshot(),
shardId,
localShard.getValue().getShardSnapshotResult(),
() -> null
);
} else if (stage == Stage.FAILURE) { } else if (stage == Stage.FAILURE) {
// but we think the shard failed - we need to make new master know that the shard failed // but we think the shard failed - we need to make new master know that the shard failed
logger.debug( logger.debug(
@@ -667,7 +707,8 @@ public final class SnapshotShardsService extends AbstractLifecycleComponent impl
shardId, shardId,
ShardState.FAILED, ShardState.FAILED,
indexShardSnapshotStatus.getFailure(), indexShardSnapshotStatus.getFailure(),
localShard.getValue().generation() localShard.getValue().generation(),
() -> null
); );
} else if (stage == Stage.PAUSED) { } else if (stage == Stage.PAUSED) {
// but we think the shard has paused - we need to make new master know that // but we think the shard has paused - we need to make new master know that
@@ -680,7 +721,8 @@ public final class SnapshotShardsService extends AbstractLifecycleComponent impl
shardId, shardId,
ShardState.PAUSED_FOR_NODE_REMOVAL, ShardState.PAUSED_FOR_NODE_REMOVAL,
indexShardSnapshotStatus.getFailure(), indexShardSnapshotStatus.getFailure(),
localShard.getValue().generation() localShard.getValue().generation(),
() -> null
); );
} }
} }
@@ -693,10 +735,20 @@ public final class SnapshotShardsService extends AbstractLifecycleComponent impl
/**
* Notify the master node that the given shard snapshot completed successfully.
*/
private void notifySuccessfulSnapshotShard(final Snapshot snapshot, final ShardId shardId, ShardSnapshotResult shardSnapshotResult) { private void notifySuccessfulSnapshotShard(
final Snapshot snapshot,
final ShardId shardId,
ShardSnapshotResult shardSnapshotResult,
Supplier<Void> postMasterNotificationAction
) {
assert shardSnapshotResult != null; assert shardSnapshotResult != null;
assert shardSnapshotResult.getGeneration() != null; assert shardSnapshotResult.getGeneration() != null;
sendSnapshotShardUpdate(snapshot, shardId, ShardSnapshotStatus.success(clusterService.localNode().getId(), shardSnapshotResult)); sendSnapshotShardUpdate(
snapshot,
shardId,
ShardSnapshotStatus.success(clusterService.localNode().getId(), shardSnapshotResult),
postMasterNotificationAction
);
} }
/**
@@ -707,13 +759,15 @@ public final class SnapshotShardsService extends AbstractLifecycleComponent impl
final ShardId shardId, final ShardId shardId,
final ShardState shardState, final ShardState shardState,
final String failure, final String failure,
final ShardGeneration generation final ShardGeneration generation,
Supplier<Void> postMasterNotificationAction
) { ) {
assert shardState == ShardState.FAILED || shardState == ShardState.PAUSED_FOR_NODE_REMOVAL : shardState; assert shardState == ShardState.FAILED || shardState == ShardState.PAUSED_FOR_NODE_REMOVAL : shardState;
sendSnapshotShardUpdate( sendSnapshotShardUpdate(
snapshot, snapshot,
shardId, shardId,
new ShardSnapshotStatus(clusterService.localNode().getId(), shardState, generation, failure) new ShardSnapshotStatus(clusterService.localNode().getId(), shardState, generation, failure),
postMasterNotificationAction
); );
if (shardState == ShardState.PAUSED_FOR_NODE_REMOVAL) { if (shardState == ShardState.PAUSED_FOR_NODE_REMOVAL) {
logger.debug( logger.debug(
@@ -726,7 +780,12 @@ public final class SnapshotShardsService extends AbstractLifecycleComponent impl
} }
/** Updates the shard snapshot status by sending a {@link UpdateIndexShardSnapshotStatusRequest} to the master node */
private void sendSnapshotShardUpdate(final Snapshot snapshot, final ShardId shardId, final ShardSnapshotStatus status) { private void sendSnapshotShardUpdate(
final Snapshot snapshot,
final ShardId shardId,
final ShardSnapshotStatus status,
Supplier<Void> postMasterNotificationAction
) {
ActionListener<Void> updateResultListener = new ActionListener<>() { ActionListener<Void> updateResultListener = new ActionListener<>() {
@Override @Override
public void onResponse(Void aVoid) { public void onResponse(Void aVoid) {
@@ -738,9 +797,11 @@ public final class SnapshotShardsService extends AbstractLifecycleComponent impl
logger.warn(() -> format("[%s][%s] failed to update snapshot state to [%s]", shardId, snapshot, status), e); logger.warn(() -> format("[%s][%s] failed to update snapshot state to [%s]", shardId, snapshot, status), e);
} }
}; };
snapshotShutdownProgressTracker.trackRequestSentToMaster(snapshot, shardId); snapshotShutdownProgressTracker.trackRequestSentToMaster(snapshot, shardId);
var releaseTrackerRequestRunsBeforeResultListener = ActionListener.runBefore(updateResultListener, () -> { var releaseTrackerRequestRunsBeforeResultListener = ActionListener.runBefore(updateResultListener, () -> {
snapshotShutdownProgressTracker.releaseRequestSentToMaster(snapshot, shardId); snapshotShutdownProgressTracker.releaseRequestSentToMaster(snapshot, shardId);
postMasterNotificationAction.get();
}); });
remoteFailedRequestDeduplicator.executeOnce( remoteFailedRequestDeduplicator.executeOnce(
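The Supplier<Void> passed down as postMasterNotificationAction above is invoked through ActionListener.runBefore, so it runs once the status-update request to the master has been attempted and can stamp the shard's status description accordingly. A minimal standalone sketch of that wrap-a-callback-before-the-listener idea, using plain Java types with made-up names rather than the Elasticsearch ActionListener API:

    // Sketch only: run a fixed action before handing the result to the real completion handler,
    // mirroring the "release tracker, then postMasterNotificationAction" ordering used above.
    import java.util.function.Consumer;

    final class RunBeforeHandler<T> {
        private final Consumer<T> delegate;   // the real completion handler
        private final Runnable runBefore;     // e.g. tracker release + post-notification action

        RunBeforeHandler(Consumer<T> delegate, Runnable runBefore) {
            this.delegate = delegate;
            this.runBefore = runBefore;
        }

        void onResponse(T response) {
            runBefore.run();                  // runs before the delegate sees the response
            delegate.accept(response);
        }

        void onFailure(Exception e) {
            runBefore.run();                  // same hook on the failure path
        }
    }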


@@ -25,6 +25,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import java.util.Map; import java.util.Map;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;
import java.util.function.Supplier; import java.util.function.Supplier;
/**
@@ -45,6 +46,7 @@ public class SnapshotShutdownProgressTracker {
private static final Logger logger = LogManager.getLogger(SnapshotShutdownProgressTracker.class); private static final Logger logger = LogManager.getLogger(SnapshotShutdownProgressTracker.class);
private final Supplier<String> getLocalNodeId; private final Supplier<String> getLocalNodeId;
private final Consumer<Logger> logIndexShardSnapshotStatuses;
private final ThreadPool threadPool; private final ThreadPool threadPool;
private volatile TimeValue progressLoggerInterval; private volatile TimeValue progressLoggerInterval;
@@ -83,8 +85,14 @@ public class SnapshotShutdownProgressTracker {
private final AtomicLong abortedCount = new AtomicLong(); private final AtomicLong abortedCount = new AtomicLong();
private final AtomicLong pausedCount = new AtomicLong(); private final AtomicLong pausedCount = new AtomicLong();
public SnapshotShutdownProgressTracker(Supplier<String> localNodeIdSupplier, ClusterSettings clusterSettings, ThreadPool threadPool) { public SnapshotShutdownProgressTracker(
Supplier<String> localNodeIdSupplier,
Consumer<Logger> logShardStatuses,
ClusterSettings clusterSettings,
ThreadPool threadPool
) {
this.getLocalNodeId = localNodeIdSupplier; this.getLocalNodeId = localNodeIdSupplier;
this.logIndexShardSnapshotStatuses = logShardStatuses;
clusterSettings.initializeAndWatch( clusterSettings.initializeAndWatch(
SNAPSHOT_PROGRESS_DURING_SHUTDOWN_LOG_INTERVAL_SETTING, SNAPSHOT_PROGRESS_DURING_SHUTDOWN_LOG_INTERVAL_SETTING,
value -> this.progressLoggerInterval = value value -> this.progressLoggerInterval = value
@@ -122,14 +130,14 @@ public class SnapshotShutdownProgressTracker {
} }
/** /**
* Logs some statistics about shard snapshot progress. * Logs information about shard snapshot progress.
*/ */
private void logProgressReport() { private void logProgressReport() {
logger.info( logger.info(
""" """
Current active shard snapshot stats on data node [{}]. \ Current active shard snapshot stats on data node [{}]. \
Node shutdown cluster state update received at [{}]. \ Node shutdown cluster state update received at [{} millis]. \
Finished signalling shard snapshots to pause at [{}]. \ Finished signalling shard snapshots to pause at [{} millis]. \
Number shard snapshots running [{}]. \ Number shard snapshots running [{}]. \
Number shard snapshots waiting for master node reply to status update request [{}] \ Number shard snapshots waiting for master node reply to status update request [{}] \
Shard snapshot completion stats since shutdown began: Done [{}]; Failed [{}]; Aborted [{}]; Paused [{}]\ Shard snapshot completion stats since shutdown began: Done [{}]; Failed [{}]; Aborted [{}]; Paused [{}]\
@@ -144,6 +152,8 @@ public class SnapshotShutdownProgressTracker {
abortedCount.get(), abortedCount.get(),
pausedCount.get() pausedCount.get()
); );
// Use a callback to log the shard snapshot details.
logIndexShardSnapshotStatuses.accept(logger);
} }
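The per-shard detail lines are produced through the injected Consumer<Logger> (logIndexShardSnapshotStatuses), so the tracker needs no knowledge of shard snapshot state and simply lends out its logger. A rough sketch of that wiring with invented names, not the actual SnapshotShutdownProgressTracker constructor:

    // Illustrative only: a periodic reporter that defers detailed per-item logging to a callback.
    import java.util.function.Consumer;
    import java.util.logging.Logger;

    final class ProgressReporter {
        private static final Logger LOGGER = Logger.getLogger(ProgressReporter.class.getName());
        private final Consumer<Logger> logDetails;   // owned by whoever holds the per-shard statuses

        ProgressReporter(Consumer<Logger> logDetails) {
            this.logDetails = logDetails;
        }

        void logProgress(int running, int waitingOnMaster) {
            LOGGER.info("running=" + running + " waitingOnMaster=" + waitingOnMaster);
            logDetails.accept(LOGGER);               // callback appends the per-shard detail lines
        }
    }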
/**


@@ -19,6 +19,8 @@ import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.telemetry.tracing.Tracer;
import java.io.IOException; import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;
import java.util.concurrent.Executor; import java.util.concurrent.Executor;
import static org.elasticsearch.core.Releasables.assertOnce; import static org.elasticsearch.core.Releasables.assertOnce;
@@ -33,7 +35,19 @@ public class RequestHandlerRegistry<Request extends TransportRequest> implements
private final TaskManager taskManager; private final TaskManager taskManager;
private final Tracer tracer; private final Tracer tracer;
private final Writeable.Reader<Request> requestReader; private final Writeable.Reader<Request> requestReader;
private final TransportActionStatsTracker statsTracker = new TransportActionStatsTracker(); @SuppressWarnings("unused") // only accessed via #STATS_TRACKER_HANDLE, lazy initialized because instances consume non-trivial heap
private TransportActionStatsTracker statsTracker;
private static final VarHandle STATS_TRACKER_HANDLE;
static {
try {
STATS_TRACKER_HANDLE = MethodHandles.lookup()
.findVarHandle(RequestHandlerRegistry.class, "statsTracker", TransportActionStatsTracker.class);
} catch (Exception e) {
throw new ExceptionInInitializerError(e);
}
}
public RequestHandlerRegistry( public RequestHandlerRegistry(
String action, String action,
@@ -118,15 +132,34 @@ public class RequestHandlerRegistry<Request extends TransportRequest> implements
} }
public void addRequestStats(int messageSize) { public void addRequestStats(int messageSize) {
statsTracker.addRequestStats(messageSize); statsTracker().addRequestStats(messageSize);
} }
@Override @Override
public void addResponseStats(int messageSize) { public void addResponseStats(int messageSize) {
statsTracker.addResponseStats(messageSize); statsTracker().addResponseStats(messageSize);
} }
public TransportActionStats getStats() { public TransportActionStats getStats() {
var statsTracker = existingStatsTracker();
if (statsTracker == null) {
return TransportActionStats.EMPTY;
}
return statsTracker.getStats(); return statsTracker.getStats();
} }
private TransportActionStatsTracker statsTracker() {
var tracker = existingStatsTracker();
if (tracker == null) {
var newTracker = new TransportActionStatsTracker();
if ((tracker = (TransportActionStatsTracker) STATS_TRACKER_HANDLE.compareAndExchange(this, null, newTracker)) == null) {
tracker = newTracker;
}
}
return tracker;
}
private TransportActionStatsTracker existingStatsTracker() {
return (TransportActionStatsTracker) STATS_TRACKER_HANDLE.getAcquire(this);
}
} }
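With this change the registry allocates its TransportActionStatsTracker only on first use: readers go through an acquire read, writers race through compareAndExchange so at most one instance is published, and getStats() falls back to the shared TransportActionStats.EMPTY when no tracker was ever created. A compact sketch of the same lazy-publication idiom with a hypothetical Stats type (not the Elasticsearch classes):

    // Sketch of lazy, race-safe field initialization via a VarHandle.
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    final class LazyStatsHolder {
        @SuppressWarnings("unused") // written only through STATS_HANDLE
        private Stats stats;

        private static final VarHandle STATS_HANDLE;
        static {
            try {
                STATS_HANDLE = MethodHandles.lookup().findVarHandle(LazyStatsHolder.class, "stats", Stats.class);
            } catch (ReflectiveOperationException e) {
                throw new ExceptionInInitializerError(e);
            }
        }

        Stats stats() {
            var existing = (Stats) STATS_HANDLE.getAcquire(this);
            if (existing != null) {
                return existing;
            }
            var created = new Stats();
            // compareAndExchange returns the witness: null if we won the race, otherwise the instance another thread installed.
            var witness = (Stats) STATS_HANDLE.compareAndExchange(this, null, created);
            return witness == null ? created : witness;
        }

        static final class Stats {
            // placeholder for the lazily created, heap-heavy state
        }
    }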


@@ -27,6 +27,8 @@ public record TransportActionStats(
long[] responseSizeHistogram long[] responseSizeHistogram
) implements Writeable, ToXContentObject { ) implements Writeable, ToXContentObject {
public static final TransportActionStats EMPTY = new TransportActionStats(0, 0, new long[0], 0, 0, new long[0]);
public TransportActionStats(StreamInput in) throws IOException { public TransportActionStats(StreamInput in) throws IOException {
this(in.readVLong(), in.readVLong(), in.readVLongArray(), in.readVLong(), in.readVLong(), in.readVLongArray()); this(in.readVLong(), in.readVLong(), in.readVLongArray(), in.readVLong(), in.readVLong(), in.readVLongArray());
} }


@@ -9,6 +9,7 @@
package org.elasticsearch.bootstrap; package org.elasticsearch.bootstrap;
import org.elasticsearch.jdk.RuntimeVersionFeature;
import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase;
import java.security.AccessControlContext; import java.security.AccessControlContext;
@@ -27,7 +28,10 @@ public class ESPolicyTests extends ESTestCase {
* test restricting privileges to no permissions actually works
*/
public void testRestrictPrivileges() {
assumeTrue("test requires security manager", System.getSecurityManager() != null); assumeTrue(
"test requires security manager",
RuntimeVersionFeature.isSecurityManagerAvailable() && System.getSecurityManager() != null
);
try { try {
System.getProperty("user.home"); System.getProperty("user.home");
} catch (SecurityException e) { } catch (SecurityException e) {


@@ -9,6 +9,7 @@
package org.elasticsearch.bootstrap; package org.elasticsearch.bootstrap;
import org.elasticsearch.jdk.RuntimeVersionFeature;
import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase;
import java.io.IOException; import java.io.IOException;
@@ -50,7 +51,10 @@ public class SecurityTests extends ESTestCase {
/** can't execute processes */
public void testProcessExecution() throws Exception {
assumeTrue("test requires security manager", System.getSecurityManager() != null); assumeTrue(
"test requires security manager",
RuntimeVersionFeature.isSecurityManagerAvailable() && System.getSecurityManager() != null
);
try { try {
Runtime.getRuntime().exec("ls"); Runtime.getRuntime().exec("ls");
fail("didn't get expected exception"); fail("didn't get expected exception");


@@ -103,7 +103,7 @@ public class ES818HnswBinaryQuantizedVectorsFormatTests extends BaseKnnVectorsFo
assertEquals(1, td.totalHits.value()); assertEquals(1, td.totalHits.value());
assertTrue(td.scoreDocs[0].score >= 0); assertTrue(td.scoreDocs[0].score >= 0);
// When it's the only vector in a segment, the score should be very close to the true score
assertEquals(trueScore, td.scoreDocs[0].score, 0.0001f); assertEquals(trueScore, td.scoreDocs[0].score, 0.01f);
} }
} }
} }


@@ -89,6 +89,7 @@ import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver;
import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndSeqNo; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndSeqNo;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
@@ -3448,7 +3449,7 @@ public class InternalEngineTests extends EngineTestCase {
assertThat(indexResult.getVersion(), equalTo(1L)); assertThat(indexResult.getVersion(), equalTo(1L));
} }
assertVisibleCount(engine, numDocs); assertVisibleCount(engine, numDocs);
translogHandler = createTranslogHandler(engine.engineConfig.getIndexSettings()); translogHandler = createTranslogHandler(mapperService);
engine.close(); engine.close();
// we need to reuse the engine config unless the parser.mappingModified won't work
@@ -3460,7 +3461,7 @@ public class InternalEngineTests extends EngineTestCase {
assertEquals(numDocs, translogHandler.appliedOperations()); assertEquals(numDocs, translogHandler.appliedOperations());
engine.close(); engine.close();
translogHandler = createTranslogHandler(engine.engineConfig.getIndexSettings()); translogHandler = createTranslogHandler(mapperService);
engine = createEngine(store, primaryTranslogDir, inSyncGlobalCheckpointSupplier); engine = createEngine(store, primaryTranslogDir, inSyncGlobalCheckpointSupplier);
engine.refresh("warm_up"); engine.refresh("warm_up");
assertVisibleCount(engine, numDocs, false); assertVisibleCount(engine, numDocs, false);
@@ -3514,7 +3515,7 @@ public class InternalEngineTests extends EngineTestCase {
} }
engine.close(); engine.close();
translogHandler = createTranslogHandler(engine.engineConfig.getIndexSettings()); translogHandler = createTranslogHandler(mapperService);
engine = createEngine(store, primaryTranslogDir, inSyncGlobalCheckpointSupplier); engine = createEngine(store, primaryTranslogDir, inSyncGlobalCheckpointSupplier);
engine.refresh("warm_up"); engine.refresh("warm_up");
try (Engine.Searcher searcher = engine.acquireSearcher("test")) { try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
@@ -6447,7 +6448,8 @@ public class InternalEngineTests extends EngineTestCase {
max, max,
true, true,
randomBoolean(), randomBoolean(),
randomBoolean() randomBoolean(),
randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes())
) )
) {} ) {}
} else { } else {
@@ -7673,7 +7675,7 @@ public class InternalEngineTests extends EngineTestCase {
) { ) {
IllegalStateException exc = expectThrows( IllegalStateException exc = expectThrows(
IllegalStateException.class, IllegalStateException.class,
() -> engine.newChangesSnapshot("test", 0, 1000, true, true, true) () -> engine.newChangesSnapshot("test", 0, 1000, true, true, true, randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()))
); );
assertThat(exc.getMessage(), containsString("unavailable")); assertThat(exc.getMessage(), containsString("unavailable"));
} }


@@ -10,289 +10,37 @@
package org.elasticsearch.index.engine; package org.elasticsearch.index.engine;
import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.NoMergePolicy;
import org.elasticsearch.common.Randomness; import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.IOUtils;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.MappingLookup;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.SnapshotMatchers;
import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.test.IndexSettingsModule;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.LongSupplier;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
public class LuceneChangesSnapshotTests extends EngineTestCase {
public class LuceneChangesSnapshotTests extends SearchBasedChangesSnapshotTests {
@Override @Override
protected Settings indexSettings() { protected Translog.Snapshot newRandomSnapshot(
return Settings.builder() MappingLookup mappingLookup,
.put(super.indexSettings()) Engine.Searcher engineSearcher,
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) // always enable soft-deletes int searchBatchSize,
.build(); long fromSeqNo,
} long toSeqNo,
boolean requiredFullRange,
public void testBasics() throws Exception { boolean singleConsumer,
long fromSeqNo = randomNonNegativeLong(); boolean accessStats,
long toSeqNo = randomLongBetween(fromSeqNo, Long.MAX_VALUE); IndexVersion indexVersionCreated
// Empty engine ) throws IOException {
try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", fromSeqNo, toSeqNo, true, randomBoolean(), randomBoolean())) { return new LuceneChangesSnapshot(
IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot)); engineSearcher,
assertThat( searchBatchSize,
error.getMessage(), fromSeqNo,
containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found") toSeqNo,
); requiredFullRange,
} singleConsumer,
try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", fromSeqNo, toSeqNo, false, randomBoolean(), randomBoolean())) { accessStats,
assertThat(snapshot, SnapshotMatchers.size(0)); indexVersionCreated
} );
int numOps = between(1, 100);
int refreshedSeqNo = -1;
for (int i = 0; i < numOps; i++) {
String id = Integer.toString(randomIntBetween(i, i + 5));
ParsedDocument doc = createParsedDoc(id, null, randomBoolean());
if (randomBoolean()) {
engine.index(indexForDoc(doc));
} else {
engine.delete(new Engine.Delete(doc.id(), Uid.encodeId(doc.id()), primaryTerm.get()));
}
if (rarely()) {
if (randomBoolean()) {
engine.flush();
} else {
engine.refresh("test");
}
refreshedSeqNo = i;
}
}
if (refreshedSeqNo == -1) {
fromSeqNo = between(0, numOps);
toSeqNo = randomLongBetween(fromSeqNo, numOps * 2);
Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL);
try (
Translog.Snapshot snapshot = new LuceneChangesSnapshot(
searcher,
between(1, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE),
fromSeqNo,
toSeqNo,
false,
randomBoolean(),
randomBoolean(),
IndexVersion.current()
)
) {
searcher = null;
assertThat(snapshot, SnapshotMatchers.size(0));
} finally {
IOUtils.close(searcher);
}
searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL);
try (
Translog.Snapshot snapshot = new LuceneChangesSnapshot(
searcher,
between(1, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE),
fromSeqNo,
toSeqNo,
true,
randomBoolean(),
randomBoolean(),
IndexVersion.current()
)
) {
searcher = null;
IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot));
assertThat(
error.getMessage(),
containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found")
);
} finally {
IOUtils.close(searcher);
}
} else {
fromSeqNo = randomLongBetween(0, refreshedSeqNo);
toSeqNo = randomLongBetween(refreshedSeqNo + 1, numOps * 2);
Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL);
try (
Translog.Snapshot snapshot = new LuceneChangesSnapshot(
searcher,
between(1, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE),
fromSeqNo,
toSeqNo,
false,
randomBoolean(),
randomBoolean(),
IndexVersion.current()
)
) {
searcher = null;
assertThat(snapshot, SnapshotMatchers.containsSeqNoRange(fromSeqNo, refreshedSeqNo));
} finally {
IOUtils.close(searcher);
}
searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL);
try (
Translog.Snapshot snapshot = new LuceneChangesSnapshot(
searcher,
between(1, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE),
fromSeqNo,
toSeqNo,
true,
randomBoolean(),
randomBoolean(),
IndexVersion.current()
)
) {
searcher = null;
IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot));
assertThat(
error.getMessage(),
containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found")
);
} finally {
IOUtils.close(searcher);
}
toSeqNo = randomLongBetween(fromSeqNo, refreshedSeqNo);
searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL);
try (
Translog.Snapshot snapshot = new LuceneChangesSnapshot(
searcher,
between(1, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE),
fromSeqNo,
toSeqNo,
true,
randomBoolean(),
randomBoolean(),
IndexVersion.current()
)
) {
searcher = null;
assertThat(snapshot, SnapshotMatchers.containsSeqNoRange(fromSeqNo, toSeqNo));
} finally {
IOUtils.close(searcher);
}
}
// Get snapshot via engine will auto refresh
fromSeqNo = randomLongBetween(0, numOps - 1);
toSeqNo = randomLongBetween(fromSeqNo, numOps - 1);
try (
Translog.Snapshot snapshot = engine.newChangesSnapshot(
"test",
fromSeqNo,
toSeqNo,
randomBoolean(),
randomBoolean(),
randomBoolean()
)
) {
assertThat(snapshot, SnapshotMatchers.containsSeqNoRange(fromSeqNo, toSeqNo));
}
}
/**
* A nested document is indexed into Lucene as multiple documents. While the root document has both sequence number and primary term,
* non-root documents don't have primary term but only sequence numbers. This test verifies that {@link LuceneChangesSnapshot}
* correctly skip non-root documents and returns at most one operation per sequence number.
*/
public void testSkipNonRootOfNestedDocuments() throws Exception {
Map<Long, Long> seqNoToTerm = new HashMap<>();
List<Engine.Operation> operations = generateHistoryOnReplica(between(1, 100), randomBoolean(), randomBoolean(), randomBoolean());
for (Engine.Operation op : operations) {
if (engine.getLocalCheckpointTracker().hasProcessed(op.seqNo()) == false) {
seqNoToTerm.put(op.seqNo(), op.primaryTerm());
}
applyOperation(engine, op);
if (rarely()) {
engine.refresh("test");
}
if (rarely()) {
engine.rollTranslogGeneration();
}
if (rarely()) {
engine.flush();
}
}
long maxSeqNo = engine.getLocalCheckpointTracker().getMaxSeqNo();
engine.refresh("test");
Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL);
final boolean accessStats = randomBoolean();
try (
Translog.Snapshot snapshot = new LuceneChangesSnapshot(
searcher,
between(1, 100),
0,
maxSeqNo,
false,
randomBoolean(),
accessStats,
IndexVersion.current()
)
) {
if (accessStats) {
assertThat(snapshot.totalOperations(), equalTo(seqNoToTerm.size()));
}
Translog.Operation op;
while ((op = snapshot.next()) != null) {
assertThat(op.toString(), op.primaryTerm(), equalTo(seqNoToTerm.get(op.seqNo())));
}
assertThat(snapshot.skippedOperations(), equalTo(0));
}
}
public void testUpdateAndReadChangesConcurrently() throws Exception {
Follower[] followers = new Follower[between(1, 3)];
CountDownLatch readyLatch = new CountDownLatch(followers.length + 1);
AtomicBoolean isDone = new AtomicBoolean();
for (int i = 0; i < followers.length; i++) {
followers[i] = new Follower(engine, isDone, readyLatch);
followers[i].start();
}
boolean onPrimary = randomBoolean();
List<Engine.Operation> operations = new ArrayList<>();
int numOps = frequently() ? scaledRandomIntBetween(1, 1500) : scaledRandomIntBetween(5000, 20_000);
for (int i = 0; i < numOps; i++) {
String id = Integer.toString(randomIntBetween(0, randomBoolean() ? 10 : numOps * 2));
ParsedDocument doc = createParsedDoc(id, randomAlphaOfLengthBetween(1, 5), randomBoolean());
final Engine.Operation op;
if (onPrimary) {
if (randomBoolean()) {
op = new Engine.Index(newUid(doc), primaryTerm.get(), doc);
} else {
op = new Engine.Delete(doc.id(), Uid.encodeId(doc.id()), primaryTerm.get());
}
} else {
if (randomBoolean()) {
op = replicaIndexForDoc(doc, randomNonNegativeLong(), i, randomBoolean());
} else {
op = replicaDeleteForDoc(doc.id(), randomNonNegativeLong(), i, randomNonNegativeLong());
}
}
operations.add(op);
}
readyLatch.countDown();
readyLatch.await();
Randomness.shuffle(operations);
concurrentlyApplyOps(operations, engine);
assertThat(engine.getLocalCheckpointTracker().getProcessedCheckpoint(), equalTo(operations.size() - 1L));
isDone.set(true);
for (Follower follower : followers) {
follower.join();
IOUtils.close(follower.engine, follower.engine.store);
}
} }
public void testAccessStoredFieldsSequentially() throws Exception { public void testAccessStoredFieldsSequentially() throws Exception {
@@ -319,7 +67,8 @@ public class LuceneChangesSnapshotTests extends EngineTestCase {
between(1, smallBatch), between(1, smallBatch),
false, false,
randomBoolean(), randomBoolean(),
randomBoolean() randomBoolean(),
randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes())
) )
) { ) {
while ((op = snapshot.next()) != null) { while ((op = snapshot.next()) != null) {
@@ -335,7 +84,8 @@ public class LuceneChangesSnapshotTests extends EngineTestCase {
between(20, 100), between(20, 100),
false, false,
randomBoolean(), randomBoolean(),
randomBoolean() randomBoolean(),
randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes())
) )
) { ) {
while ((op = snapshot.next()) != null) { while ((op = snapshot.next()) != null) {
@@ -351,7 +101,8 @@ public class LuceneChangesSnapshotTests extends EngineTestCase {
between(21, 100), between(21, 100),
false, false,
true, true,
randomBoolean() randomBoolean(),
randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes())
) )
) { ) {
while ((op = snapshot.next()) != null) { while ((op = snapshot.next()) != null) {
@@ -367,7 +118,8 @@ public class LuceneChangesSnapshotTests extends EngineTestCase {
between(21, 100), between(21, 100),
false, false,
false, false,
randomBoolean() randomBoolean(),
randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes())
) )
) { ) {
while ((op = snapshot.next()) != null) { while ((op = snapshot.next()) != null) {
@@ -377,165 +129,4 @@ public class LuceneChangesSnapshotTests extends EngineTestCase {
} }
} }
} }
class Follower extends Thread {
private final InternalEngine leader;
private final InternalEngine engine;
private final TranslogHandler translogHandler;
private final AtomicBoolean isDone;
private final CountDownLatch readLatch;
Follower(InternalEngine leader, AtomicBoolean isDone, CountDownLatch readLatch) throws IOException {
this.leader = leader;
this.isDone = isDone;
this.readLatch = readLatch;
this.translogHandler = new TranslogHandler(
xContentRegistry(),
IndexSettingsModule.newIndexSettings(shardId.getIndexName(), leader.engineConfig.getIndexSettings().getSettings())
);
this.engine = createEngine(createStore(), createTempDir());
}
void pullOperations(InternalEngine follower) throws IOException {
long leaderCheckpoint = leader.getLocalCheckpointTracker().getProcessedCheckpoint();
long followerCheckpoint = follower.getLocalCheckpointTracker().getProcessedCheckpoint();
if (followerCheckpoint < leaderCheckpoint) {
long fromSeqNo = followerCheckpoint + 1;
long batchSize = randomLongBetween(0, 100);
long toSeqNo = Math.min(fromSeqNo + batchSize, leaderCheckpoint);
try (
Translog.Snapshot snapshot = leader.newChangesSnapshot(
"test",
fromSeqNo,
toSeqNo,
true,
randomBoolean(),
randomBoolean()
)
) {
translogHandler.run(follower, snapshot);
}
}
}
@Override
public void run() {
try {
readLatch.countDown();
readLatch.await();
while (isDone.get() == false
|| engine.getLocalCheckpointTracker().getProcessedCheckpoint() < leader.getLocalCheckpointTracker()
.getProcessedCheckpoint()) {
pullOperations(engine);
}
assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine);
// have to verify without source since we are randomly testing without _source
List<DocIdSeqNoAndSource> docsWithoutSourceOnFollower = getDocIds(engine, true).stream()
.map(d -> new DocIdSeqNoAndSource(d.id(), null, d.seqNo(), d.primaryTerm(), d.version()))
.toList();
List<DocIdSeqNoAndSource> docsWithoutSourceOnLeader = getDocIds(leader, true).stream()
.map(d -> new DocIdSeqNoAndSource(d.id(), null, d.seqNo(), d.primaryTerm(), d.version()))
.toList();
assertThat(docsWithoutSourceOnFollower, equalTo(docsWithoutSourceOnLeader));
} catch (Exception ex) {
throw new AssertionError(ex);
}
}
}
private List<Translog.Operation> drainAll(Translog.Snapshot snapshot) throws IOException {
List<Translog.Operation> operations = new ArrayList<>();
Translog.Operation op;
while ((op = snapshot.next()) != null) {
final Translog.Operation newOp = op;
logger.trace("Reading [{}]", op);
assert operations.stream().allMatch(o -> o.seqNo() < newOp.seqNo()) : "Operations [" + operations + "], op [" + op + "]";
operations.add(newOp);
}
return operations;
}
public void testOverFlow() throws Exception {
long fromSeqNo = randomLongBetween(0, 5);
long toSeqNo = randomLongBetween(Long.MAX_VALUE - 5, Long.MAX_VALUE);
try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", fromSeqNo, toSeqNo, true, randomBoolean(), randomBoolean())) {
IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot));
assertThat(
error.getMessage(),
containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found")
);
}
}
public void testStats() throws Exception {
try (Store store = createStore(); Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) {
int numOps = between(100, 5000);
long startingSeqNo = randomLongBetween(0, Integer.MAX_VALUE);
List<Engine.Operation> operations = generateHistoryOnReplica(
numOps,
startingSeqNo,
randomBoolean(),
randomBoolean(),
randomBoolean()
);
applyOperations(engine, operations);
LongSupplier fromSeqNo = () -> {
if (randomBoolean()) {
return 0L;
} else if (randomBoolean()) {
return startingSeqNo;
} else {
return randomLongBetween(0, startingSeqNo);
}
};
LongSupplier toSeqNo = () -> {
final long maxSeqNo = engine.getSeqNoStats(-1).getMaxSeqNo();
if (randomBoolean()) {
return maxSeqNo;
} else if (randomBoolean()) {
return Long.MAX_VALUE;
} else {
return randomLongBetween(maxSeqNo, Long.MAX_VALUE);
}
};
// Can't access stats if didn't request it
try (
Translog.Snapshot snapshot = engine.newChangesSnapshot(
"test",
fromSeqNo.getAsLong(),
toSeqNo.getAsLong(),
false,
randomBoolean(),
false
)
) {
IllegalStateException error = expectThrows(IllegalStateException.class, snapshot::totalOperations);
assertThat(error.getMessage(), equalTo("Access stats of a snapshot created with [access_stats] is false"));
final List<Translog.Operation> translogOps = drainAll(snapshot);
assertThat(translogOps, hasSize(numOps));
error = expectThrows(IllegalStateException.class, snapshot::totalOperations);
assertThat(error.getMessage(), equalTo("Access stats of a snapshot created with [access_stats] is false"));
}
// Access stats and operations
try (
Translog.Snapshot snapshot = engine.newChangesSnapshot(
"test",
fromSeqNo.getAsLong(),
toSeqNo.getAsLong(),
false,
randomBoolean(),
true
)
) {
assertThat(snapshot.totalOperations(), equalTo(numOps));
final List<Translog.Operation> translogOps = drainAll(snapshot);
assertThat(translogOps, hasSize(numOps));
assertThat(snapshot.totalOperations(), equalTo(numOps));
}
// Verify count
assertThat(engine.countChanges("test", fromSeqNo.getAsLong(), toSeqNo.getAsLong()), equalTo(numOps));
}
}
} }


@@ -0,0 +1,58 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.index.engine;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.mapper.MappingLookup;
import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.index.translog.Translog;
import java.io.IOException;
import static org.elasticsearch.index.mapper.SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING;
public class LuceneSyntheticSourceChangesSnapshotTests extends SearchBasedChangesSnapshotTests {
@Override
protected Settings indexSettings() {
return Settings.builder()
.put(super.indexSettings())
.put(INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.name())
.put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true)
.build();
}
@Override
protected Translog.Snapshot newRandomSnapshot(
MappingLookup mappingLookup,
Engine.Searcher engineSearcher,
int searchBatchSize,
long fromSeqNo,
long toSeqNo,
boolean requiredFullRange,
boolean singleConsumer,
boolean accessStats,
IndexVersion indexVersionCreated
) throws IOException {
return new LuceneSyntheticSourceChangesSnapshot(
mappingLookup,
engineSearcher,
searchBatchSize,
randomLongBetween(0, ByteSizeValue.ofBytes(Integer.MAX_VALUE).getBytes()),
fromSeqNo,
toSeqNo,
requiredFullRange,
accessStats,
indexVersionCreated
);
}
}
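Both snapshot test classes now share their scenarios through the abstract SearchBasedChangesSnapshotTests base shown further below: the base class owns the test logic and calls the abstract newRandomSnapshot factory, and each subclass only decides which concrete Translog.Snapshot implementation to build. A stripped-down sketch of that template-method arrangement, with invented names:

    // Sketch: the abstract base drives the scenario, subclasses supply the snapshot flavour.
    abstract class AbstractSnapshotScenario {
        protected abstract Iterable<String> newSnapshot(int batchSize);   // factory hook

        final void runBasicScenario() {
            for (String op : newSnapshot(16)) {
                // shared assertions over the produced operations go here
            }
        }
    }

    class DefaultSnapshotScenario extends AbstractSnapshotScenario {
        @Override
        protected Iterable<String> newSnapshot(int batchSize) {
            return java.util.List.of("op-1", "op-2");   // stand-in for a real LuceneChangesSnapshot
        }
    }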


@@ -39,83 +39,99 @@ import org.elasticsearch.test.ESTestCase;
import java.io.IOException; import java.io.IOException;
import java.util.Collections; import java.util.Collections;
import java.util.List;
import java.util.Set; import java.util.Set;
import java.util.stream.Collectors; import java.util.stream.Collectors;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
public class RecoverySourcePruneMergePolicyTests extends ESTestCase { public class RecoverySourcePruneMergePolicyTests extends ESTestCase {
public void testPruneAll() throws IOException { public void testPruneAll() throws IOException {
try (Directory dir = newDirectory()) { for (boolean pruneIdField : List.of(true, false)) {
boolean pruneIdField = randomBoolean(); for (boolean syntheticRecoverySource : List.of(true, false)) {
IndexWriterConfig iwc = newIndexWriterConfig(); try (Directory dir = newDirectory()) {
RecoverySourcePruneMergePolicy mp = new RecoverySourcePruneMergePolicy( IndexWriterConfig iwc = newIndexWriterConfig();
"extra_source", RecoverySourcePruneMergePolicy mp = new RecoverySourcePruneMergePolicy(
pruneIdField, syntheticRecoverySource ? null : "extra_source",
MatchNoDocsQuery::new, syntheticRecoverySource ? "extra_source_size" : "extra_source",
newLogMergePolicy() pruneIdField,
); MatchNoDocsQuery::new,
iwc.setMergePolicy(new ShuffleForcedMergePolicy(mp)); newLogMergePolicy()
try (IndexWriter writer = new IndexWriter(dir, iwc)) { );
for (int i = 0; i < 20; i++) { iwc.setMergePolicy(new ShuffleForcedMergePolicy(mp));
if (i > 0 && randomBoolean()) { try (IndexWriter writer = new IndexWriter(dir, iwc)) {
writer.flush(); for (int i = 0; i < 20; i++) {
} if (i > 0 && randomBoolean()) {
Document doc = new Document(); writer.flush();
doc.add(new StoredField(IdFieldMapper.NAME, "_id")); }
doc.add(new StoredField("source", "hello world")); Document doc = new Document();
doc.add(new StoredField("extra_source", "hello world")); doc.add(new StoredField(IdFieldMapper.NAME, "_id"));
doc.add(new NumericDocValuesField("extra_source", 1)); doc.add(new StoredField("source", "hello world"));
writer.addDocument(doc); if (syntheticRecoverySource) {
} doc.add(new NumericDocValuesField("extra_source_size", randomIntBetween(10, 10000)));
writer.forceMerge(1); } else {
writer.commit(); doc.add(new StoredField("extra_source", "hello world"));
try (DirectoryReader reader = DirectoryReader.open(writer)) { doc.add(new NumericDocValuesField("extra_source", 1));
StoredFields storedFields = reader.storedFields(); }
for (int i = 0; i < reader.maxDoc(); i++) { writer.addDocument(doc);
Document document = storedFields.document(i);
if (pruneIdField) {
assertEquals(1, document.getFields().size());
assertEquals("source", document.getFields().get(0).name());
} else {
assertEquals(2, document.getFields().size());
assertEquals(IdFieldMapper.NAME, document.getFields().get(0).name());
assertEquals("source", document.getFields().get(1).name());
} }
} writer.forceMerge(1);
assertEquals(1, reader.leaves().size()); writer.commit();
LeafReader leafReader = reader.leaves().get(0).reader(); try (DirectoryReader reader = DirectoryReader.open(writer)) {
NumericDocValues extra_source = leafReader.getNumericDocValues("extra_source"); StoredFields storedFields = reader.storedFields();
if (extra_source != null) { for (int i = 0; i < reader.maxDoc(); i++) {
assertEquals(DocIdSetIterator.NO_MORE_DOCS, extra_source.nextDoc()); Document document = storedFields.document(i);
} if (pruneIdField) {
if (leafReader instanceof CodecReader codecReader && reader instanceof StandardDirectoryReader sdr) { assertEquals(1, document.getFields().size());
SegmentInfos segmentInfos = sdr.getSegmentInfos(); assertEquals("source", document.getFields().get(0).name());
MergePolicy.MergeSpecification forcedMerges = mp.findForcedDeletesMerges( } else {
segmentInfos, assertEquals(2, document.getFields().size());
new MergePolicy.MergeContext() { assertEquals(IdFieldMapper.NAME, document.getFields().get(0).name());
@Override assertEquals("source", document.getFields().get(1).name());
public int numDeletesToMerge(SegmentCommitInfo info) {
return info.info.maxDoc() - 1;
}
@Override
public int numDeletedDocs(SegmentCommitInfo info) {
return info.info.maxDoc() - 1;
}
@Override
public InfoStream getInfoStream() {
return new NullInfoStream();
}
@Override
public Set<SegmentCommitInfo> getMergingSegments() {
return Collections.emptySet();
} }
} }
);
// don't wrap if there is nothing to do assertEquals(1, reader.leaves().size());
assertSame(codecReader, forcedMerges.merges.get(0).wrapForMerge(codecReader)); LeafReader leafReader = reader.leaves().get(0).reader();
NumericDocValues extra_source = leafReader.getNumericDocValues(
syntheticRecoverySource ? "extra_source_size" : "extra_source"
);
if (extra_source != null) {
assertEquals(DocIdSetIterator.NO_MORE_DOCS, extra_source.nextDoc());
}
if (leafReader instanceof CodecReader codecReader && reader instanceof StandardDirectoryReader sdr) {
SegmentInfos segmentInfos = sdr.getSegmentInfos();
MergePolicy.MergeSpecification forcedMerges = mp.findForcedDeletesMerges(
segmentInfos,
new MergePolicy.MergeContext() {
@Override
public int numDeletesToMerge(SegmentCommitInfo info) {
return info.info.maxDoc() - 1;
}
@Override
public int numDeletedDocs(SegmentCommitInfo info) {
return info.info.maxDoc() - 1;
}
@Override
public InfoStream getInfoStream() {
return new NullInfoStream();
}
@Override
public Set<SegmentCommitInfo> getMergingSegments() {
return Collections.emptySet();
}
}
);
// don't wrap if there is nothing to do
assertSame(codecReader, forcedMerges.merges.get(0).wrapForMerge(codecReader));
}
}
} }
} }
} }
@@ -123,87 +139,126 @@ public class RecoverySourcePruneMergePolicyTests extends ESTestCase {
} }
public void testPruneSome() throws IOException { public void testPruneSome() throws IOException {
try (Directory dir = newDirectory()) { for (boolean pruneIdField : List.of(true, false)) {
boolean pruneIdField = randomBoolean(); for (boolean syntheticRecoverySource : List.of(true, false)) {
IndexWriterConfig iwc = newIndexWriterConfig(); try (Directory dir = newDirectory()) {
iwc.setMergePolicy( IndexWriterConfig iwc = newIndexWriterConfig();
new RecoverySourcePruneMergePolicy( iwc.setMergePolicy(
"extra_source", new RecoverySourcePruneMergePolicy(
pruneIdField, syntheticRecoverySource ? null : "extra_source",
() -> new TermQuery(new Term("even", "true")), syntheticRecoverySource ? "extra_source_size" : "extra_source",
iwc.getMergePolicy() pruneIdField,
) () -> new TermQuery(new Term("even", "true")),
); iwc.getMergePolicy()
try (IndexWriter writer = new IndexWriter(dir, iwc)) { )
for (int i = 0; i < 20; i++) { );
if (i > 0 && randomBoolean()) { try (IndexWriter writer = new IndexWriter(dir, iwc)) {
writer.flush(); for (int i = 0; i < 20; i++) {
} if (i > 0 && randomBoolean()) {
Document doc = new Document(); writer.flush();
doc.add(new StoredField(IdFieldMapper.NAME, "_id")); }
doc.add(new StringField("even", Boolean.toString(i % 2 == 0), Field.Store.YES)); Document doc = new Document();
doc.add(new StoredField("source", "hello world")); doc.add(new StoredField(IdFieldMapper.NAME, "_id"));
doc.add(new StoredField("extra_source", "hello world")); doc.add(new StringField("even", Boolean.toString(i % 2 == 0), Field.Store.YES));
doc.add(new NumericDocValuesField("extra_source", 1)); doc.add(new StoredField("source", "hello world"));
writer.addDocument(doc); if (syntheticRecoverySource) {
} doc.add(new NumericDocValuesField("extra_source_size", randomIntBetween(10, 10000)));
writer.forceMerge(1); } else {
writer.commit(); doc.add(new StoredField("extra_source", "hello world"));
try (DirectoryReader reader = DirectoryReader.open(writer)) { doc.add(new NumericDocValuesField("extra_source", 1));
assertEquals(1, reader.leaves().size()); }
NumericDocValues extra_source = reader.leaves().get(0).reader().getNumericDocValues("extra_source"); writer.addDocument(doc);
assertNotNull(extra_source); }
StoredFields storedFields = reader.storedFields(); writer.forceMerge(1);
for (int i = 0; i < reader.maxDoc(); i++) { writer.commit();
Document document = storedFields.document(i); try (DirectoryReader reader = DirectoryReader.open(writer)) {
Set<String> collect = document.getFields().stream().map(IndexableField::name).collect(Collectors.toSet()); assertEquals(1, reader.leaves().size());
assertTrue(collect.contains("source")); String extraSourceDVName = syntheticRecoverySource ? "extra_source_size" : "extra_source";
assertTrue(collect.contains("even")); NumericDocValues extra_source = reader.leaves().get(0).reader().getNumericDocValues(extraSourceDVName);
if (collect.size() == 4) { assertNotNull(extra_source);
assertTrue(collect.contains("extra_source")); StoredFields storedFields = reader.storedFields();
assertTrue(collect.contains(IdFieldMapper.NAME)); for (int i = 0; i < reader.maxDoc(); i++) {
assertEquals("true", document.getField("even").stringValue()); Document document = storedFields.document(i);
assertEquals(i, extra_source.nextDoc()); Set<String> collect = document.getFields().stream().map(IndexableField::name).collect(Collectors.toSet());
} else { assertTrue(collect.contains("source"));
assertEquals(pruneIdField ? 2 : 3, document.getFields().size()); assertTrue(collect.contains("even"));
boolean isEven = Boolean.parseBoolean(document.getField("even").stringValue());
if (isEven) {
assertTrue(collect.contains(IdFieldMapper.NAME));
assertThat(collect.contains("extra_source"), equalTo(syntheticRecoverySource == false));
if (extra_source.docID() < i) {
extra_source.advance(i);
}
assertEquals(i, extra_source.docID());
if (syntheticRecoverySource) {
assertThat(extra_source.longValue(), greaterThan(10L));
} else {
assertThat(extra_source.longValue(), equalTo(1L));
}
} else {
assertThat(collect.contains(IdFieldMapper.NAME), equalTo(pruneIdField == false));
assertFalse(collect.contains("extra_source"));
if (extra_source.docID() < i) {
extra_source.advance(i);
}
assertNotEquals(i, extra_source.docID());
}
}
if (extra_source.docID() != DocIdSetIterator.NO_MORE_DOCS) {
assertEquals(DocIdSetIterator.NO_MORE_DOCS, extra_source.nextDoc());
}
} }
} }
assertEquals(DocIdSetIterator.NO_MORE_DOCS, extra_source.nextDoc());
} }
} }
} }
} }
public void testPruneNone() throws IOException { public void testPruneNone() throws IOException {
try (Directory dir = newDirectory()) { for (boolean syntheticRecoverySource : List.of(true, false)) {
IndexWriterConfig iwc = newIndexWriterConfig(); try (Directory dir = newDirectory()) {
iwc.setMergePolicy(new RecoverySourcePruneMergePolicy("extra_source", false, MatchAllDocsQuery::new, iwc.getMergePolicy())); IndexWriterConfig iwc = newIndexWriterConfig();
try (IndexWriter writer = new IndexWriter(dir, iwc)) { iwc.setMergePolicy(
for (int i = 0; i < 20; i++) { new RecoverySourcePruneMergePolicy(
if (i > 0 && randomBoolean()) { syntheticRecoverySource ? null : "extra_source",
writer.flush(); syntheticRecoverySource ? "extra_source_size" : "extra_source",
false,
MatchAllDocsQuery::new,
iwc.getMergePolicy()
)
);
try (IndexWriter writer = new IndexWriter(dir, iwc)) {
for (int i = 0; i < 20; i++) {
if (i > 0 && randomBoolean()) {
writer.flush();
}
Document doc = new Document();
doc.add(new StoredField("source", "hello world"));
if (syntheticRecoverySource) {
doc.add(new NumericDocValuesField("extra_source_size", randomIntBetween(10, 10000)));
} else {
doc.add(new StoredField("extra_source", "hello world"));
doc.add(new NumericDocValuesField("extra_source", 1));
}
writer.addDocument(doc);
} }
Document doc = new Document(); writer.forceMerge(1);
doc.add(new StoredField("source", "hello world")); writer.commit();
doc.add(new StoredField("extra_source", "hello world")); try (DirectoryReader reader = DirectoryReader.open(writer)) {
doc.add(new NumericDocValuesField("extra_source", 1)); assertEquals(1, reader.leaves().size());
writer.addDocument(doc); String extraSourceDVName = syntheticRecoverySource ? "extra_source_size" : "extra_source";
} NumericDocValues extra_source = reader.leaves().get(0).reader().getNumericDocValues(extraSourceDVName);
writer.forceMerge(1); assertNotNull(extra_source);
writer.commit(); StoredFields storedFields = reader.storedFields();
try (DirectoryReader reader = DirectoryReader.open(writer)) { for (int i = 0; i < reader.maxDoc(); i++) {
assertEquals(1, reader.leaves().size()); Document document = storedFields.document(i);
NumericDocValues extra_source = reader.leaves().get(0).reader().getNumericDocValues("extra_source"); Set<String> collect = document.getFields().stream().map(IndexableField::name).collect(Collectors.toSet());
assertNotNull(extra_source); assertTrue(collect.contains("source"));
StoredFields storedFields = reader.storedFields(); assertThat(collect.contains("extra_source"), equalTo(syntheticRecoverySource == false));
for (int i = 0; i < reader.maxDoc(); i++) { assertEquals(i, extra_source.nextDoc());
Document document = storedFields.document(i); }
Set<String> collect = document.getFields().stream().map(IndexableField::name).collect(Collectors.toSet()); assertEquals(DocIdSetIterator.NO_MORE_DOCS, extra_source.nextDoc());
assertTrue(collect.contains("source"));
assertTrue(collect.contains("extra_source"));
assertEquals(i, extra_source.nextDoc());
} }
assertEquals(DocIdSetIterator.NO_MORE_DOCS, extra_source.nextDoc());
} }
} }
} }
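The reworked assertions rely on the sparse doc-values iterator contract: advance(target) positions the iterator on the first document at or after target, so comparing docID() with the expected document tells you whether that document still carries a value after pruning. A small helper sketch of that check against the Lucene NumericDocValues API (the helper name is made up):

    // Sketch: does document `target` have a value in a sparse NumericDocValues iterator?
    import java.io.IOException;
    import org.apache.lucene.index.NumericDocValues;

    final class DocValuesChecks {
        static boolean hasValueAt(NumericDocValues values, int target) throws IOException {
            if (values.docID() < target) {
                values.advance(target);          // moves to the first doc at or after target
            }
            return values.docID() == target;     // greater means the doc was pruned/skipped
        }
    }

Lucene also offers advanceExact(target) for this question; the test spells the check out with advance and docID as above.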


@@ -0,0 +1,507 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.index.engine;
import org.apache.lucene.index.NoMergePolicy;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.core.IOUtils;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.mapper.MappingLookup;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.SnapshotMatchers;
import org.elasticsearch.index.translog.Translog;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.LongSupplier;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
public abstract class SearchBasedChangesSnapshotTests extends EngineTestCase {
@Override
protected Settings indexSettings() {
return Settings.builder()
.put(super.indexSettings())
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) // always enable soft-deletes
.build();
}
protected abstract Translog.Snapshot newRandomSnapshot(
MappingLookup mappingLookup,
Engine.Searcher engineSearcher,
int searchBatchSize,
long fromSeqNo,
long toSeqNo,
boolean requiredFullRange,
boolean singleConsumer,
boolean accessStats,
IndexVersion indexVersionCreated
) throws IOException;
public void testBasics() throws Exception {
long fromSeqNo = randomNonNegativeLong();
long toSeqNo = randomLongBetween(fromSeqNo, Long.MAX_VALUE);
// Empty engine
try (
Translog.Snapshot snapshot = engine.newChangesSnapshot(
"test",
fromSeqNo,
toSeqNo,
true,
randomBoolean(),
randomBoolean(),
randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes())
)
) {
IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot));
assertThat(
error.getMessage(),
containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found")
);
}
try (
Translog.Snapshot snapshot = engine.newChangesSnapshot(
"test",
fromSeqNo,
toSeqNo,
false,
randomBoolean(),
randomBoolean(),
randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes())
)
) {
assertThat(snapshot, SnapshotMatchers.size(0));
}
int numOps = between(1, 100);
int refreshedSeqNo = -1;
for (int i = 0; i < numOps; i++) {
String id = Integer.toString(randomIntBetween(i, i + 5));
ParsedDocument doc = parseDocument(engine.engineConfig.getMapperService(), id, null);
if (randomBoolean()) {
engine.index(indexForDoc(doc));
} else {
engine.delete(new Engine.Delete(doc.id(), Uid.encodeId(doc.id()), primaryTerm.get()));
}
if (rarely()) {
if (randomBoolean()) {
engine.flush();
} else {
engine.refresh("test");
}
refreshedSeqNo = i;
}
}
if (refreshedSeqNo == -1) {
fromSeqNo = between(0, numOps);
toSeqNo = randomLongBetween(fromSeqNo, numOps * 2);
Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL);
try (
Translog.Snapshot snapshot = newRandomSnapshot(
engine.engineConfig.getMapperService().mappingLookup(),
searcher,
between(1, SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE),
fromSeqNo,
toSeqNo,
false,
randomBoolean(),
randomBoolean(),
IndexVersion.current()
)
) {
searcher = null;
assertThat(snapshot, SnapshotMatchers.size(0));
} finally {
IOUtils.close(searcher);
}
searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL);
try (
Translog.Snapshot snapshot = newRandomSnapshot(
engine.engineConfig.getMapperService().mappingLookup(),
searcher,
between(1, SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE),
fromSeqNo,
toSeqNo,
true,
randomBoolean(),
randomBoolean(),
IndexVersion.current()
)
) {
searcher = null;
IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot));
assertThat(
error.getMessage(),
containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found")
);
} finally {
IOUtils.close(searcher);
}
} else {
fromSeqNo = randomLongBetween(0, refreshedSeqNo);
toSeqNo = randomLongBetween(refreshedSeqNo + 1, numOps * 2);
Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL);
try (
Translog.Snapshot snapshot = newRandomSnapshot(
engine.engineConfig.getMapperService().mappingLookup(),
searcher,
between(1, SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE),
fromSeqNo,
toSeqNo,
false,
randomBoolean(),
randomBoolean(),
IndexVersion.current()
)
) {
searcher = null;
assertThat(snapshot, SnapshotMatchers.containsSeqNoRange(fromSeqNo, refreshedSeqNo));
} finally {
IOUtils.close(searcher);
}
searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL);
try (
Translog.Snapshot snapshot = newRandomSnapshot(
engine.engineConfig.getMapperService().mappingLookup(),
searcher,
between(1, SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE),
fromSeqNo,
toSeqNo,
true,
randomBoolean(),
randomBoolean(),
IndexVersion.current()
)
) {
searcher = null;
IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot));
assertThat(
error.getMessage(),
containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found")
);
} finally {
IOUtils.close(searcher);
}
toSeqNo = randomLongBetween(fromSeqNo, refreshedSeqNo);
searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL);
try (
Translog.Snapshot snapshot = newRandomSnapshot(
engine.engineConfig.getMapperService().mappingLookup(),
searcher,
between(1, SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE),
fromSeqNo,
toSeqNo,
true,
randomBoolean(),
randomBoolean(),
IndexVersion.current()
)
) {
searcher = null;
assertThat(snapshot, SnapshotMatchers.containsSeqNoRange(fromSeqNo, toSeqNo));
} finally {
IOUtils.close(searcher);
}
}
// Getting a snapshot via the engine will auto-refresh
fromSeqNo = randomLongBetween(0, numOps - 1);
toSeqNo = randomLongBetween(fromSeqNo, numOps - 1);
try (
Translog.Snapshot snapshot = engine.newChangesSnapshot(
"test",
fromSeqNo,
toSeqNo,
randomBoolean(),
randomBoolean(),
randomBoolean(),
randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes())
)
) {
assertThat(snapshot, SnapshotMatchers.containsSeqNoRange(fromSeqNo, toSeqNo));
}
}
/**
 * A nested document is indexed into Lucene as multiple documents. The root document carries both a sequence number and a primary
 * term, while non-root documents carry only a sequence number. This test verifies that {@link LuceneChangesSnapshot}
 * correctly skips non-root documents and returns at most one operation per sequence number.
*/
public void testSkipNonRootOfNestedDocuments() throws Exception {
Map<Long, Long> seqNoToTerm = new HashMap<>();
List<Engine.Operation> operations = generateHistoryOnReplica(between(1, 100), randomBoolean(), randomBoolean(), randomBoolean());
for (Engine.Operation op : operations) {
if (engine.getLocalCheckpointTracker().hasProcessed(op.seqNo()) == false) {
seqNoToTerm.put(op.seqNo(), op.primaryTerm());
}
applyOperation(engine, op);
if (rarely()) {
engine.refresh("test");
}
if (rarely()) {
engine.rollTranslogGeneration();
}
if (rarely()) {
engine.flush();
}
}
long maxSeqNo = engine.getLocalCheckpointTracker().getMaxSeqNo();
engine.refresh("test");
Engine.Searcher searcher = engine.acquireSearcher("test", Engine.SearcherScope.INTERNAL);
final boolean accessStats = randomBoolean();
try (
Translog.Snapshot snapshot = newRandomSnapshot(
engine.engineConfig.getMapperService().mappingLookup(),
searcher,
between(1, 100),
0,
maxSeqNo,
false,
randomBoolean(),
accessStats,
IndexVersion.current()
)
) {
if (accessStats) {
assertThat(snapshot.totalOperations(), equalTo(seqNoToTerm.size()));
}
Translog.Operation op;
while ((op = snapshot.next()) != null) {
assertThat(op.toString(), op.primaryTerm(), equalTo(seqNoToTerm.get(op.seqNo())));
}
assertThat(snapshot.skippedOperations(), equalTo(0));
}
}
public void testUpdateAndReadChangesConcurrently() throws Exception {
Follower[] followers = new Follower[between(1, 3)];
CountDownLatch readyLatch = new CountDownLatch(followers.length + 1);
AtomicBoolean isDone = new AtomicBoolean();
for (int i = 0; i < followers.length; i++) {
followers[i] = new Follower(engine, isDone, readyLatch);
followers[i].start();
}
boolean onPrimary = randomBoolean();
List<Engine.Operation> operations = new ArrayList<>();
int numOps = frequently() ? scaledRandomIntBetween(1, 1500) : scaledRandomIntBetween(5000, 20_000);
for (int i = 0; i < numOps; i++) {
String id = Integer.toString(randomIntBetween(0, randomBoolean() ? 10 : numOps * 2));
ParsedDocument doc = parseDocument(engine.engineConfig.getMapperService(), id, randomAlphaOfLengthBetween(1, 5));
final Engine.Operation op;
if (onPrimary) {
if (randomBoolean()) {
op = new Engine.Index(newUid(doc), primaryTerm.get(), doc);
} else {
op = new Engine.Delete(doc.id(), Uid.encodeId(doc.id()), primaryTerm.get());
}
} else {
if (randomBoolean()) {
op = replicaIndexForDoc(doc, randomNonNegativeLong(), i, randomBoolean());
} else {
op = replicaDeleteForDoc(doc.id(), randomNonNegativeLong(), i, randomNonNegativeLong());
}
}
operations.add(op);
}
readyLatch.countDown();
readyLatch.await();
Randomness.shuffle(operations);
concurrentlyApplyOps(operations, engine);
assertThat(engine.getLocalCheckpointTracker().getProcessedCheckpoint(), equalTo(operations.size() - 1L));
isDone.set(true);
for (Follower follower : followers) {
follower.join();
IOUtils.close(follower.engine, follower.engine.store);
}
}
class Follower extends Thread {
private final InternalEngine leader;
private final InternalEngine engine;
private final TranslogHandler translogHandler;
private final AtomicBoolean isDone;
private final CountDownLatch readLatch;
Follower(InternalEngine leader, AtomicBoolean isDone, CountDownLatch readLatch) throws IOException {
this.leader = leader;
this.isDone = isDone;
this.readLatch = readLatch;
this.engine = createEngine(defaultSettings, createStore(), createTempDir(), newMergePolicy());
this.translogHandler = new TranslogHandler(engine.engineConfig.getMapperService());
}
void pullOperations(InternalEngine follower) throws IOException {
long leaderCheckpoint = leader.getLocalCheckpointTracker().getProcessedCheckpoint();
long followerCheckpoint = follower.getLocalCheckpointTracker().getProcessedCheckpoint();
if (followerCheckpoint < leaderCheckpoint) {
long fromSeqNo = followerCheckpoint + 1;
long batchSize = randomLongBetween(0, 100);
long toSeqNo = Math.min(fromSeqNo + batchSize, leaderCheckpoint);
try (
Translog.Snapshot snapshot = leader.newChangesSnapshot(
"test",
fromSeqNo,
toSeqNo,
true,
randomBoolean(),
randomBoolean(),
randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes())
)
) {
translogHandler.run(follower, snapshot);
}
}
}
@Override
public void run() {
try {
readLatch.countDown();
readLatch.await();
while (isDone.get() == false
|| engine.getLocalCheckpointTracker().getProcessedCheckpoint() < leader.getLocalCheckpointTracker()
.getProcessedCheckpoint()) {
pullOperations(engine);
}
assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine);
// we have to verify without _source since we randomly test without it
List<DocIdSeqNoAndSource> docsWithoutSourceOnFollower = getDocIds(engine, true).stream()
.map(d -> new DocIdSeqNoAndSource(d.id(), null, d.seqNo(), d.primaryTerm(), d.version()))
.toList();
List<DocIdSeqNoAndSource> docsWithoutSourceOnLeader = getDocIds(leader, true).stream()
.map(d -> new DocIdSeqNoAndSource(d.id(), null, d.seqNo(), d.primaryTerm(), d.version()))
.toList();
assertThat(docsWithoutSourceOnFollower, equalTo(docsWithoutSourceOnLeader));
} catch (Exception ex) {
throw new AssertionError(ex);
}
}
}
private List<Translog.Operation> drainAll(Translog.Snapshot snapshot) throws IOException {
List<Translog.Operation> operations = new ArrayList<>();
Translog.Operation op;
while ((op = snapshot.next()) != null) {
final Translog.Operation newOp = op;
logger.trace("Reading [{}]", op);
assert operations.stream().allMatch(o -> o.seqNo() < newOp.seqNo()) : "Operations [" + operations + "], op [" + op + "]";
operations.add(newOp);
}
return operations;
}
public void testOverFlow() throws Exception {
long fromSeqNo = randomLongBetween(0, 5);
long toSeqNo = randomLongBetween(Long.MAX_VALUE - 5, Long.MAX_VALUE);
try (
Translog.Snapshot snapshot = engine.newChangesSnapshot(
"test",
fromSeqNo,
toSeqNo,
true,
randomBoolean(),
randomBoolean(),
randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes())
)
) {
IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot));
assertThat(
error.getMessage(),
containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found")
);
}
}
public void testStats() throws Exception {
try (Store store = createStore(); Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) {
int numOps = between(100, 5000);
long startingSeqNo = randomLongBetween(0, Integer.MAX_VALUE);
List<Engine.Operation> operations = generateHistoryOnReplica(
numOps,
startingSeqNo,
randomBoolean(),
randomBoolean(),
randomBoolean()
);
applyOperations(engine, operations);
LongSupplier fromSeqNo = () -> {
if (randomBoolean()) {
return 0L;
} else if (randomBoolean()) {
return startingSeqNo;
} else {
return randomLongBetween(0, startingSeqNo);
}
};
LongSupplier toSeqNo = () -> {
final long maxSeqNo = engine.getSeqNoStats(-1).getMaxSeqNo();
if (randomBoolean()) {
return maxSeqNo;
} else if (randomBoolean()) {
return Long.MAX_VALUE;
} else {
return randomLongBetween(maxSeqNo, Long.MAX_VALUE);
}
};
// Can't access stats if they weren't requested
try (
Translog.Snapshot snapshot = engine.newChangesSnapshot(
"test",
fromSeqNo.getAsLong(),
toSeqNo.getAsLong(),
false,
randomBoolean(),
false,
randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes())
)
) {
IllegalStateException error = expectThrows(IllegalStateException.class, snapshot::totalOperations);
assertThat(error.getMessage(), equalTo("Access stats of a snapshot created with [access_stats] is false"));
final List<Translog.Operation> translogOps = drainAll(snapshot);
assertThat(translogOps, hasSize(numOps));
error = expectThrows(IllegalStateException.class, snapshot::totalOperations);
assertThat(error.getMessage(), equalTo("Access stats of a snapshot created with [access_stats] is false"));
}
// Access stats and operations
try (
Translog.Snapshot snapshot = engine.newChangesSnapshot(
"test",
fromSeqNo.getAsLong(),
toSeqNo.getAsLong(),
false,
randomBoolean(),
true,
randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes())
)
) {
assertThat(snapshot.totalOperations(), equalTo(numOps));
final List<Translog.Operation> translogOps = drainAll(snapshot);
assertThat(translogOps, hasSize(numOps));
assertThat(snapshot.totalOperations(), equalTo(numOps));
}
// Verify count
assertThat(engine.countChanges("test", fromSeqNo.getAsLong(), toSeqNo.getAsLong()), equalTo(numOps));
}
}
}
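Reviewer note: throughout these tests, Engine#newChangesSnapshot is called with a new trailing long argument drawn from randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes()). A minimal usage sketch follows, assuming that argument is a per-batch memory budget in bytes (the tests constrain only its range, not its meaning); the "peer-recovery" label and the 4 MB figure are placeholders.
    // Sketch only: drain a changes snapshot using the extended signature exercised above.
    long maxMemory = ByteSizeValue.ofMb(4).getBytes();   // assumed: per-batch memory budget in bytes
    try (
        Translog.Snapshot snapshot = engine.newChangesSnapshot(
            "peer-recovery",   // source label of the request (placeholder)
            0,                 // fromSeqNo (inclusive)
            Long.MAX_VALUE,    // toSeqNo (inclusive)
            true,              // requiredFullRange: fail if any seq_no in the range is missing
            true,              // singleConsumer (per the abstract factory above)
            true,              // accessStats: allow totalOperations()/skippedOperations()
            maxMemory
        )
    ) {
        Translog.Operation op;
        while ((op = snapshot.next()) != null) {
            // apply op to the recovering replica / follower
        }
    }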

View file

@ -28,6 +28,7 @@ import org.elasticsearch.xcontent.json.JsonXContent;
import java.io.IOException; import java.io.IOException;
import java.util.List; import java.util.List;
import java.util.Locale;
import java.util.Map; import java.util.Map;
import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_SOURCE_ENABLED_SETTING; import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_SOURCE_ENABLED_SETTING;
@ -405,16 +406,114 @@ public class SourceFieldMapperTests extends MetadataMapperTestCase {
} }
} }
public void testRecoverySourceWitInvalidSettings() {
{
Settings settings = Settings.builder().put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true).build();
IllegalArgumentException exc = expectThrows(
IllegalArgumentException.class,
() -> createMapperService(settings, topMapping(b -> {}))
);
assertThat(
exc.getMessage(),
containsString(
String.format(
Locale.ROOT,
"The setting [%s] is only permitted",
IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey()
)
)
);
}
{
Settings settings = Settings.builder()
.put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.STORED.toString())
.put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true)
.build();
IllegalArgumentException exc = expectThrows(
IllegalArgumentException.class,
() -> createMapperService(settings, topMapping(b -> {}))
);
assertThat(
exc.getMessage(),
containsString(
String.format(
Locale.ROOT,
"The setting [%s] is only permitted",
IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey()
)
)
);
}
{
Settings settings = Settings.builder()
.put(IndexSettings.MODE.getKey(), IndexMode.STANDARD.toString())
.put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true)
.build();
IllegalArgumentException exc = expectThrows(
IllegalArgumentException.class,
() -> createMapperService(settings, topMapping(b -> {}))
);
assertThat(
exc.getMessage(),
containsString(
String.format(
Locale.ROOT,
"The setting [%s] is only permitted",
IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey()
)
)
);
}
{
Settings settings = Settings.builder()
.put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.toString())
.put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true)
.build();
IllegalArgumentException exc = expectThrows(
IllegalArgumentException.class,
() -> createMapperService(
IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.USE_SYNTHETIC_SOURCE_FOR_RECOVERY),
settings,
() -> false,
topMapping(b -> {})
)
);
assertThat(
exc.getMessage(),
containsString(
String.format(
Locale.ROOT,
"The setting [%s] is unavailable on this cluster",
IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey()
)
)
);
}
}
public void testRecoverySourceWithSyntheticSource() throws IOException { public void testRecoverySourceWithSyntheticSource() throws IOException {
{ {
MapperService mapperService = createMapperService( Settings settings = Settings.builder()
topMapping(b -> b.startObject(SourceFieldMapper.NAME).field("mode", "synthetic").endObject()) .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.toString())
); .build();
MapperService mapperService = createMapperService(settings, topMapping(b -> {}));
DocumentMapper docMapper = mapperService.documentMapper(); DocumentMapper docMapper = mapperService.documentMapper();
ParsedDocument doc = docMapper.parse(source(b -> { b.field("field1", "value1"); })); ParsedDocument doc = docMapper.parse(source(b -> b.field("field1", "value1")));
assertNotNull(doc.rootDoc().getField("_recovery_source")); assertNotNull(doc.rootDoc().getField("_recovery_source"));
assertThat(doc.rootDoc().getField("_recovery_source").binaryValue(), equalTo(new BytesRef("{\"field1\":\"value1\"}"))); assertThat(doc.rootDoc().getField("_recovery_source").binaryValue(), equalTo(new BytesRef("{\"field1\":\"value1\"}")));
} }
{
Settings settings = Settings.builder()
.put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.toString())
.put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true)
.build();
MapperService mapperService = createMapperService(settings, topMapping(b -> {}));
DocumentMapper docMapper = mapperService.documentMapper();
ParsedDocument doc = docMapper.parse(source(b -> b.field("field1", "value1")));
assertNotNull(doc.rootDoc().getField("_recovery_source_size"));
assertThat(doc.rootDoc().getField("_recovery_source_size").numericValue(), equalTo(19L));
}
{ {
Settings settings = Settings.builder().put(INDICES_RECOVERY_SOURCE_ENABLED_SETTING.getKey(), false).build(); Settings settings = Settings.builder().put(INDICES_RECOVERY_SOURCE_ENABLED_SETTING.getKey(), false).build();
MapperService mapperService = createMapperService( MapperService mapperService = createMapperService(
@ -436,6 +535,17 @@ public class SourceFieldMapperTests extends MetadataMapperTestCase {
assertNotNull(doc.rootDoc().getField("_recovery_source")); assertNotNull(doc.rootDoc().getField("_recovery_source"));
assertThat(doc.rootDoc().getField("_recovery_source").binaryValue(), equalTo(new BytesRef("{\"@timestamp\":\"2012-02-13\"}"))); assertThat(doc.rootDoc().getField("_recovery_source").binaryValue(), equalTo(new BytesRef("{\"@timestamp\":\"2012-02-13\"}")));
} }
{
Settings settings = Settings.builder()
.put(IndexSettings.MODE.getKey(), IndexMode.LOGSDB.getName())
.put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true)
.build();
MapperService mapperService = createMapperService(settings, mapping(b -> {}));
DocumentMapper docMapper = mapperService.documentMapper();
ParsedDocument doc = docMapper.parse(source(b -> { b.field("@timestamp", "2012-02-13"); }));
assertNotNull(doc.rootDoc().getField("_recovery_source_size"));
assertThat(doc.rootDoc().getField("_recovery_source_size").numericValue(), equalTo(27L));
}
{ {
Settings settings = Settings.builder() Settings settings = Settings.builder()
.put(IndexSettings.MODE.getKey(), IndexMode.LOGSDB.getName()) .put(IndexSettings.MODE.getKey(), IndexMode.LOGSDB.getName())
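Editor's note on the two size assertions above: with synthetic recovery source enabled, the tests expect a numeric _recovery_source_size field rather than a stored _recovery_source, and the asserted values are simply the UTF-8 lengths of the source JSON. This is an observation about the assertions, not a statement about the production implementation:
    // "{\"field1\":\"value1\"}"          -> 19 bytes, matching the asserted 19L
    // "{\"@timestamp\":\"2012-02-13\"}"  -> 27 bytes, matching the asserted 27L
    assert "{\"field1\":\"value1\"}".getBytes(StandardCharsets.UTF_8).length == 19;
    assert "{\"@timestamp\":\"2012-02-13\"}".getBytes(StandardCharsets.UTF_8).length == 27;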

View file

@ -22,6 +22,7 @@ import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
@ -486,7 +487,8 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
Long.MAX_VALUE, Long.MAX_VALUE,
false, false,
randomBoolean(), randomBoolean(),
randomBoolean() randomBoolean(),
randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes())
) )
) { ) {
assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps));
@ -513,7 +515,8 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
Long.MAX_VALUE, Long.MAX_VALUE,
false, false,
randomBoolean(), randomBoolean(),
randomBoolean() randomBoolean(),
randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes())
) )
) { ) {
assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps));
@ -608,7 +611,17 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
shards.promoteReplicaToPrimary(replica2).get(); shards.promoteReplicaToPrimary(replica2).get();
logger.info("--> Recover replica3 from replica2"); logger.info("--> Recover replica3 from replica2");
recoverReplica(replica3, replica2, true); recoverReplica(replica3, replica2, true);
try (Translog.Snapshot snapshot = replica3.newChangesSnapshot("test", 0, Long.MAX_VALUE, false, randomBoolean(), true)) { try (
Translog.Snapshot snapshot = replica3.newChangesSnapshot(
"test",
0,
Long.MAX_VALUE,
false,
randomBoolean(),
true,
randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes())
)
) {
assertThat(snapshot.totalOperations(), equalTo(initDocs + 1)); assertThat(snapshot.totalOperations(), equalTo(initDocs + 1));
final List<Translog.Operation> expectedOps = new ArrayList<>(initOperations); final List<Translog.Operation> expectedOps = new ArrayList<>(initOperations);
expectedOps.add(op2); expectedOps.add(op2);

View file

@ -1819,7 +1819,15 @@ public class IndexShardTests extends IndexShardTestCase {
shard.refresh("test"); shard.refresh("test");
} else { } else {
// trigger internal refresh // trigger internal refresh
shard.newChangesSnapshot("test", 0, Long.MAX_VALUE, false, randomBoolean(), randomBoolean()).close(); shard.newChangesSnapshot(
"test",
0,
Long.MAX_VALUE,
false,
randomBoolean(),
randomBoolean(),
randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes())
).close();
} }
assertThat(shard.getShardFieldStats(), sameInstance(stats)); assertThat(shard.getShardFieldStats(), sameInstance(stats));
// index more docs // index more docs
@ -1837,7 +1845,15 @@ public class IndexShardTests extends IndexShardTestCase {
shard.refresh("test"); shard.refresh("test");
} else { } else {
// trigger internal refresh // trigger internal refresh
shard.newChangesSnapshot("test", 0, Long.MAX_VALUE, false, randomBoolean(), randomBoolean()).close(); shard.newChangesSnapshot(
"test",
0,
Long.MAX_VALUE,
false,
randomBoolean(),
randomBoolean(),
randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes())
).close();
} }
stats = shard.getShardFieldStats(); stats = shard.getShardFieldStats();
assertThat(stats.numSegments(), equalTo(2)); assertThat(stats.numSegments(), equalTo(2));

View file

@ -158,7 +158,7 @@ public class RefreshListenersTests extends ESTestCase {
System::nanoTime, System::nanoTime,
null, null,
true, true,
null EngineTestCase.createMapperService()
); );
engine = new InternalEngine(config); engine = new InternalEngine(config);
EngineTestCase.recoverFromTranslog(engine, (e, s) -> 0, Long.MAX_VALUE); EngineTestCase.recoverFromTranslog(engine, (e, s) -> 0, Long.MAX_VALUE);

View file

@ -29,6 +29,7 @@ import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.index.VersionType; import org.elasticsearch.index.VersionType;
@ -211,7 +212,8 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
Long.MAX_VALUE, Long.MAX_VALUE,
false, false,
randomBoolean(), randomBoolean(),
randomBoolean() randomBoolean(),
randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes())
) )
) { ) {
assertThat(snapshot, SnapshotMatchers.size(6)); assertThat(snapshot, SnapshotMatchers.size(6));

File diff suppressed because it is too large

View file

@ -110,8 +110,10 @@ public class SnapshotShutdownProgressTrackerTests extends ESTestCase {
} }
public void testTrackerLogsStats() { public void testTrackerLogsStats() {
final String dummyStatusMsg = "Dummy log message for index shard snapshot statuses";
SnapshotShutdownProgressTracker tracker = new SnapshotShutdownProgressTracker( SnapshotShutdownProgressTracker tracker = new SnapshotShutdownProgressTracker(
getLocalNodeIdSupplier, getLocalNodeIdSupplier,
(callerLogger) -> callerLogger.info(dummyStatusMsg),
clusterSettings, clusterSettings,
testThreadPool testThreadPool
); );
@ -144,6 +146,14 @@ public class SnapshotShutdownProgressTrackerTests extends ESTestCase {
"*Shard snapshot completion stats since shutdown began: Done [2]; Failed [1]; Aborted [1]; Paused [1]*" "*Shard snapshot completion stats since shutdown began: Done [2]; Failed [1]; Aborted [1]; Paused [1]*"
) )
); );
mockLog.addExpectation(
new MockLog.SeenEventExpectation(
"index shard snapshot statuses",
SnapshotShutdownProgressTracker.class.getCanonicalName(),
Level.INFO,
dummyStatusMsg
)
);
// Simulate updating the shard snapshot completion stats. // Simulate updating the shard snapshot completion stats.
simulateShardSnapshotsCompleting(tracker, 5); simulateShardSnapshotsCompleting(tracker, 5);
@ -171,6 +181,7 @@ public class SnapshotShutdownProgressTrackerTests extends ESTestCase {
); );
SnapshotShutdownProgressTracker tracker = new SnapshotShutdownProgressTracker( SnapshotShutdownProgressTracker tracker = new SnapshotShutdownProgressTracker(
getLocalNodeIdSupplier, getLocalNodeIdSupplier,
(callerLogger) -> {},
clusterSettingsDisabledLogging, clusterSettingsDisabledLogging,
testThreadPool testThreadPool
); );
@ -214,6 +225,7 @@ public class SnapshotShutdownProgressTrackerTests extends ESTestCase {
); );
SnapshotShutdownProgressTracker tracker = new SnapshotShutdownProgressTracker( SnapshotShutdownProgressTracker tracker = new SnapshotShutdownProgressTracker(
getLocalNodeIdSupplier, getLocalNodeIdSupplier,
(callerLogger) -> {},
clusterSettingsDisabledLogging, clusterSettingsDisabledLogging,
testThreadPool testThreadPool
); );
@ -253,6 +265,7 @@ public class SnapshotShutdownProgressTrackerTests extends ESTestCase {
public void testTrackerPauseTimestamp() { public void testTrackerPauseTimestamp() {
SnapshotShutdownProgressTracker tracker = new SnapshotShutdownProgressTracker( SnapshotShutdownProgressTracker tracker = new SnapshotShutdownProgressTracker(
getLocalNodeIdSupplier, getLocalNodeIdSupplier,
(callerLogger) -> {},
clusterSettings, clusterSettings,
testThreadPool testThreadPool
); );
@ -263,7 +276,7 @@ public class SnapshotShutdownProgressTrackerTests extends ESTestCase {
"pausing timestamp should be set", "pausing timestamp should be set",
SnapshotShutdownProgressTracker.class.getName(), SnapshotShutdownProgressTracker.class.getName(),
Level.INFO, Level.INFO,
"*Finished signalling shard snapshots to pause at [" + testThreadPool.relativeTimeInMillis() + "]*" "*Finished signalling shard snapshots to pause at [" + testThreadPool.relativeTimeInMillis() + " millis]*"
) )
); );
@ -283,6 +296,7 @@ public class SnapshotShutdownProgressTrackerTests extends ESTestCase {
public void testTrackerRequestsToMaster() { public void testTrackerRequestsToMaster() {
SnapshotShutdownProgressTracker tracker = new SnapshotShutdownProgressTracker( SnapshotShutdownProgressTracker tracker = new SnapshotShutdownProgressTracker(
getLocalNodeIdSupplier, getLocalNodeIdSupplier,
(callerLogger) -> {},
clusterSettings, clusterSettings,
testThreadPool testThreadPool
); );
@ -335,6 +349,7 @@ public class SnapshotShutdownProgressTrackerTests extends ESTestCase {
public void testTrackerClearShutdown() { public void testTrackerClearShutdown() {
SnapshotShutdownProgressTracker tracker = new SnapshotShutdownProgressTracker( SnapshotShutdownProgressTracker tracker = new SnapshotShutdownProgressTracker(
getLocalNodeIdSupplier, getLocalNodeIdSupplier,
(callerLogger) -> {},
clusterSettings, clusterSettings,
testThreadPool testThreadPool
); );
@ -345,7 +360,7 @@ public class SnapshotShutdownProgressTrackerTests extends ESTestCase {
"pausing timestamp should be unset", "pausing timestamp should be unset",
SnapshotShutdownProgressTracker.class.getName(), SnapshotShutdownProgressTracker.class.getName(),
Level.INFO, Level.INFO,
"*Finished signalling shard snapshots to pause at [-1]*" "*Finished signalling shard snapshots to pause at [-1 millis]*"
) )
); );
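Reviewer note: every construction site above gains a second argument, a consumer that receives the caller's Logger so the tracker can emit per-shard snapshot statuses. A hedged sketch of the new shape; the node-id supplier body and the log text are placeholders, not taken from the change:
    SnapshotShutdownProgressTracker tracker = new SnapshotShutdownProgressTracker(
        () -> localNodeId,                                                    // supplier of the local node id (placeholder)
        callerLogger -> callerLogger.info("index shard snapshot statuses"),   // status-logging hook added in this change
        clusterSettings,
        threadPool
    );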

View file

@ -23,6 +23,7 @@ import org.elasticsearch.core.Booleans;
import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.PathUtils;
import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.SuppressForbidden;
import org.elasticsearch.jdk.JarHell; import org.elasticsearch.jdk.JarHell;
import org.elasticsearch.jdk.RuntimeVersionFeature;
import org.elasticsearch.plugins.PluginDescriptor; import org.elasticsearch.plugins.PluginDescriptor;
import org.elasticsearch.secure_sm.SecureSM; import org.elasticsearch.secure_sm.SecureSM;
import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase;
@ -118,8 +119,8 @@ public class BootstrapForTesting {
// Log ifconfig output before SecurityManager is installed // Log ifconfig output before SecurityManager is installed
IfConfig.logIfNecessary(); IfConfig.logIfNecessary();
// install security manager if requested // install security manager if available and requested
if (systemPropertyAsBoolean("tests.security.manager", true)) { if (RuntimeVersionFeature.isSecurityManagerAvailable() && systemPropertyAsBoolean("tests.security.manager", true)) {
try { try {
// initialize paths the same exact way as bootstrap // initialize paths the same exact way as bootstrap
Permissions perms = new Permissions(); Permissions perms = new Permissions();
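Editor's note: the guard now also checks RuntimeVersionFeature.isSecurityManagerAvailable() before installing the test SecurityManager. The diff does not show that helper; below is a hypothetical sketch, assuming it simply keys off the runtime feature release in which the Security Manager stopped being installable:
    // Hypothetical sketch, not the actual implementation: treat the Security Manager as
    // available only on runtimes older than JDK 24.
    public final class RuntimeVersionFeature {
        private RuntimeVersionFeature() {}

        public static boolean isSecurityManagerAvailable() {
            return Runtime.version().feature() < 24;
        }
    }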

View file

@ -57,13 +57,12 @@ import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.AllocationId;
import org.elasticsearch.common.CheckedBiFunction; import org.elasticsearch.common.CheckedBiFunction;
import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.core.CheckedFunction;
import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.IOUtils;
@ -142,6 +141,7 @@ import static java.util.Collections.shuffle;
import static org.elasticsearch.index.engine.Engine.Operation.Origin.PEER_RECOVERY; import static org.elasticsearch.index.engine.Engine.Operation.Origin.PEER_RECOVERY;
import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY; import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY;
import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA; import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo;
@ -160,6 +160,8 @@ public abstract class EngineTestCase extends ESTestCase {
protected Store store; protected Store store;
protected Store storeReplica; protected Store storeReplica;
protected MapperService mapperService;
protected InternalEngine engine; protected InternalEngine engine;
protected InternalEngine replicaEngine; protected InternalEngine replicaEngine;
@ -198,6 +200,27 @@ public abstract class EngineTestCase extends ESTestCase {
.build(); .build();
} }
protected String defaultMapping() {
return """
{
"dynamic": false,
"properties": {
"value": {
"type": "keyword"
},
"nested_field": {
"type": "nested",
"properties": {
"field-0": {
"type": "keyword"
}
}
}
}
}
""";
}
@Override @Override
@Before @Before
public void setUp() throws Exception { public void setUp() throws Exception {
@ -212,15 +235,16 @@ public abstract class EngineTestCase extends ESTestCase {
} else { } else {
codecName = "default"; codecName = "default";
} }
defaultSettings = IndexSettingsModule.newIndexSettings("test", indexSettings()); defaultSettings = IndexSettingsModule.newIndexSettings("index", indexSettings());
threadPool = new TestThreadPool(getClass().getName()); threadPool = new TestThreadPool(getClass().getName());
store = createStore(); store = createStore();
storeReplica = createStore(); storeReplica = createStore();
Lucene.cleanLuceneIndex(store.directory()); Lucene.cleanLuceneIndex(store.directory());
Lucene.cleanLuceneIndex(storeReplica.directory()); Lucene.cleanLuceneIndex(storeReplica.directory());
primaryTranslogDir = createTempDir("translog-primary"); primaryTranslogDir = createTempDir("translog-primary");
translogHandler = createTranslogHandler(defaultSettings); mapperService = createMapperService(defaultSettings.getSettings(), defaultMapping());
engine = createEngine(store, primaryTranslogDir); translogHandler = createTranslogHandler(mapperService);
engine = createEngine(defaultSettings, store, primaryTranslogDir, newMergePolicy());
LiveIndexWriterConfig currentIndexWriterConfig = engine.getCurrentIndexWriterConfig(); LiveIndexWriterConfig currentIndexWriterConfig = engine.getCurrentIndexWriterConfig();
assertEquals(engine.config().getCodec().getName(), codecService.codec(codecName).getName()); assertEquals(engine.config().getCodec().getName(), codecService.codec(codecName).getName());
@ -230,7 +254,7 @@ public abstract class EngineTestCase extends ESTestCase {
engine.config().setEnableGcDeletes(false); engine.config().setEnableGcDeletes(false);
} }
replicaTranslogDir = createTempDir("translog-replica"); replicaTranslogDir = createTempDir("translog-replica");
replicaEngine = createEngine(storeReplica, replicaTranslogDir); replicaEngine = createEngine(defaultSettings, storeReplica, replicaTranslogDir, newMergePolicy());
currentIndexWriterConfig = replicaEngine.getCurrentIndexWriterConfig(); currentIndexWriterConfig = replicaEngine.getCurrentIndexWriterConfig();
assertEquals(replicaEngine.config().getCodec().getName(), codecService.codec(codecName).getName()); assertEquals(replicaEngine.config().getCodec().getName(), codecService.codec(codecName).getName());
@ -433,37 +457,9 @@ public abstract class EngineTestCase extends ESTestCase {
); );
} }
public static CheckedBiFunction<String, Integer, ParsedDocument, IOException> nestedParsedDocFactory() throws Exception { public static ParsedDocument parseDocument(MapperService mapperService, String id, String routing) {
final MapperService mapperService = createMapperService(); SourceToParse sourceToParse = new SourceToParse(id, new BytesArray("{ \"value\" : \"test\" }"), XContentType.JSON, routing);
final String nestedMapping = Strings.toString( return mapperService.documentMapper().parse(sourceToParse);
XContentFactory.jsonBuilder()
.startObject()
.startObject("type")
.startObject("properties")
.startObject("nested_field")
.field("type", "nested")
.endObject()
.endObject()
.endObject()
.endObject()
);
final DocumentMapper nestedMapper = mapperService.merge(
"type",
new CompressedXContent(nestedMapping),
MapperService.MergeReason.MAPPING_UPDATE
);
return (docId, nestedFieldValues) -> {
final XContentBuilder source = XContentFactory.jsonBuilder().startObject().field("field", "value");
if (nestedFieldValues > 0) {
XContentBuilder nestedField = source.startObject("nested_field");
for (int i = 0; i < nestedFieldValues; i++) {
nestedField.field("field-" + i, "value-" + i);
}
source.endObject();
}
source.endObject();
return nestedMapper.parse(new SourceToParse(docId, BytesReference.bytes(source), XContentType.JSON));
};
} }
protected Store createStore() throws IOException { protected Store createStore() throws IOException {
@ -500,8 +496,8 @@ public abstract class EngineTestCase extends ESTestCase {
); );
} }
protected TranslogHandler createTranslogHandler(IndexSettings indexSettings) { protected TranslogHandler createTranslogHandler(MapperService mapperService) {
return new TranslogHandler(xContentRegistry(), indexSettings); return new TranslogHandler(mapperService);
} }
protected InternalEngine createEngine(Store store, Path translogPath) throws IOException { protected InternalEngine createEngine(Store store, Path translogPath) throws IOException {
@ -857,7 +853,7 @@ public abstract class EngineTestCase extends ESTestCase {
this::relativeTimeInNanos, this::relativeTimeInNanos,
indexCommitListener, indexCommitListener,
true, true,
null mapperService
); );
} }
@ -1031,6 +1027,22 @@ public abstract class EngineTestCase extends ESTestCase {
return ops; return ops;
} }
private CheckedBiFunction<String, Integer, ParsedDocument, IOException> nestedParsedDocFactory(MapperService mapperService) {
final DocumentMapper nestedMapper = mapperService.documentMapper();
return (docId, nestedFieldValues) -> {
final XContentBuilder source = XContentFactory.jsonBuilder().startObject().field("value", "test");
if (nestedFieldValues > 0) {
XContentBuilder nestedField = source.startObject("nested_field");
for (int i = 0; i < nestedFieldValues; i++) {
nestedField.field("field-" + i, "value-" + i);
}
source.endObject();
}
source.endObject();
return nestedMapper.parse(new SourceToParse(docId, BytesReference.bytes(source), XContentType.JSON));
};
}
public List<Engine.Operation> generateHistoryOnReplica( public List<Engine.Operation> generateHistoryOnReplica(
int numOps, int numOps,
boolean allowGapInSeqNo, boolean allowGapInSeqNo,
@ -1050,7 +1062,9 @@ public abstract class EngineTestCase extends ESTestCase {
long seqNo = startingSeqNo; long seqNo = startingSeqNo;
final int maxIdValue = randomInt(numOps * 2); final int maxIdValue = randomInt(numOps * 2);
final List<Engine.Operation> operations = new ArrayList<>(numOps); final List<Engine.Operation> operations = new ArrayList<>(numOps);
CheckedBiFunction<String, Integer, ParsedDocument, IOException> nestedParsedDocFactory = nestedParsedDocFactory(); CheckedBiFunction<String, Integer, ParsedDocument, IOException> nestedParsedDocFactory = nestedParsedDocFactory(
engine.engineConfig.getMapperService()
);
for (int i = 0; i < numOps; i++) { for (int i = 0; i < numOps; i++) {
final String id = Integer.toString(randomInt(maxIdValue)); final String id = Integer.toString(randomInt(maxIdValue));
final Engine.Operation.TYPE opType = randomFrom(Engine.Operation.TYPE.values()); final Engine.Operation.TYPE opType = randomFrom(Engine.Operation.TYPE.values());
@ -1059,7 +1073,9 @@ public abstract class EngineTestCase extends ESTestCase {
final long startTime = threadPool.relativeTimeInNanos(); final long startTime = threadPool.relativeTimeInNanos();
final int copies = allowDuplicate && rarely() ? between(2, 4) : 1; final int copies = allowDuplicate && rarely() ? between(2, 4) : 1;
for (int copy = 0; copy < copies; copy++) { for (int copy = 0; copy < copies; copy++) {
final ParsedDocument doc = isNestedDoc ? nestedParsedDocFactory.apply(id, nestedValues) : createParsedDoc(id, null); final ParsedDocument doc = isNestedDoc
? nestedParsedDocFactory.apply(id, nestedValues)
: parseDocument(engine.engineConfig.getMapperService(), id, null);
switch (opType) { switch (opType) {
case INDEX -> operations.add( case INDEX -> operations.add(
new Engine.Index( new Engine.Index(
@ -1274,7 +1290,17 @@ public abstract class EngineTestCase extends ESTestCase {
*/ */
public static List<Translog.Operation> readAllOperationsInLucene(Engine engine) throws IOException { public static List<Translog.Operation> readAllOperationsInLucene(Engine engine) throws IOException {
final List<Translog.Operation> operations = new ArrayList<>(); final List<Translog.Operation> operations = new ArrayList<>();
try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", 0, Long.MAX_VALUE, false, randomBoolean(), randomBoolean())) { try (
Translog.Snapshot snapshot = engine.newChangesSnapshot(
"test",
0,
Long.MAX_VALUE,
false,
randomBoolean(),
randomBoolean(),
randomLongBetween(1, ByteSizeValue.ofMb(32).getBytes())
)
) {
Translog.Operation op; Translog.Operation op;
while ((op = snapshot.next()) != null) { while ((op = snapshot.next()) != null) {
operations.add(op); operations.add(op);
@ -1345,7 +1371,15 @@ public abstract class EngineTestCase extends ESTestCase {
assertThat(luceneOp.toString(), luceneOp.primaryTerm(), equalTo(translogOp.primaryTerm())); assertThat(luceneOp.toString(), luceneOp.primaryTerm(), equalTo(translogOp.primaryTerm()));
assertThat(luceneOp.opType(), equalTo(translogOp.opType())); assertThat(luceneOp.opType(), equalTo(translogOp.opType()));
if (luceneOp.opType() == Translog.Operation.Type.INDEX) { if (luceneOp.opType() == Translog.Operation.Type.INDEX) {
assertThat(((Translog.Index) luceneOp).source(), equalTo(((Translog.Index) translogOp).source())); if (engine.engineConfig.getIndexSettings().isRecoverySourceSyntheticEnabled()) {
assertToXContentEquivalent(
((Translog.Index) luceneOp).source(),
((Translog.Index) translogOp).source(),
XContentFactory.xContentType(((Translog.Index) luceneOp).source().array())
);
} else {
assertThat(((Translog.Index) luceneOp).source(), equalTo(((Translog.Index) translogOp).source()));
}
} }
} }
} }
@ -1401,15 +1435,19 @@ public abstract class EngineTestCase extends ESTestCase {
} }
public static MapperService createMapperService() throws IOException { public static MapperService createMapperService() throws IOException {
IndexMetadata indexMetadata = IndexMetadata.builder("test") return createMapperService(Settings.EMPTY, "{}");
.settings(indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())) }
.putMapping("{\"properties\": {}}")
public static MapperService createMapperService(Settings settings, String mappings) throws IOException {
IndexMetadata indexMetadata = IndexMetadata.builder("index")
.settings(indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).put(settings))
.putMapping(mappings)
.build(); .build();
MapperService mapperService = MapperTestUtils.newMapperService( MapperService mapperService = MapperTestUtils.newMapperService(
new NamedXContentRegistry(ClusterModule.getNamedXWriteables()), new NamedXContentRegistry(ClusterModule.getNamedXWriteables()),
createTempDir(), createTempDir(),
Settings.EMPTY, indexMetadata.getSettings(),
"test" "index"
); );
mapperService.merge(indexMetadata, MapperService.MergeReason.MAPPING_UPDATE); mapperService.merge(indexMetadata, MapperService.MergeReason.MAPPING_UPDATE);
return mapperService; return mapperService;
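Reviewer note: the base class now wires a MapperService through the engine config and exposes two helpers used by the snapshot tests, createMapperService(Settings, String) and parseDocument(MapperService, String, String). A minimal usage sketch; the mapping JSON and the soft-deletes setting are illustrative, not taken from a specific test:
    MapperService mapperService = EngineTestCase.createMapperService(
        Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).build(),
        "{ \"dynamic\": false, \"properties\": { \"value\": { \"type\": \"keyword\" } } }"
    );
    ParsedDocument doc = EngineTestCase.parseDocument(mapperService, "1", null);   // id "1", no routing
    assertEquals("1", doc.id());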

View file

@ -43,6 +43,10 @@ public class TranslogHandler implements Engine.TranslogRecoveryRunner {
return appliedOperations.get(); return appliedOperations.get();
} }
public TranslogHandler(MapperService mapperService) {
this.mapperService = mapperService;
}
public TranslogHandler(NamedXContentRegistry xContentRegistry, IndexSettings indexSettings) { public TranslogHandler(NamedXContentRegistry xContentRegistry, IndexSettings indexSettings) {
SimilarityService similarityService = new SimilarityService(indexSettings, null, emptyMap()); SimilarityService similarityService = new SimilarityService(indexSettings, null, emptyMap());
MapperRegistry mapperRegistry = new IndicesModule(emptyList()).getMapperRegistry(); MapperRegistry mapperRegistry = new IndicesModule(emptyList()).getMapperRegistry();

View file

@ -9,9 +9,12 @@
package org.elasticsearch.index.mapper; package org.elasticsearch.index.mapper;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.IndexableFieldType; import org.apache.lucene.index.IndexableFieldType;
import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReader;
@ -20,7 +23,11 @@ import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.FieldExistsQuery;
import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;
import org.apache.lucene.search.similarities.BM25Similarity;
import org.apache.lucene.store.Directory; import org.apache.lucene.store.Directory;
import org.apache.lucene.tests.analysis.MockAnalyzer; import org.apache.lucene.tests.analysis.MockAnalyzer;
import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.tests.index.RandomIndexWriter;
@ -30,11 +37,14 @@ import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedConsumer;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.IndexVersions;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.LuceneSyntheticSourceChangesSnapshot;
import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.FieldDataContext;
import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexFieldDataCache;
@ -43,6 +53,7 @@ import org.elasticsearch.index.fieldvisitor.LeafStoredFieldLoader;
import org.elasticsearch.index.fieldvisitor.StoredFieldLoader; import org.elasticsearch.index.fieldvisitor.StoredFieldLoader;
import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.query.SearchExecutionContext;
import org.elasticsearch.index.termvectors.TermVectorsService; import org.elasticsearch.index.termvectors.TermVectorsService;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.script.Script; import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptContext;
@ -1130,6 +1141,11 @@ public abstract class MapperTestCase extends MapperServiceTestCase {
assertSyntheticSource(syntheticSourceSupport(shouldUseIgnoreMalformed()).example(5)); assertSyntheticSource(syntheticSourceSupport(shouldUseIgnoreMalformed()).example(5));
} }
public final void testSyntheticSourceWithTranslogSnapshot() throws IOException {
assertSyntheticSourceWithTranslogSnapshot(syntheticSourceSupport(shouldUseIgnoreMalformed()), true);
assertSyntheticSourceWithTranslogSnapshot(syntheticSourceSupport(shouldUseIgnoreMalformed()), false);
}
public void testSyntheticSourceIgnoreMalformedExamples() throws IOException { public void testSyntheticSourceIgnoreMalformedExamples() throws IOException {
assumeTrue("type doesn't support ignore_malformed", supportsIgnoreMalformed()); assumeTrue("type doesn't support ignore_malformed", supportsIgnoreMalformed());
// We need to call this in order to hit the assumption inside so that // We need to call this in order to hit the assumption inside so that
@ -1155,6 +1171,71 @@ public abstract class MapperTestCase extends MapperServiceTestCase {
assertThat(syntheticSource(mapper, example::buildInput), equalTo(example.expected())); assertThat(syntheticSource(mapper, example::buildInput), equalTo(example.expected()));
} }
private void assertSyntheticSourceWithTranslogSnapshot(SyntheticSourceSupport support, boolean doIndexSort) throws IOException {
var firstExample = support.example(1);
int maxDocs = randomIntBetween(20, 50);
var settings = Settings.builder()
.put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC)
.put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true)
.build();
var mapperService = createMapperService(getVersion(), settings, () -> true, mapping(b -> {
b.startObject("field");
firstExample.mapping().accept(b);
b.endObject();
}));
var docMapper = mapperService.documentMapper();
try (var directory = newDirectory()) {
List<SyntheticSourceExample> examples = new ArrayList<>();
IndexWriterConfig config = newIndexWriterConfig(random(), new StandardAnalyzer());
config.setIndexSort(new Sort(new SortField("sort", SortField.Type.LONG)));
try (var iw = new RandomIndexWriter(random(), directory, config)) {
for (int seqNo = 0; seqNo < maxDocs; seqNo++) {
var example = support.example(randomIntBetween(1, 5));
examples.add(example);
var doc = docMapper.parse(source(example::buildInput));
assertNull(doc.dynamicMappingsUpdate());
doc.updateSeqID(seqNo, 1);
doc.version().setLongValue(0);
if (doIndexSort) {
doc.rootDoc().add(new NumericDocValuesField("sort", randomLong()));
}
iw.addDocuments(doc.docs());
if (frequently()) {
iw.flush();
}
}
}
try (var indexReader = wrapInMockESDirectoryReader(DirectoryReader.open(directory))) {
int start = randomBoolean() ? 0 : randomIntBetween(1, maxDocs - 10);
var snapshot = new LuceneSyntheticSourceChangesSnapshot(
mapperService.mappingLookup(),
new Engine.Searcher(
"recovery",
indexReader,
new BM25Similarity(),
null,
new UsageTrackingQueryCachingPolicy(),
() -> {}
),
randomIntBetween(1, maxDocs),
randomLongBetween(0, ByteSizeValue.ofBytes(Integer.MAX_VALUE).getBytes()),
start,
maxDocs,
true,
randomBoolean(),
IndexVersion.current()
);
for (int i = start; i < maxDocs; i++) {
var example = examples.get(i);
var op = snapshot.next();
if (op instanceof Translog.Index opIndex) {
assertThat(opIndex.source().utf8ToString(), equalTo(example.expected()));
}
}
}
}
}
protected boolean supportsEmptyInputArray() { protected boolean supportsEmptyInputArray() {
return true; return true;
} }

View file

@ -26,7 +26,6 @@ import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.MockEngineFactoryPlugin; import org.elasticsearch.index.MockEngineFactoryPlugin;
import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.node.RecoverySettingsChunkSizePlugin;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.snapshots.SnapshotState;
@ -75,7 +74,6 @@ public abstract class AbstractIndexRecoveryIntegTestCase extends ESIntegTestCase
return Arrays.asList( return Arrays.asList(
MockTransportService.TestPlugin.class, MockTransportService.TestPlugin.class,
MockFSIndexStore.TestPlugin.class, MockFSIndexStore.TestPlugin.class,
RecoverySettingsChunkSizePlugin.class,
InternalSettingsPlugin.class, InternalSettingsPlugin.class,
MockEngineFactoryPlugin.class MockEngineFactoryPlugin.class
); );

View file

@ -28,7 +28,6 @@ import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.indices.ExecutorSelector; import org.elasticsearch.indices.ExecutorSelector;
import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.plugins.MockPluginsService; import org.elasticsearch.plugins.MockPluginsService;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.PluginsLoader; import org.elasticsearch.plugins.PluginsLoader;
@ -194,16 +193,6 @@ public class MockNode extends Node {
} }
} }
@Override
void processRecoverySettings(PluginsService pluginsService, ClusterSettings clusterSettings, RecoverySettings recoverySettings) {
if (pluginsService.filterPlugins(RecoverySettingsChunkSizePlugin.class).findAny().isEmpty() == false) {
clusterSettings.addSettingsUpdateConsumer(
RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING,
recoverySettings::setChunkSize
);
}
}
@Override @Override
protected ClusterInfoService newClusterInfoService( protected ClusterInfoService newClusterInfoService(
PluginsService pluginsService, PluginsService pluginsService,

View file

@ -1,40 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.node;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.plugins.Plugin;
import java.util.List;
import static java.util.Collections.singletonList;
/**
* Marker plugin that will trigger {@link MockNode} making {@link #CHUNK_SIZE_SETTING} dynamic.
*/
public class RecoverySettingsChunkSizePlugin extends Plugin {
/**
* The chunk size. Only exposed by tests.
*/
public static final Setting<ByteSizeValue> CHUNK_SIZE_SETTING = Setting.byteSizeSetting(
"indices.recovery.chunk_size",
RecoverySettings.DEFAULT_CHUNK_SIZE,
Property.Dynamic,
Property.NodeScope
);
@Override
public List<Setting<?>> getSettings() {
return singletonList(CHUNK_SIZE_SETTING);
}
}

Some files were not shown because too many files have changed in this diff