diff --git a/.buildkite/scripts/get-latest-test-mutes.sh b/.buildkite/scripts/get-latest-test-mutes.sh
index 5721e29f1b77..1dafcebec24b 100755
--- a/.buildkite/scripts/get-latest-test-mutes.sh
+++ b/.buildkite/scripts/get-latest-test-mutes.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-if [[ ! "${BUILDKITE_PULL_REQUEST:-}" || "${BUILDKITE_AGENT_META_DATA_PROVIDER:-}" == "k8s" ]]; then
+if [[ "${BUILDKITE_PULL_REQUEST:-false}" == "false" || "${BUILDKITE_AGENT_META_DATA_PROVIDER:-}" == "k8s" ]]; then
exit 0
fi
diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle
index 25cfae6c9803..632bae64389a 100644
--- a/benchmarks/build.gradle
+++ b/benchmarks/build.gradle
@@ -1,4 +1,5 @@
import org.elasticsearch.gradle.internal.test.TestUtil
+import org.elasticsearch.gradle.OS
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
@@ -77,7 +78,7 @@ tasks.register("copyPainless", Copy) {
}
tasks.named("run").configure {
- executable = "${buildParams.runtimeJavaHome.get()}/bin/java"
+ executable = "${buildParams.runtimeJavaHome.get()}/bin/java" + (OS.current() == OS.WINDOWS ? '.exe' : '')
args << "-Dplugins.dir=${buildDir}/plugins" << "-Dtests.index=${buildDir}/index"
dependsOn "copyExpression", "copyPainless", configurations.nativeLib
systemProperty 'es.nativelibs.path', TestUtil.getTestLibraryPath(file("../libs/native/libraries/build/platform/").toString())
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java
index 9aab4a3e3210..d3259b960471 100644
--- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java
@@ -27,6 +27,7 @@ import org.elasticsearch.compute.operator.Operator;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.xpack.esql.core.expression.Expression;
import org.elasticsearch.xpack.esql.core.expression.FieldAttribute;
+import org.elasticsearch.xpack.esql.core.expression.FoldContext;
import org.elasticsearch.xpack.esql.core.expression.Literal;
import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RLikePattern;
import org.elasticsearch.xpack.esql.core.tree.Source;
@@ -71,12 +72,11 @@ public class EvalBenchmark {
BigArrays.NON_RECYCLING_INSTANCE
);
+ private static final FoldContext FOLD_CONTEXT = FoldContext.small();
+
private static final int BLOCK_LENGTH = 8 * 1024;
- static final DriverContext driverContext = new DriverContext(
- BigArrays.NON_RECYCLING_INSTANCE,
- BlockFactory.getInstance(new NoopCircuitBreaker("noop"), BigArrays.NON_RECYCLING_INSTANCE)
- );
+ static final DriverContext driverContext = new DriverContext(BigArrays.NON_RECYCLING_INSTANCE, blockFactory);
static {
// Smoke test all the expected values and force loading subclasses more like prod
@@ -114,11 +114,12 @@ public class EvalBenchmark {
return switch (operation) {
case "abs" -> {
FieldAttribute longField = longField();
- yield EvalMapper.toEvaluator(new Abs(Source.EMPTY, longField), layout(longField)).get(driverContext);
+ yield EvalMapper.toEvaluator(FOLD_CONTEXT, new Abs(Source.EMPTY, longField), layout(longField)).get(driverContext);
}
case "add" -> {
FieldAttribute longField = longField();
yield EvalMapper.toEvaluator(
+ FOLD_CONTEXT,
new Add(Source.EMPTY, longField, new Literal(Source.EMPTY, 1L, DataType.LONG)),
layout(longField)
).get(driverContext);
@@ -126,6 +127,7 @@ public class EvalBenchmark {
case "add_double" -> {
FieldAttribute doubleField = doubleField();
yield EvalMapper.toEvaluator(
+ FOLD_CONTEXT,
new Add(Source.EMPTY, doubleField, new Literal(Source.EMPTY, 1D, DataType.DOUBLE)),
layout(doubleField)
).get(driverContext);
@@ -140,7 +142,8 @@ public class EvalBenchmark {
lhs = new Add(Source.EMPTY, lhs, new Literal(Source.EMPTY, 1L, DataType.LONG));
rhs = new Add(Source.EMPTY, rhs, new Literal(Source.EMPTY, 1L, DataType.LONG));
}
- yield EvalMapper.toEvaluator(new Case(Source.EMPTY, condition, List.of(lhs, rhs)), layout(f1, f2)).get(driverContext);
+ yield EvalMapper.toEvaluator(FOLD_CONTEXT, new Case(Source.EMPTY, condition, List.of(lhs, rhs)), layout(f1, f2))
+ .get(driverContext);
}
case "date_trunc" -> {
FieldAttribute timestamp = new FieldAttribute(
@@ -149,6 +152,7 @@ public class EvalBenchmark {
new EsField("timestamp", DataType.DATETIME, Map.of(), true)
);
yield EvalMapper.toEvaluator(
+ FOLD_CONTEXT,
new DateTrunc(Source.EMPTY, new Literal(Source.EMPTY, Duration.ofHours(24), DataType.TIME_DURATION), timestamp),
layout(timestamp)
).get(driverContext);
@@ -156,6 +160,7 @@ public class EvalBenchmark {
case "equal_to_const" -> {
FieldAttribute longField = longField();
yield EvalMapper.toEvaluator(
+ FOLD_CONTEXT,
new Equals(Source.EMPTY, longField, new Literal(Source.EMPTY, 100_000L, DataType.LONG)),
layout(longField)
).get(driverContext);
@@ -163,21 +168,21 @@ public class EvalBenchmark {
case "long_equal_to_long" -> {
FieldAttribute lhs = longField();
FieldAttribute rhs = longField();
- yield EvalMapper.toEvaluator(new Equals(Source.EMPTY, lhs, rhs), layout(lhs, rhs)).get(driverContext);
+ yield EvalMapper.toEvaluator(FOLD_CONTEXT, new Equals(Source.EMPTY, lhs, rhs), layout(lhs, rhs)).get(driverContext);
}
case "long_equal_to_int" -> {
FieldAttribute lhs = longField();
FieldAttribute rhs = intField();
- yield EvalMapper.toEvaluator(new Equals(Source.EMPTY, lhs, rhs), layout(lhs, rhs)).get(driverContext);
+ yield EvalMapper.toEvaluator(FOLD_CONTEXT, new Equals(Source.EMPTY, lhs, rhs), layout(lhs, rhs)).get(driverContext);
}
case "mv_min", "mv_min_ascending" -> {
FieldAttribute longField = longField();
- yield EvalMapper.toEvaluator(new MvMin(Source.EMPTY, longField), layout(longField)).get(driverContext);
+ yield EvalMapper.toEvaluator(FOLD_CONTEXT, new MvMin(Source.EMPTY, longField), layout(longField)).get(driverContext);
}
case "rlike" -> {
FieldAttribute keywordField = keywordField();
RLike rlike = new RLike(Source.EMPTY, keywordField, new RLikePattern(".ar"));
- yield EvalMapper.toEvaluator(rlike, layout(keywordField)).get(driverContext);
+ yield EvalMapper.toEvaluator(FOLD_CONTEXT, rlike, layout(keywordField)).get(driverContext);
}
default -> throw new UnsupportedOperationException();
};
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java
index 6dfb337a22ac..24ba0740cfe2 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java
@@ -22,7 +22,7 @@ public enum DockerBase {
// Chainguard based wolfi image with latest jdk
// This is usually updated via renovatebot
// spotless:off
- WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:eef54b3a414aa53b98f0f8df2633aed83c3ba6230722769282925442968f0364",
+ WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:dd66beec64a7f9b19c6c35a1195153b2b630a55e16ec71949ed5187c5947eea1",
"-wolfi",
"apk"
),
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java
index ab28a66d9306..fac7d86701d5 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java
@@ -122,7 +122,7 @@ public class TestFixturesPlugin implements Plugin {
composeExtension.getRemoveContainers().set(true);
composeExtension.getCaptureContainersOutput()
.set(EnumSet.of(LogLevel.INFO, LogLevel.DEBUG).contains(project.getGradle().getStartParameter().getLogLevel()));
- composeExtension.getUseDockerComposeV2().set(false);
+ composeExtension.getUseDockerComposeV2().set(true);
composeExtension.getExecutable().set(this.providerFactory.provider(() -> {
String composePath = dockerSupport.get().getDockerAvailability().dockerComposePath();
LOGGER.debug("Docker Compose path: {}", composePath);
diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java
index c3b976894676..1e57d9fab7cf 100644
--- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java
+++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java
@@ -187,20 +187,12 @@ class APMJvmOptions {
static void extractSecureSettings(SecureSettings secrets, Map propertiesMap) {
final Set settingNames = secrets.getSettingNames();
for (String key : List.of("api_key", "secret_token")) {
- for (String prefix : List.of("telemetry.", "tracing.apm.")) {
- if (settingNames.contains(prefix + key)) {
- if (propertiesMap.containsKey(key)) {
- throw new IllegalStateException(
- Strings.format("Duplicate telemetry setting: [telemetry.%s] and [tracing.apm.%s]", key, key)
- );
- }
-
- try (SecureString token = secrets.getString(prefix + key)) {
- propertiesMap.put(key, token.toString());
- }
+ String prefix = "telemetry.";
+ if (settingNames.contains(prefix + key)) {
+ try (SecureString token = secrets.getString(prefix + key)) {
+ propertiesMap.put(key, token.toString());
}
}
-
}
}
@@ -227,44 +219,12 @@ class APMJvmOptions {
static Map extractApmSettings(Settings settings) throws UserException {
final Map propertiesMap = new HashMap<>();
- // tracing.apm.agent. is deprecated by telemetry.agent.
final String telemetryAgentPrefix = "telemetry.agent.";
- final String deprecatedTelemetryAgentPrefix = "tracing.apm.agent.";
final Settings telemetryAgentSettings = settings.getByPrefix(telemetryAgentPrefix);
telemetryAgentSettings.keySet().forEach(key -> propertiesMap.put(key, String.valueOf(telemetryAgentSettings.get(key))));
- final Settings apmAgentSettings = settings.getByPrefix(deprecatedTelemetryAgentPrefix);
- for (String key : apmAgentSettings.keySet()) {
- if (propertiesMap.containsKey(key)) {
- throw new IllegalStateException(
- Strings.format(
- "Duplicate telemetry setting: [%s%s] and [%s%s]",
- telemetryAgentPrefix,
- key,
- deprecatedTelemetryAgentPrefix,
- key
- )
- );
- }
- propertiesMap.put(key, String.valueOf(apmAgentSettings.get(key)));
- }
-
StringJoiner globalLabels = extractGlobalLabels(telemetryAgentPrefix, propertiesMap, settings);
- if (globalLabels.length() == 0) {
- globalLabels = extractGlobalLabels(deprecatedTelemetryAgentPrefix, propertiesMap, settings);
- } else {
- StringJoiner tracingGlobalLabels = extractGlobalLabels(deprecatedTelemetryAgentPrefix, propertiesMap, settings);
- if (tracingGlobalLabels.length() != 0) {
- throw new IllegalArgumentException(
- "Cannot have global labels with tracing.agent prefix ["
- + globalLabels
- + "] and telemetry.apm.agent prefix ["
- + tracingGlobalLabels
- + "]"
- );
- }
- }
if (globalLabels.length() > 0) {
propertiesMap.put("global_labels", globalLabels.toString());
}
@@ -274,7 +234,7 @@ class APMJvmOptions {
if (propertiesMap.containsKey(key)) {
throw new UserException(
ExitCodes.CONFIG,
- "Do not set a value for [tracing.apm.agent." + key + "], as this is configured automatically by Elasticsearch"
+ "Do not set a value for [telemetry.agent." + key + "], as this is configured automatically by Elasticsearch"
);
}
}
diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/APMJvmOptionsTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/APMJvmOptionsTests.java
index a7ba8eb11fbc..0e067afc1aa7 100644
--- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/APMJvmOptionsTests.java
+++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/APMJvmOptionsTests.java
@@ -25,18 +25,15 @@ import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.function.Function;
import static org.elasticsearch.test.MapMatcher.matchesMap;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.containsInAnyOrder;
-import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.endsWith;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasEntry;
import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.hasSize;
-import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
@@ -82,109 +79,63 @@ public class APMJvmOptionsTests extends ESTestCase {
}
public void testExtractSecureSettings() {
- MockSecureSettings duplicateSecureSettings = new MockSecureSettings();
+ MockSecureSettings secureSettings = new MockSecureSettings();
+ secureSettings.setString("telemetry.secret_token", "token");
+ secureSettings.setString("telemetry.api_key", "key");
- for (String prefix : List.of("telemetry.", "tracing.apm.")) {
- MockSecureSettings secureSettings = new MockSecureSettings();
- secureSettings.setString(prefix + "secret_token", "token");
- secureSettings.setString(prefix + "api_key", "key");
-
- duplicateSecureSettings.setString(prefix + "api_key", "secret");
-
- Map propertiesMap = new HashMap<>();
- APMJvmOptions.extractSecureSettings(secureSettings, propertiesMap);
-
- assertThat(propertiesMap, matchesMap(Map.of("secret_token", "token", "api_key", "key")));
- }
-
- Exception exception = expectThrows(
- IllegalStateException.class,
- () -> APMJvmOptions.extractSecureSettings(duplicateSecureSettings, new HashMap<>())
- );
- assertThat(exception.getMessage(), containsString("Duplicate telemetry setting"));
- assertThat(exception.getMessage(), containsString("telemetry.api_key"));
- assertThat(exception.getMessage(), containsString("tracing.apm.api_key"));
+ Map propertiesMap = new HashMap<>();
+ APMJvmOptions.extractSecureSettings(secureSettings, propertiesMap);
+ assertThat(propertiesMap, matchesMap(Map.of("secret_token", "token", "api_key", "key")));
}
public void testExtractSettings() throws UserException {
- Function buildSettings = (prefix) -> Settings.builder()
- .put(prefix + "server_url", "https://myurl:443")
- .put(prefix + "service_node_name", "instance-0000000001");
-
- for (String prefix : List.of("tracing.apm.agent.", "telemetry.agent.")) {
- var name = "APM Tracing";
- var deploy = "123";
- var org = "456";
- var extracted = APMJvmOptions.extractApmSettings(
- buildSettings.apply(prefix)
- .put(prefix + "global_labels.deployment_name", name)
- .put(prefix + "global_labels.deployment_id", deploy)
- .put(prefix + "global_labels.organization_id", org)
- .build()
- );
-
- assertThat(
- extracted,
- allOf(
- hasEntry("server_url", "https://myurl:443"),
- hasEntry("service_node_name", "instance-0000000001"),
- hasEntry(equalTo("global_labels"), not(endsWith(","))), // test that we have collapsed all global labels into one
- not(hasKey("global_labels.organization_id")) // tests that we strip out the top level label keys
- )
- );
-
- List labels = Arrays.stream(extracted.get("global_labels").split(",")).toList();
- assertThat(labels, hasSize(3));
- assertThat(labels, containsInAnyOrder("deployment_name=APM Tracing", "organization_id=" + org, "deployment_id=" + deploy));
-
- // test replacing with underscores and skipping empty
- name = "APM=Tracing";
- deploy = "";
- org = ",456";
- extracted = APMJvmOptions.extractApmSettings(
- buildSettings.apply(prefix)
- .put(prefix + "global_labels.deployment_name", name)
- .put(prefix + "global_labels.deployment_id", deploy)
- .put(prefix + "global_labels.organization_id", org)
- .build()
- );
- labels = Arrays.stream(extracted.get("global_labels").split(",")).toList();
- assertThat(labels, hasSize(2));
- assertThat(labels, containsInAnyOrder("deployment_name=APM_Tracing", "organization_id=_456"));
- }
-
- IllegalStateException err = expectThrows(
- IllegalStateException.class,
- () -> APMJvmOptions.extractApmSettings(
- Settings.builder()
- .put("tracing.apm.agent.server_url", "https://myurl:443")
- .put("telemetry.agent.server_url", "https://myurl-2:443")
- .build()
- )
- );
- assertThat(err.getMessage(), is("Duplicate telemetry setting: [telemetry.agent.server_url] and [tracing.apm.agent.server_url]"));
- }
-
- public void testNoMixedLabels() {
- String telemetryAgent = "telemetry.agent.";
- String tracingAgent = "tracing.apm.agent.";
- Settings settings = Settings.builder()
- .put("tracing.apm.enabled", true)
- .put(telemetryAgent + "server_url", "https://myurl:443")
- .put(telemetryAgent + "service_node_name", "instance-0000000001")
- .put(tracingAgent + "global_labels.deployment_id", "123")
- .put(telemetryAgent + "global_labels.organization_id", "456")
+ Settings defaults = Settings.builder()
+ .put("telemetry.agent.server_url", "https://myurl:443")
+ .put("telemetry.agent.service_node_name", "instance-0000000001")
.build();
- IllegalArgumentException err = assertThrows(IllegalArgumentException.class, () -> APMJvmOptions.extractApmSettings(settings));
+ var name = "APM Tracing";
+ var deploy = "123";
+ var org = "456";
+ var extracted = APMJvmOptions.extractApmSettings(
+ Settings.builder()
+ .put(defaults)
+ .put("telemetry.agent.global_labels.deployment_name", name)
+ .put("telemetry.agent.global_labels.deployment_id", deploy)
+ .put("telemetry.agent.global_labels.organization_id", org)
+ .build()
+ );
+
assertThat(
- err.getMessage(),
- is(
- "Cannot have global labels with tracing.agent prefix [organization_id=456] and"
- + " telemetry.apm.agent prefix [deployment_id=123]"
+ extracted,
+ allOf(
+ hasEntry("server_url", "https://myurl:443"),
+ hasEntry("service_node_name", "instance-0000000001"),
+ hasEntry(equalTo("global_labels"), not(endsWith(","))), // test that we have collapsed all global labels into one
+ not(hasKey("global_labels.organization_id")) // tests that we strip out the top level label keys
)
);
+
+ List labels = Arrays.stream(extracted.get("global_labels").split(",")).toList();
+ assertThat(labels, hasSize(3));
+ assertThat(labels, containsInAnyOrder("deployment_name=APM Tracing", "organization_id=" + org, "deployment_id=" + deploy));
+
+ // test replacing with underscores and skipping empty
+ name = "APM=Tracing";
+ deploy = "";
+ org = ",456";
+ extracted = APMJvmOptions.extractApmSettings(
+ Settings.builder()
+ .put(defaults)
+ .put("telemetry.agent.global_labels.deployment_name", name)
+ .put("telemetry.agent.global_labels.deployment_id", deploy)
+ .put("telemetry.agent.global_labels.organization_id", org)
+ .build()
+ );
+ labels = Arrays.stream(extracted.get("global_labels").split(",")).toList();
+ assertThat(labels, hasSize(2));
+ assertThat(labels, containsInAnyOrder("deployment_name=APM_Tracing", "organization_id=_456"));
}
private Path makeFakeAgentJar() throws IOException {
diff --git a/docs/changelog/118188.yaml b/docs/changelog/118188.yaml
new file mode 100644
index 000000000000..f24651231b7a
--- /dev/null
+++ b/docs/changelog/118188.yaml
@@ -0,0 +1,5 @@
+pr: 118188
+summary: Check for early termination in Driver
+area: ES|QL
+type: enhancement
+issues: []
diff --git a/docs/changelog/118602.yaml b/docs/changelog/118602.yaml
new file mode 100644
index 000000000000..a75c5dcf11da
--- /dev/null
+++ b/docs/changelog/118602.yaml
@@ -0,0 +1,5 @@
+pr: 118602
+summary: Limit memory usage of `fold`
+area: ES|QL
+type: bug
+issues: []
diff --git a/docs/changelog/119227.yaml b/docs/changelog/119227.yaml
new file mode 100644
index 000000000000..1e3d4f97a3d2
--- /dev/null
+++ b/docs/changelog/119227.yaml
@@ -0,0 +1,13 @@
+pr: 119227
+summary: Remove unfreeze REST endpoint
+area: Indices APIs
+type: breaking
+issues: []
+breaking:
+ title: Remove unfreeze REST endpoint
+ area: REST API
+ details: >-
+ The `/{index}/_unfreeze` REST endpoint is no longer supported. This API was deprecated, and the corresponding
+ `/{index}/_freeze` endpoint was removed in 8.0.
+ impact: None, since it is not possible to have a frozen index in a version which is readable by Elasticsearch 9.0
+ notable: false
diff --git a/docs/changelog/119575.yaml b/docs/changelog/119575.yaml
new file mode 100644
index 000000000000..daa7e69118ac
--- /dev/null
+++ b/docs/changelog/119575.yaml
@@ -0,0 +1,6 @@
+pr: 119575
+summary: Fix realtime get of nested fields with synthetic source
+area: Mapping
+type: bug
+issues:
+ - 119553
diff --git a/docs/changelog/119679.yaml b/docs/changelog/119679.yaml
new file mode 100644
index 000000000000..a3fb36bcd01c
--- /dev/null
+++ b/docs/changelog/119679.yaml
@@ -0,0 +1,5 @@
+pr: 119679
+summary: Support mTLS for the Elastic Inference Service integration inside the inference API
+area: Machine Learning
+type: feature
+issues: []
diff --git a/docs/changelog/119772.yaml b/docs/changelog/119772.yaml
new file mode 100644
index 000000000000..58d483566b10
--- /dev/null
+++ b/docs/changelog/119772.yaml
@@ -0,0 +1,6 @@
+pr: 119772
+summary: ESQL Support IN operator for Date nanos
+area: ES|QL
+type: enhancement
+issues:
+ - 118578
diff --git a/docs/changelog/119831.yaml b/docs/changelog/119831.yaml
new file mode 100644
index 000000000000..61c09d7d54de
--- /dev/null
+++ b/docs/changelog/119831.yaml
@@ -0,0 +1,5 @@
+pr: 119831
+summary: Run `TransportClusterGetSettingsAction` on local node
+area: Infra/Settings
+type: enhancement
+issues: []
diff --git a/docs/changelog/119846.yaml b/docs/changelog/119846.yaml
new file mode 100644
index 000000000000..9e7d99fe1be1
--- /dev/null
+++ b/docs/changelog/119846.yaml
@@ -0,0 +1,12 @@
+pr: 119846
+summary: Drop support for brackets from METADATA syntax
+area: ES|QL
+type: deprecation
+issues:
+ - 115401
+deprecation:
+ title: Drop support for brackets from METADATA syntax
+ area: ES|QL
+ details: Enclosing the list of metadata fields in square brackets in the
+ `METADATA` option of the `FROM` command is no longer supported.
+ impact: Remove the square brackets, e.g. use `FROM index METADATA _id` instead of `FROM index [METADATA _id]`.
diff --git a/docs/changelog/119893.yaml b/docs/changelog/119893.yaml
new file mode 100644
index 000000000000..35a46ce0940d
--- /dev/null
+++ b/docs/changelog/119893.yaml
@@ -0,0 +1,5 @@
+pr: 119893
+summary: Add enterprise license check for Inference API actions
+area: Machine Learning
+type: enhancement
+issues: []
diff --git a/docs/changelog/119922.yaml b/docs/changelog/119922.yaml
new file mode 100644
index 000000000000..2fc9d9529c96
--- /dev/null
+++ b/docs/changelog/119922.yaml
@@ -0,0 +1,5 @@
+pr: 119922
+summary: "[Inference API] Fix misspelled method name: rename `covertToString` to `convertToString`"
+area: Machine Learning
+type: enhancement
+issues: []
diff --git a/docs/changelog/119926.yaml b/docs/changelog/119926.yaml
new file mode 100644
index 000000000000..3afafd5b2117
--- /dev/null
+++ b/docs/changelog/119926.yaml
@@ -0,0 +1,11 @@
+pr: 119926
+summary: "Deprecated tracing.apm.* settings got removed."
+area: Infra/Metrics
+type: breaking
+issues: []
+breaking:
+ title: "Deprecated tracing.apm.* settings got removed."
+ area: Cluster and node setting
+ details: Deprecated `tracing.apm.*` settings got removed, use respective `telemetry.*` / `telemetry.tracing.*` settings instead.
+ impact: 9.x nodes will refuse to start if any such setting (including secret settings) is still present.
+ notable: false
diff --git a/docs/changelog/120014.yaml b/docs/changelog/120014.yaml
new file mode 100644
index 000000000000..bef1f3ba4993
--- /dev/null
+++ b/docs/changelog/120014.yaml
@@ -0,0 +1,6 @@
+pr: 120014
+summary: Fix potential file leak in ES816BinaryQuantizedVectorsWriter
+area: Search
+type: bug
+issues:
+ - 119981
diff --git a/docs/changelog/120020.yaml b/docs/changelog/120020.yaml
new file mode 100644
index 000000000000..55a80187dbff
--- /dev/null
+++ b/docs/changelog/120020.yaml
@@ -0,0 +1,5 @@
+pr: 120020
+summary: Resume Driver on cancelled or early finished
+area: ES|QL
+type: enhancement
+issues: []
diff --git a/docs/changelog/120038.yaml b/docs/changelog/120038.yaml
new file mode 100644
index 000000000000..fe3a2ccccc09
--- /dev/null
+++ b/docs/changelog/120038.yaml
@@ -0,0 +1,5 @@
+pr: 120038
+summary: Run template simulation actions on local node
+area: Ingest Node
+type: enhancement
+issues: []
diff --git a/docs/changelog/120042.yaml b/docs/changelog/120042.yaml
new file mode 100644
index 000000000000..0093068ae989
--- /dev/null
+++ b/docs/changelog/120042.yaml
@@ -0,0 +1,5 @@
+pr: 120042
+summary: Match dot prefix of migrated DS backing index with the source index
+area: Data streams
+type: bug
+issues: []
diff --git a/docs/changelog/120055.yaml b/docs/changelog/120055.yaml
new file mode 100644
index 000000000000..05f66523d0ef
--- /dev/null
+++ b/docs/changelog/120055.yaml
@@ -0,0 +1,5 @@
+pr: 120055
+summary: Optimize loading mappings when determining synthetic source usage and whether host.name can be sorted on.
+area: Logs
+type: enhancement
+issues: []
diff --git a/docs/changelog/120062.yaml b/docs/changelog/120062.yaml
new file mode 100644
index 000000000000..42e8d97f1744
--- /dev/null
+++ b/docs/changelog/120062.yaml
@@ -0,0 +1,6 @@
+pr: 120062
+summary: Update Text Similarity Reranker to Properly Handle Aliases
+area: Ranking
+type: bug
+issues:
+ - 119617
diff --git a/docs/changelog/120084.yaml b/docs/changelog/120084.yaml
new file mode 100644
index 000000000000..aafe490d79f1
--- /dev/null
+++ b/docs/changelog/120084.yaml
@@ -0,0 +1,5 @@
+pr: 120084
+summary: Improve how reindex data stream index action handles api blocks
+area: Data streams
+type: enhancement
+issues: []
diff --git a/docs/changelog/120087.yaml b/docs/changelog/120087.yaml
new file mode 100644
index 000000000000..8539640809b0
--- /dev/null
+++ b/docs/changelog/120087.yaml
@@ -0,0 +1,5 @@
+pr: 120087
+summary: Include `clusterApplyListener` in long cluster apply warnings
+area: Cluster Coordination
+type: enhancement
+issues: []
diff --git a/docs/changelog/120133.yaml b/docs/changelog/120133.yaml
new file mode 100644
index 000000000000..4ec88267a1bf
--- /dev/null
+++ b/docs/changelog/120133.yaml
@@ -0,0 +1,6 @@
+pr: 120133
+summary: Use approximation to advance matched queries
+area: Search
+type: bug
+issues:
+ - 120130
diff --git a/docs/changelog/120143.yaml b/docs/changelog/120143.yaml
new file mode 100644
index 000000000000..7e8cd5a8ceae
--- /dev/null
+++ b/docs/changelog/120143.yaml
@@ -0,0 +1,6 @@
+pr: 120143
+summary: Esql - support date nanos in date format function
+area: ES|QL
+type: enhancement
+issues:
+ - 109994
diff --git a/docs/changelog/120193.yaml b/docs/changelog/120193.yaml
new file mode 100644
index 000000000000..18858e81d9b6
--- /dev/null
+++ b/docs/changelog/120193.yaml
@@ -0,0 +1,5 @@
+pr: 120193
+summary: "Do not capture `ClusterChangedEvent` in `IndicesStore` call to #onClusterStateShardsClosed"
+area: Store
+type: bug
+issues: []
diff --git a/docs/changelog/120198.yaml b/docs/changelog/120198.yaml
new file mode 100644
index 000000000000..076a2be942a3
--- /dev/null
+++ b/docs/changelog/120198.yaml
@@ -0,0 +1,5 @@
+pr: 120198
+summary: Bump `TrialLicenseVersion` to allow starting new trial on 9.0
+area: License
+type: enhancement
+issues: []
diff --git a/docs/changelog/120200.yaml b/docs/changelog/120200.yaml
new file mode 100644
index 000000000000..abde91aec0df
--- /dev/null
+++ b/docs/changelog/120200.yaml
@@ -0,0 +1,5 @@
+pr: 120200
+summary: "[Connector API] Support hard deletes with new URL param in delete endpoint"
+area: Extract&Transform
+type: feature
+issues: []
diff --git a/docs/reference/connector/apis/delete-connector-api.asciidoc b/docs/reference/connector/apis/delete-connector-api.asciidoc
index f161a3c3b593..a324630cc8a5 100644
--- a/docs/reference/connector/apis/delete-connector-api.asciidoc
+++ b/docs/reference/connector/apis/delete-connector-api.asciidoc
@@ -13,7 +13,7 @@ beta::[]
For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs].
--
-Soft-deletes a connector and removes associated sync jobs.
+Deletes a connector and optionally removes associated sync jobs.
Note: this action doesn't delete any API key, ingest pipeline or data index associated with the connector. These need to be removed manually.
@@ -37,6 +37,9 @@ To get started with Connector APIs, check out <`::
(Required, string)
+``::
+(Optional, boolean) If `true`, the connector doc is deleted. If `false`, the connector doc is marked as deleted (soft deletion). Defaults to `false`.
+
`delete_sync_jobs`::
(Optional, boolean) A flag indicating if associated sync jobs should be also removed. Defaults to `false`.
diff --git a/docs/reference/esql/esql-limitations.asciidoc b/docs/reference/esql/esql-limitations.asciidoc
index 8ce8064a8161..adfd38478ab2 100644
--- a/docs/reference/esql/esql-limitations.asciidoc
+++ b/docs/reference/esql/esql-limitations.asciidoc
@@ -30,11 +30,11 @@ include::processing-commands/limit.asciidoc[tag=limitation]
** You can use `to_datetime` to cast to millisecond dates to use unsupported functions
* `double` (`float`, `half_float`, `scaled_float` are represented as `double`)
* `ip`
-* `keyword` family including `keyword`, `constant_keyword`, and `wildcard`
+* `keyword` <> including `keyword`, `constant_keyword`, and `wildcard`
* `int` (`short` and `byte` are represented as `int`)
* `long`
* `null`
-* `text`
+* `text` <> including `text`, `semantic_text` and `match_only_text`
* experimental:[] `unsigned_long`
* `version`
* Spatial types
diff --git a/docs/reference/esql/functions/description/match.asciidoc b/docs/reference/esql/functions/description/match.asciidoc
index 931fd5eb2f94..0724f0f108e3 100644
--- a/docs/reference/esql/functions/description/match.asciidoc
+++ b/docs/reference/esql/functions/description/match.asciidoc
@@ -2,4 +2,4 @@
*Description*
-Use `MATCH` to perform a <> on the specified field. Using `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL. Match can be used on text fields, as well as other field types like boolean, dates, and numeric types. For a simplified syntax, you can use the <> `:` operator instead of `MATCH`. `MATCH` returns true if the provided query matches the row.
+Use `MATCH` to perform a <> on the specified field. Using `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL. Match can be used on fields from the text family like <> and <>, as well as other field types like keyword, boolean, dates, and numeric types. For a simplified syntax, you can use the <> `:` operator instead of `MATCH`. `MATCH` returns true if the provided query matches the row.
diff --git a/docs/reference/esql/functions/description/to_date_nanos.asciidoc b/docs/reference/esql/functions/description/to_date_nanos.asciidoc
index 3fac7295f1be..955c19b43a12 100644
--- a/docs/reference/esql/functions/description/to_date_nanos.asciidoc
+++ b/docs/reference/esql/functions/description/to_date_nanos.asciidoc
@@ -4,4 +4,4 @@
Converts an input to a nanosecond-resolution date value (aka date_nanos).
-NOTE: The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.
+NOTE: The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z, attempting to convert values outside of that range will result in null with a warning. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.
diff --git a/docs/reference/esql/functions/kibana/definition/bucket.json b/docs/reference/esql/functions/kibana/definition/bucket.json
index 3d96de05c840..f9c7f2f27d6f 100644
--- a/docs/reference/esql/functions/kibana/definition/bucket.json
+++ b/docs/reference/esql/functions/kibana/definition/bucket.json
@@ -1599,7 +1599,7 @@
"FROM sample_data \n| WHERE @timestamp >= NOW() - 1 day and @timestamp < NOW()\n| STATS COUNT(*) BY bucket = BUCKET(@timestamp, 25, NOW() - 1 day, NOW())",
"FROM employees\n| WHERE hire_date >= \"1985-01-01T00:00:00Z\" AND hire_date < \"1986-01-01T00:00:00Z\"\n| STATS AVG(salary) BY bucket = BUCKET(hire_date, 20, \"1985-01-01T00:00:00Z\", \"1986-01-01T00:00:00Z\")\n| SORT bucket",
"FROM employees\n| STATS s1 = b1 + 1, s2 = BUCKET(salary / 1000 + 999, 50.) + 2 BY b1 = BUCKET(salary / 100 + 99, 50.), b2 = BUCKET(salary / 1000 + 999, 50.)\n| SORT b1, b2\n| KEEP s1, b1, s2, b2",
- "FROM employees \n| STATS dates = VALUES(birth_date) BY b = BUCKET(birth_date + 1 HOUR, 1 YEAR) - 1 HOUR\n| EVAL d_count = MV_COUNT(dates)\n| SORT d_count\n| LIMIT 3"
+ "FROM employees\n| STATS dates = MV_SORT(VALUES(birth_date)) BY b = BUCKET(birth_date + 1 HOUR, 1 YEAR) - 1 HOUR\n| EVAL d_count = MV_COUNT(dates)\n| SORT d_count, b\n| LIMIT 3"
],
"preview" : false,
"snapshot_only" : false
diff --git a/docs/reference/esql/functions/kibana/definition/date_format.json b/docs/reference/esql/functions/kibana/definition/date_format.json
index 6e2738fafb96..f6f48e9df82b 100644
--- a/docs/reference/esql/functions/kibana/definition/date_format.json
+++ b/docs/reference/esql/functions/kibana/definition/date_format.json
@@ -4,6 +4,30 @@
"name" : "date_format",
"description" : "Returns a string representation of a date, in the provided format.",
"signatures" : [
+ {
+ "params" : [
+ {
+ "name" : "dateFormat",
+ "type" : "date",
+ "optional" : true,
+ "description" : "Date format (optional). If no format is specified, the `yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. If `null`, the function returns `null`."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "keyword"
+ },
+ {
+ "params" : [
+ {
+ "name" : "dateFormat",
+ "type" : "date_nanos",
+ "optional" : true,
+ "description" : "Date format (optional). If no format is specified, the `yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. If `null`, the function returns `null`."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "keyword"
+ },
{
"params" : [
{
@@ -22,6 +46,24 @@
"variadic" : false,
"returnType" : "keyword"
},
+ {
+ "params" : [
+ {
+ "name" : "dateFormat",
+ "type" : "keyword",
+ "optional" : true,
+ "description" : "Date format (optional). If no format is specified, the `yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. If `null`, the function returns `null`."
+ },
+ {
+ "name" : "date",
+ "type" : "date_nanos",
+ "optional" : false,
+ "description" : "Date expression. If `null`, the function returns `null`."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "keyword"
+ },
{
"params" : [
{
@@ -39,6 +81,24 @@
],
"variadic" : false,
"returnType" : "keyword"
+ },
+ {
+ "params" : [
+ {
+ "name" : "dateFormat",
+ "type" : "text",
+ "optional" : true,
+ "description" : "Date format (optional). If no format is specified, the `yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. If `null`, the function returns `null`."
+ },
+ {
+ "name" : "date",
+ "type" : "date_nanos",
+ "optional" : false,
+ "description" : "Date expression. If `null`, the function returns `null`."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "keyword"
}
],
"examples" : [
diff --git a/docs/reference/esql/functions/kibana/definition/match.json b/docs/reference/esql/functions/kibana/definition/match.json
index d61534da81a6..eb206cb9ddf4 100644
--- a/docs/reference/esql/functions/kibana/definition/match.json
+++ b/docs/reference/esql/functions/kibana/definition/match.json
@@ -2,7 +2,7 @@
"comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
"type" : "eval",
"name" : "match",
- "description" : "Use `MATCH` to perform a <> on the specified field.\nUsing `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL.\n\nMatch can be used on text fields, as well as other field types like boolean, dates, and numeric types.\n\nFor a simplified syntax, you can use the <> `:` operator instead of `MATCH`.\n\n`MATCH` returns true if the provided query matches the row.",
+ "description" : "Use `MATCH` to perform a <> on the specified field.\nUsing `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL.\n\nMatch can be used on fields from the text family like <> and <>,\nas well as other field types like keyword, boolean, dates, and numeric types.\n\nFor a simplified syntax, you can use the <> `:` operator instead of `MATCH`.\n\n`MATCH` returns true if the provided query matches the row.",
"signatures" : [
{
"params" : [
diff --git a/docs/reference/esql/functions/kibana/definition/match_operator.json b/docs/reference/esql/functions/kibana/definition/match_operator.json
index 44233bbddb65..b58f9d5835a2 100644
--- a/docs/reference/esql/functions/kibana/definition/match_operator.json
+++ b/docs/reference/esql/functions/kibana/definition/match_operator.json
@@ -2,7 +2,7 @@
"comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
"type" : "operator",
"name" : "match_operator",
- "description" : "Performs a <> on the specified field. Returns true if the provided query matches the row.",
+ "description" : "Use `MATCH` to perform a <> on the specified field.\nUsing `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL.\n\nMatch can be used on fields from the text family like <> and <>,\nas well as other field types like keyword, boolean, dates, and numeric types.\n\nFor a simplified syntax, you can use the <> `:` operator instead of `MATCH`.\n\n`MATCH` returns true if the provided query matches the row.",
"signatures" : [
{
"params" : [
diff --git a/docs/reference/esql/functions/kibana/definition/to_date_nanos.json b/docs/reference/esql/functions/kibana/definition/to_date_nanos.json
index d9409bceb8e6..210b9608f9ef 100644
--- a/docs/reference/esql/functions/kibana/definition/to_date_nanos.json
+++ b/docs/reference/esql/functions/kibana/definition/to_date_nanos.json
@@ -3,7 +3,7 @@
"type" : "eval",
"name" : "to_date_nanos",
"description" : "Converts an input to a nanosecond-resolution date value (aka date_nanos).",
- "note" : "The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.",
+  "note" : "The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z. Attempting to convert values outside of that range will result in null with a warning. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.",
"signatures" : [
{
"params" : [
@@ -90,6 +90,6 @@
"returnType" : "date_nanos"
}
],
- "preview" : true,
+ "preview" : false,
"snapshot_only" : false
}
diff --git a/docs/reference/esql/functions/kibana/docs/match.md b/docs/reference/esql/functions/kibana/docs/match.md
index 72258a168293..80bf84351c18 100644
--- a/docs/reference/esql/functions/kibana/docs/match.md
+++ b/docs/reference/esql/functions/kibana/docs/match.md
@@ -6,7 +6,8 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
Use `MATCH` to perform a <> on the specified field.
Using `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL.
-Match can be used on text fields, as well as other field types like boolean, dates, and numeric types.
+Match can be used on fields from the text family like <> and <>,
+as well as other field types like keyword, boolean, dates, and numeric types.
For a simplified syntax, you can use the <> `:` operator instead of `MATCH`.
diff --git a/docs/reference/esql/functions/kibana/docs/match_operator.md b/docs/reference/esql/functions/kibana/docs/match_operator.md
index b0b619679808..98f55aacde0b 100644
--- a/docs/reference/esql/functions/kibana/docs/match_operator.md
+++ b/docs/reference/esql/functions/kibana/docs/match_operator.md
@@ -3,7 +3,15 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
-->
### MATCH_OPERATOR
-Performs a <> on the specified field. Returns true if the provided query matches the row.
+Use `MATCH` to perform a <> on the specified field.
+Using `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL.
+
+Match can be used on fields from the text family like <> and <>,
+as well as other field types like keyword, boolean, dates, and numeric types.
+
+For a simplified syntax, you can use the <> `:` operator instead of `MATCH`.
+
+`MATCH` returns true if the provided query matches the row.
```
FROM books
diff --git a/docs/reference/esql/functions/kibana/docs/to_date_nanos.md b/docs/reference/esql/functions/kibana/docs/to_date_nanos.md
index 0294802485cc..1bce8d4fca83 100644
--- a/docs/reference/esql/functions/kibana/docs/to_date_nanos.md
+++ b/docs/reference/esql/functions/kibana/docs/to_date_nanos.md
@@ -5,4 +5,4 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
### TO_DATE_NANOS
Converts an input to a nanosecond-resolution date value (aka date_nanos).
-Note: The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.
+Note: The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z. Attempting to convert values outside of that range will result in null with a warning. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.
diff --git a/docs/reference/esql/functions/layout/to_date_nanos.asciidoc b/docs/reference/esql/functions/layout/to_date_nanos.asciidoc
index 977a0ac969e5..2dfd13dac7e2 100644
--- a/docs/reference/esql/functions/layout/to_date_nanos.asciidoc
+++ b/docs/reference/esql/functions/layout/to_date_nanos.asciidoc
@@ -4,8 +4,6 @@
[[esql-to_date_nanos]]
=== `TO_DATE_NANOS`
-preview::["Do not use on production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."]
-
*Syntax*
[.text-center]
diff --git a/docs/reference/esql/functions/type-conversion-functions.asciidoc b/docs/reference/esql/functions/type-conversion-functions.asciidoc
index 9ac9ec290c07..bd70c2789dfa 100644
--- a/docs/reference/esql/functions/type-conversion-functions.asciidoc
+++ b/docs/reference/esql/functions/type-conversion-functions.asciidoc
@@ -18,6 +18,7 @@
* <>
* experimental:[] <>
* <>
+* <>
* <>
* <>
* <>
@@ -37,6 +38,7 @@ include::layout/to_cartesianpoint.asciidoc[]
include::layout/to_cartesianshape.asciidoc[]
include::layout/to_dateperiod.asciidoc[]
include::layout/to_datetime.asciidoc[]
+include::layout/to_date_nanos.asciidoc[]
include::layout/to_degrees.asciidoc[]
include::layout/to_double.asciidoc[]
include::layout/to_geopoint.asciidoc[]
diff --git a/docs/reference/esql/functions/types/date_format.asciidoc b/docs/reference/esql/functions/types/date_format.asciidoc
index b2e97dfa8835..c8f4942d98a6 100644
--- a/docs/reference/esql/functions/types/date_format.asciidoc
+++ b/docs/reference/esql/functions/types/date_format.asciidoc
@@ -5,6 +5,10 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
dateFormat | date | result
+date | | keyword
+date_nanos | | keyword
keyword | date | keyword
+keyword | date_nanos | keyword
text | date | keyword
+text | date_nanos | keyword
|===
diff --git a/docs/reference/indices.asciidoc b/docs/reference/indices.asciidoc
index ca7de396147a..b6b82422cbb4 100644
--- a/docs/reference/indices.asciidoc
+++ b/docs/reference/indices.asciidoc
@@ -24,7 +24,6 @@ index settings, aliases, mappings, and index templates.
* <>
* <>
* <>
-* <>
* <>
* <>
* <>
@@ -143,6 +142,5 @@ include::indices/shrink-index.asciidoc[]
include::indices/simulate-index.asciidoc[]
include::indices/simulate-template.asciidoc[]
include::indices/split-index.asciidoc[]
-include::indices/apis/unfreeze.asciidoc[]
include::indices/update-settings.asciidoc[]
include::indices/put-mapping.asciidoc[]
diff --git a/docs/reference/indices/apis/unfreeze.asciidoc b/docs/reference/indices/apis/unfreeze.asciidoc
deleted file mode 100644
index 5d04d44db744..000000000000
--- a/docs/reference/indices/apis/unfreeze.asciidoc
+++ /dev/null
@@ -1,61 +0,0 @@
-[role="xpack"]
-[[unfreeze-index-api]]
-=== Unfreeze index API
-++++
-Unfreeze index
-++++
-
-[WARNING]
-.Deprecated in 7.14
-====
-In 8.0, we removed the ability to freeze an index. In previous versions,
-freezing an index reduced its memory overhead. However, frozen indices are no
-longer useful due to
-https://www.elastic.co/blog/significantly-decrease-your-elasticsearch-heap-memory-usage[recent
-improvements in heap memory usage].
-You can use this API to unfreeze indices that were frozen in 7.x. Frozen indices
-are not related to the frozen data tier.
-====
-
-.New API reference
-[sidebar]
---
-For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs].
---
-
-Unfreezes an index.
-
-[[unfreeze-index-api-request]]
-==== {api-request-title}
-
-`POST //_unfreeze`
-
-[[unfreeze-index-api-prereqs]]
-==== {api-prereq-title}
-
-* If the {es} {security-features} are enabled, you must have the `manage`
-<> for the target index or index alias.
-
-[[unfreeze-index-api-desc]]
-==== {api-description-title}
-
-When a frozen index is unfrozen, the index goes through the normal recovery
-process and becomes writeable again.
-
-[[unfreeze-index-api-path-parms]]
-==== {api-path-parms-title}
-
-``::
- (Required, string) Identifier for the index.
-
-[[unfreeze-index-api-examples]]
-==== {api-examples-title}
-
-The following example unfreezes an index:
-
-[source,console]
---------------------------------------------------
-POST /my-index-000001/_unfreeze
---------------------------------------------------
-// TEST[s/^/PUT my-index-000001\n/]
-// TEST[skip:unable to ignore deprecation warning]
diff --git a/docs/reference/indices/index-mgmt.asciidoc b/docs/reference/indices/index-mgmt.asciidoc
index 73643dbfd4b3..131bc79faa40 100644
--- a/docs/reference/indices/index-mgmt.asciidoc
+++ b/docs/reference/indices/index-mgmt.asciidoc
@@ -43,7 +43,7 @@ For more information on managing indices, refer to <>.
* To filter the list of indices, use the search bar or click a badge.
Badges indicate if an index is a <>, a
-<>, or <>.
+<>, or <>.
* To drill down into the index
<>, <>, and statistics,
diff --git a/docs/reference/inference/chat-completion-inference.asciidoc b/docs/reference/inference/chat-completion-inference.asciidoc
new file mode 100644
index 000000000000..83a8f94634f2
--- /dev/null
+++ b/docs/reference/inference/chat-completion-inference.asciidoc
@@ -0,0 +1,417 @@
+[role="xpack"]
+[[chat-completion-inference-api]]
+=== Chat completion inference API
+
+Streams a chat completion response.
+
+IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
+For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models.
+However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>.
+
+
+[discrete]
+[[chat-completion-inference-api-request]]
+==== {api-request-title}
+
+`POST /_inference//_unified`
+
+`POST /_inference/chat_completion//_unified`
+
+
+[discrete]
+[[chat-completion-inference-api-prereqs]]
+==== {api-prereq-title}
+
+* Requires the `monitor_inference` <>
+(the built-in `inference_admin` and `inference_user` roles grant this privilege)
+* You must use a client that supports streaming.
+
+
+[discrete]
+[[chat-completion-inference-api-desc]]
+==== {api-description-title}
+
+The chat completion {infer} API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation.
+It only works with the `chat_completion` task type for `openai` and `elastic` {infer} services.
+
+[NOTE]
+====
+The `chat_completion` task type is only available within the _unified API and only supports streaming.
+====
+
+[discrete]
+[[chat-completion-inference-api-path-params]]
+==== {api-path-parms-title}
+
+``::
+(Required, string)
+The unique identifier of the {infer} endpoint.
+
+
+``::
+(Optional, string)
+The type of {infer} task that the model performs. If included, this must be set to the value `chat_completion`.
+
+
+[discrete]
+[[chat-completion-inference-api-request-body]]
+==== {api-request-body-title}
+
+`messages`::
+(Required, array of objects) A list of objects representing the conversation.
+Requests should generally only add new messages from the user (role `user`). The other message roles (`assistant`, `system`, or `tool`) should generally only be copied from the response to a previous completion request, such that the messages array is built up throughout a conversation.
++
+.Assistant message
+[%collapsible%closed]
+=====
+`content`::
+(Required unless `tool_calls` is specified, string or array of objects)
+The contents of the message.
++
+include::inference-shared.asciidoc[tag=chat-completion-schema-content-with-examples]
++
+`role`::
+(Required, string)
+The role of the message author. This should be set to `assistant` for this type of message.
++
+`tool_calls`::
+(Optional, array of objects)
+The tool calls generated by the model.
++
+.Examples
+[%collapsible%closed]
+======
+[source,js]
+------------------------------------------------------------
+{
+ "tool_calls": [
+ {
+ "id": "call_KcAjWtAww20AihPHphUh46Gd",
+ "type": "function",
+ "function": {
+ "name": "get_current_weather",
+ "arguments": "{\"location\":\"Boston, MA\"}"
+ }
+ }
+ ]
+}
+------------------------------------------------------------
+// NOTCONSOLE
+======
++
+`id`:::
+(Required, string)
+The identifier of the tool call.
++
+`type`:::
+(Required, string)
+The type of tool call. This must be set to the value `function`.
++
+`function`:::
+(Required, object)
+The function that the model called.
++
+`name`::::
+(Required, string)
+The name of the function to call.
++
+`arguments`::::
+(Required, string)
+The arguments to call the function with in JSON format.
+=====
++
+.System message
+[%collapsible%closed]
+=====
+`content`:::
+(Required, string or array of objects)
+The contents of the message.
++
+include::inference-shared.asciidoc[tag=chat-completion-schema-content-with-examples]
++
+`role`:::
+(Required, string)
+The role of the message author. This should be set to `system` for this type of message.
+=====
++
+.Tool message
+[%collapsible%closed]
+=====
+`content`::
+(Required, string or array of objects)
+The contents of the message.
++
+include::inference-shared.asciidoc[tag=chat-completion-schema-content-with-examples]
++
+`role`::
+(Required, string)
+The role of the message author. This should be set to `tool` for this type of message.
++
+`tool_call_id`::
+(Required, string)
+The tool call that this message is responding to.
+=====
++
+.User message
+[%collapsible%closed]
+=====
+`content`::
+(Required, string or array of objects)
+The contents of the message.
++
+include::inference-shared.asciidoc[tag=chat-completion-schema-content-with-examples]
++
+`role`::
+(Required, string)
+The role of the message author. This should be set to `user` for this type of message.
+=====
+
+`model`::
+(Optional, string)
+The ID of the model to use. By default, the model ID is set to the value included when creating the inference endpoint.
+
+`max_completion_tokens`::
+(Optional, integer)
+The upper bound limit for the number of tokens that can be generated for a completion request.
+
+`stop`::
+(Optional, array of strings)
+A sequence of strings to control when the model should stop generating additional tokens.
+
+`temperature`::
+(Optional, float)
+The sampling temperature to use.
+
+`tools`::
+(Optional, array of objects)
+A list of tools that the model can call.
++
+.Structure
+[%collapsible%closed]
+=====
+`type`::
+(Required, string)
+The type of tool, must be set to the value `function`.
++
+`function`::
+(Required, object)
+The function definition.
++
+`description`:::
+(Optional, string)
+A description of what the function does. This is used by the model to choose when and how to call the function.
++
+`name`:::
+(Required, string)
+The name of the function.
++
+`parameters`:::
+(Optional, object)
+The parameters the function accepts. This should be formatted as a JSON object.
++
+`strict`:::
+(Optional, boolean)
+Whether to enable schema adherence when generating the function call.
+=====
++
+.Examples
+[%collapsible%closed]
+======
+[source,js]
+------------------------------------------------------------
+{
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_price_of_item",
+ "description": "Get the current price of an item",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "item": {
+ "id": "12345"
+ },
+ "unit": {
+ "type": "currency"
+ }
+ }
+ }
+ }
+ }
+ ]
+}
+------------------------------------------------------------
+// NOTCONSOLE
+======
+
+`tool_choice`::
+(Optional, string or object)
+Controls which tool is called by the model.
++
+String representation:::
+One of `auto`, `none`, or `required`. `auto` allows the model to choose between calling tools and generating a message. `none` causes the model to not call any tools. `required` forces the model to call one or more tools.
++
+Object representation:::
++
+.Structure
+[%collapsible%closed]
+=====
+`type`::
+(Required, string)
+The type of the tool. This must be set to the value `function`.
++
+`function`::
+(Required, object)
++
+`name`:::
+(Required, string)
+The name of the function to call.
+=====
++
+.Examples
+[%collapsible%closed]
+=====
+[source,js]
+------------------------------------------------------------
+{
+ "tool_choice": {
+ "type": "function",
+ "function": {
+ "name": "get_current_weather"
+ }
+ }
+}
+------------------------------------------------------------
+// NOTCONSOLE
+=====
+
+`top_p`::
+(Optional, float)
+Nucleus sampling, an alternative to sampling with temperature.
+
+[discrete]
+[[chat-completion-inference-api-example]]
+==== {api-examples-title}
+
+The following example performs a chat completion on the example question with streaming.
+
+
+[source,console]
+------------------------------------------------------------
+POST _inference/chat_completion/openai-completion/_stream
+{
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "What is Elastic?"
+ }
+ ]
+}
+------------------------------------------------------------
+// TEST[skip:TBD]
+
+The following example performs a chat completion using an Assistant message with `tool_calls`.
+
+[source,console]
+------------------------------------------------------------
+POST _inference/chat_completion/openai-completion/_stream
+{
+ "messages": [
+ {
+ "role": "assistant",
+ "content": "Let's find out what the weather is",
+ "tool_calls": [ <1>
+ {
+ "id": "call_KcAjWtAww20AihPHphUh46Gd",
+ "type": "function",
+ "function": {
+ "name": "get_current_weather",
+ "arguments": "{\"location\":\"Boston, MA\"}"
+ }
+ }
+ ]
+ },
+ { <2>
+ "role": "tool",
+ "content": "The weather is cold",
+ "tool_call_id": "call_KcAjWtAww20AihPHphUh46Gd"
+ }
+ ]
+}
+------------------------------------------------------------
+// TEST[skip:TBD]
+
+<1> Each tool call needs a corresponding Tool message.
+<2> The corresponding Tool message.
+
+The following example performs a chat completion using a User message with `tools` and `tool_choice`.
+
+[source,console]
+------------------------------------------------------------
+POST _inference/chat_completion/openai-completion/_stream
+{
+ "messages": [
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "What's the price of a scarf?"
+ }
+ ]
+ }
+ ],
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_current_price",
+        "description": "Get the current price of an item",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "item": {
+ "id": "123"
+ }
+ }
+ }
+ }
+ }
+ ],
+ "tool_choice": {
+ "type": "function",
+ "function": {
+ "name": "get_current_price"
+ }
+ }
+}
+------------------------------------------------------------
+// TEST[skip:TBD]
+
+The API returns the following response when a request is made to the OpenAI service:
+
+
+[source,txt]
+------------------------------------------------------------
+event: message
+data: {"chat_completion":{"id":"chatcmpl-Ae0TWsy2VPnSfBbv5UztnSdYUMFP3","choices":[{"delta":{"content":"","role":"assistant"},"index":0}],"model":"gpt-4o-2024-08-06","object":"chat.completion.chunk"}}
+
+event: message
+data: {"chat_completion":{"id":"chatcmpl-Ae0TWsy2VPnSfBbv5UztnSdYUMFP3","choices":[{"delta":{"content":" Elastic"},"index":0}],"model":"gpt-4o-2024-08-06","object":"chat.completion.chunk"}}
+
+event: message
+data: {"chat_completion":{"id":"chatcmpl-Ae0TWsy2VPnSfBbv5UztnSdYUMFP3","choices":[{"delta":{"content":" is"},"index":0}],"model":"gpt-4o-2024-08-06","object":"chat.completion.chunk"}}
+
+(...)
+
+event: message
+data: {"chat_completion":{"id":"chatcmpl-Ae0TWsy2VPnSfBbv5UztnSdYUMFP3","choices":[],"model":"gpt-4o-2024-08-06","object":"chat.completion.chunk","usage":{"completion_tokens":28,"prompt_tokens":16,"total_tokens":44}}} <1>
+
+event: message
+data: [DONE]
+------------------------------------------------------------
+// NOTCONSOLE
+
+<1> The last object message of the stream contains the token usage information.
diff --git a/docs/reference/inference/inference-apis.asciidoc b/docs/reference/inference/inference-apis.asciidoc
index ca273afc478e..4f27409973ca 100644
--- a/docs/reference/inference/inference-apis.asciidoc
+++ b/docs/reference/inference/inference-apis.asciidoc
@@ -26,6 +26,7 @@ the following APIs to manage {infer} models and perform {infer}:
* <>
* <>
* <>
+* <>
* <>
[[inference-landscape]]
@@ -34,9 +35,9 @@ image::images/inference-landscape.jpg[A representation of the Elastic inference
An {infer} endpoint enables you to use the corresponding {ml} model without
manual deployment and apply it to your data at ingestion time through
-<>.
+<>.
-Choose a model from your provider or use ELSER – a retrieval model trained by
+Choose a model from your provider or use ELSER – a retrieval model trained by
Elastic –, then create an {infer} endpoint by the <>.
Now use <> to perform
<> on your data.
@@ -67,7 +68,7 @@ The following list contains the default {infer} endpoints listed by `inference_i
Use the `inference_id` of the endpoint in a <> field definition or when creating an <>.
The API call will automatically download and deploy the model which might take a couple of minutes.
Default {infer} enpoints have {ml-docs}/ml-nlp-auto-scale.html#nlp-model-adaptive-allocations[adaptive allocations] enabled.
-For these models, the minimum number of allocations is `0`.
+For these models, the minimum number of allocations is `0`.
If there is no {infer} activity that uses the endpoint, the number of allocations will scale down to `0` automatically after 15 minutes.
@@ -84,7 +85,7 @@ Returning a long document in search results is less useful than providing the mo
Each chunk will include the text subpassage and the corresponding embedding generated from it.
By default, documents are split into sentences and grouped in sections up to 250 words with 1 sentence overlap so that each chunk shares a sentence with the previous chunk.
-Overlapping ensures continuity and prevents vital contextual information in the input text from being lost by a hard break.
+Overlapping ensures continuity and prevents vital contextual information in the input text from being lost by a hard break.
{es} uses the https://unicode-org.github.io/icu-docs/[ICU4J] library to detect word and sentence boundaries for chunking.
https://unicode-org.github.io/icu/userguide/boundaryanalysis/#word-boundary[Word boundaries] are identified by following a series of rules, not just the presence of a whitespace character.
@@ -135,6 +136,7 @@ PUT _inference/sparse_embedding/small_chunk_size
include::delete-inference.asciidoc[]
include::get-inference.asciidoc[]
include::post-inference.asciidoc[]
+include::chat-completion-inference.asciidoc[]
include::put-inference.asciidoc[]
include::stream-inference.asciidoc[]
include::update-inference.asciidoc[]
diff --git a/docs/reference/inference/inference-shared.asciidoc b/docs/reference/inference/inference-shared.asciidoc
index da497c6581e5..b133c5408281 100644
--- a/docs/reference/inference/inference-shared.asciidoc
+++ b/docs/reference/inference/inference-shared.asciidoc
@@ -41,7 +41,7 @@ end::chunking-settings[]
tag::chunking-settings-max-chunking-size[]
Specifies the maximum size of a chunk in words.
Defaults to `250`.
-This value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy).
+This value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy).
end::chunking-settings-max-chunking-size[]
tag::chunking-settings-overlap[]
@@ -63,4 +63,48 @@ Specifies the chunking strategy.
It could be either `sentence` or `word`.
end::chunking-settings-strategy[]
+tag::chat-completion-schema-content-with-examples[]
+.Examples
+[%collapsible%closed]
+======
+String example
+[source,js]
+------------------------------------------------------------
+{
+ "content": "Some string"
+}
+------------------------------------------------------------
+// NOTCONSOLE
+Object example
+[source,js]
+------------------------------------------------------------
+{
+ "content": [
+ {
+ "text": "Some text",
+ "type": "text"
+ }
+ ]
+}
+------------------------------------------------------------
+// NOTCONSOLE
+======
+
+String representation:::
+(Required, string)
+The text content.
++
+Object representation:::
+`text`::::
+(Required, string)
+The text content.
++
+`type`::::
+(Required, string)
+This must be set to the value `text`.
+end::chat-completion-schema-content-with-examples[]
+
+tag::chat-completion-docs[]
+For more information on how to use the `chat_completion` task type, please refer to the <>.
+end::chat-completion-docs[]
diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc
index f0c15323863d..da07d1d3e7d8 100644
--- a/docs/reference/inference/put-inference.asciidoc
+++ b/docs/reference/inference/put-inference.asciidoc
@@ -42,7 +42,7 @@ include::inference-shared.asciidoc[tag=inference-id]
include::inference-shared.asciidoc[tag=task-type]
+
--
-Refer to the service list in the <> for the available task types.
+Refer to the service list in the <> for the available task types.
--
@@ -61,7 +61,7 @@ The create {infer} API enables you to create an {infer} endpoint and configure a
The following services are available through the {infer} API.
-You can find the available task types next to the service name.
+You can find the available task types next to the service name.
Click the links to review the configuration details of the services:
* <> (`completion`, `rerank`, `sparse_embedding`, `text_embedding`)
@@ -73,10 +73,10 @@ Click the links to review the configuration details of the services:
* <> (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland)
* <> (`sparse_embedding`)
* <> (`completion`, `text_embedding`)
-* <> (`rerank`, `text_embedding`)
+* <> (`rerank`, `text_embedding`)
* <> (`text_embedding`)
* <> (`text_embedding`)
-* <> (`completion`, `text_embedding`)
+* <> (`chat_completion`, `completion`, `text_embedding`)
* <> (`text_embedding`)
* <> (`text_embedding`, `rerank`)
diff --git a/docs/reference/inference/service-openai.asciidoc b/docs/reference/inference/service-openai.asciidoc
index e4be7f18e09d..590f280b1c49 100644
--- a/docs/reference/inference/service-openai.asciidoc
+++ b/docs/reference/inference/service-openai.asciidoc
@@ -31,10 +31,18 @@ include::inference-shared.asciidoc[tag=task-type]
--
Available task types:
+* `chat_completion`,
* `completion`,
* `text_embedding`.
--
+[NOTE]
+====
+The `chat_completion` task type only supports streaming and only through the `_unified` API.
+
+include::inference-shared.asciidoc[tag=chat-completion-docs]
+====
+
[discrete]
[[infer-service-openai-api-request-body]]
==== {api-request-body-title}
@@ -61,7 +69,7 @@ include::inference-shared.asciidoc[tag=chunking-settings-strategy]
`service`::
(Required, string)
-The type of service supported for the specified task type. In this case,
+The type of service supported for the specified task type. In this case,
`openai`.
`service_settings`::
@@ -176,4 +184,4 @@ PUT _inference/completion/openai-completion
}
}
------------------------------------------------------------
-// TEST[skip:TBD]
\ No newline at end of file
+// TEST[skip:TBD]
diff --git a/docs/reference/inference/stream-inference.asciidoc b/docs/reference/inference/stream-inference.asciidoc
index 42abb589f9af..4a3ce3190971 100644
--- a/docs/reference/inference/stream-inference.asciidoc
+++ b/docs/reference/inference/stream-inference.asciidoc
@@ -38,8 +38,12 @@ However, if you do not plan to use the {infer} APIs to use these models or if yo
==== {api-description-title}
The stream {infer} API enables real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation.
-It only works with the `completion` task type.
+It only works with the `completion` and `chat_completion` task types.
+[NOTE]
+====
+include::inference-shared.asciidoc[tag=chat-completion-docs]
+====
[discrete]
[[stream-inference-api-path-params]]
diff --git a/docs/reference/inference/update-inference.asciidoc b/docs/reference/inference/update-inference.asciidoc
index d3a90f5d84e6..441c21629da5 100644
--- a/docs/reference/inference/update-inference.asciidoc
+++ b/docs/reference/inference/update-inference.asciidoc
@@ -19,9 +19,9 @@ However, if you do not plan to use the {infer} APIs to use these models or if yo
[[update-inference-api-request]]
==== {api-request-title}
-`POST _inference//_update`
+`PUT _inference//_update`
-`POST _inference///_update`
+`PUT _inference///_update`
[discrete]
@@ -52,7 +52,7 @@ Click the links to review the service configuration details:
* <> (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland)
* <> (`sparse_embedding`)
* <> (`completion`, `text_embedding`)
-* <> (`rerank`, `text_embedding`)
+* <> (`rerank`, `text_embedding`)
* <> (`text_embedding`)
* <> (`text_embedding`)
* <> (`completion`, `text_embedding`)
@@ -81,7 +81,7 @@ The following example shows how to update an API key of an {infer} endpoint call
[source,console]
------------------------------------------------------------
-POST _inference/my-inference-endpoint/_update
+PUT _inference/my-inference-endpoint/_update
{
"service_settings": {
"api_key": ""
diff --git a/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc b/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc
index 82263c98e911..766886a0b48a 100644
--- a/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc
+++ b/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc
@@ -384,6 +384,7 @@ A collection of model size stats fields.
`model_size_bytes`:::
(integer)
The size of the model in bytes.
+This parameter applies only to PyTorch models.
`required_native_memory_bytes`:::
(integer)
diff --git a/docs/reference/ml/trained-models/apis/get-trained-models.asciidoc b/docs/reference/ml/trained-models/apis/get-trained-models.asciidoc
index 4f583319ca38..03777afbd6ee 100644
--- a/docs/reference/ml/trained-models/apis/get-trained-models.asciidoc
+++ b/docs/reference/ml/trained-models/apis/get-trained-models.asciidoc
@@ -131,6 +131,7 @@ The free-text description of the trained model.
`model_size_bytes`:::
(integer)
The estimated model size in bytes to keep the trained model in memory.
+This parameter applies only to {dfanalytics} trained models.
`estimated_operations`:::
(integer)
diff --git a/docs/reference/query-dsl/bool-query.asciidoc b/docs/reference/query-dsl/bool-query.asciidoc
index c24135a37091..27220f0d8514 100644
--- a/docs/reference/query-dsl/bool-query.asciidoc
+++ b/docs/reference/query-dsl/bool-query.asciidoc
@@ -13,21 +13,24 @@ occurrence types are:
|=======================================================================
|Occur |Description
|`must` |The clause (query) must appear in matching documents and will
-contribute to the score.
+contribute to the score. Each query defined under a `must` acts as a logical "AND", returning only documents that match _all_ the specified queries.
+
+|`should` |The clause (query) should appear in the matching document. Each query defined under a `should` acts as a logical "OR", returning documents that match _any_ of the specified queries.
|`filter` |The clause (query) must appear in matching documents. However unlike
`must` the score of the query will be ignored. Filter clauses are executed
in <>, meaning that scoring is ignored
-and clauses are considered for caching.
-
-|`should` |The clause (query) should appear in the matching document.
+and clauses are considered for caching. Each query defined under a `filter` acts as a logical "AND", returning only documents that match _all_ the specified queries.
|`must_not` |The clause (query) must not appear in the matching
documents. Clauses are executed in <> meaning
that scoring is ignored and clauses are considered for caching. Because scoring is
-ignored, a score of `0` for all documents is returned.
+ignored, a score of `0` for all documents is returned. Each query defined under a `must_not` acts as a logical "NOT", returning only documents that do not match any of the specified queries.
+
|=======================================================================
+The `must` and `should` clauses function as logical AND, OR operators, contributing to the scoring of results. However, these results will not be cached for faster retrieval. In contrast, the `filter` and `must_not` clauses are used to include or exclude results without impacting the score, unless used within a `constant_score` query.
+
The `bool` query takes a _more-matches-is-better_ approach, so the score from
each matching `must` or `should` clause will be added together to provide the
final `_score` for each document.
diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc
index c3bf84fa600d..68ecc469c6cb 100644
--- a/docs/reference/redirects.asciidoc
+++ b/docs/reference/redirects.asciidoc
@@ -156,10 +156,16 @@ See <>.
The freeze index API was removed in 8.0.
// tag::frozen-removal-explanation[]
Frozen indices are no longer useful due to
-https://www.elastic.co/blog/significantly-decrease-your-elasticsearch-heap-memory-usage[recent
-improvements in heap memory usage].
+https://www.elastic.co/blog/significantly-decrease-your-elasticsearch-heap-memory-usage[improvements
+in heap memory usage].
// end::frozen-removal-explanation[]
+[role="exclude",id="unfreeze-index-api"]
+=== Unfreeze index API
+
+The unfreeze index API was removed in 9.0.
+include::redirects.asciidoc[tag=frozen-removal-explanation]
+
[role="exclude",id="ilm-freeze"]
=== Freeze {ilm-init} action
@@ -326,7 +332,7 @@ See <>.
See <>.
// [END] Security redirects
-[roles="exclude",id="modules-scripting-stored-scripts"]
+[role="exclude",id="modules-scripting-stored-scripts"]
=== Stored scripts
See <>
@@ -1749,8 +1755,10 @@ See <>.
=== Frozen indices
// tag::frozen-index-redirect[]
-
-For API documentation, see <>.
+Older versions of {es} provided the option to reduce the amount of data kept in memory for an index, at the expense of
+increasing search latency. This was known as 'freezing' the index.
+include::redirects.asciidoc[tag=frozen-removal-explanation]
+The freeze index API was removed in 8.0, and the unfreeze index API was removed in 9.0.
// end::frozen-index-redirect[]
[role="exclude",id="best_practices"]
diff --git a/docs/reference/search/search-your-data/search-across-clusters.asciidoc b/docs/reference/search/search-your-data/search-across-clusters.asciidoc
index 5f9e92c57579..8d3768817e85 100644
--- a/docs/reference/search/search-your-data/search-across-clusters.asciidoc
+++ b/docs/reference/search/search-your-data/search-across-clusters.asciidoc
@@ -22,7 +22,7 @@ The following APIs support {ccs}:
* experimental:[] <>
* experimental:[] <>
* experimental:[] <>
-* experimental:[] <>
+* experimental:[] <>
[discrete]
=== Prerequisites
diff --git a/docs/reference/sql/language/indices.asciidoc b/docs/reference/sql/language/indices.asciidoc
index 1dee7f0840ad..1912a020ab0b 100644
--- a/docs/reference/sql/language/indices.asciidoc
+++ b/docs/reference/sql/language/indices.asciidoc
@@ -100,7 +100,7 @@ requires the keyword `LIKE` for SQL `LIKE` pattern.
[[sql-index-frozen]]
=== Frozen Indices
-By default, {es-sql} doesn't search <>. To
+By default, {es-sql} doesn't search <>. To
search frozen indices, use one of the following features:
dedicated configuration parameter::
diff --git a/docs/reference/troubleshooting/common-issues/red-yellow-cluster-status.asciidoc b/docs/reference/troubleshooting/common-issues/red-yellow-cluster-status.asciidoc
index c07e92c05899..5d74ca66ee6b 100644
--- a/docs/reference/troubleshooting/common-issues/red-yellow-cluster-status.asciidoc
+++ b/docs/reference/troubleshooting/common-issues/red-yellow-cluster-status.asciidoc
@@ -78,35 +78,31 @@ A shard can become unassigned for several reasons. The following tips outline th
most common causes and their solutions.
[discrete]
-[[fix-cluster-status-reenable-allocation]]
-===== Re-enable shard allocation
+[[fix-cluster-status-only-one-node]]
+===== Single node cluster
-You typically disable allocation during a <> or other
-cluster maintenance. If you forgot to re-enable allocation afterward, {es} will
-be unable to assign shards. To re-enable allocation, reset the
-`cluster.routing.allocation.enable` cluster setting.
+{es} will never assign a replica to the same node as the primary shard. A single-node cluster will always have yellow status. To change to green, set <> to 0 for all indices.
-[source,console]
-----
-PUT _cluster/settings
-{
- "persistent" : {
- "cluster.routing.allocation.enable" : null
- }
-}
-----
-
-See https://www.youtube.com/watch?v=MiKKUdZvwnI[this video] for walkthrough of troubleshooting "no allocations are allowed".
+Therefore, if the number of replicas equals or exceeds the number of nodes, some shards won't be allocated.
[discrete]
[[fix-cluster-status-recover-nodes]]
===== Recover lost nodes
Shards often become unassigned when a data node leaves the cluster. This can
-occur for several reasons, ranging from connectivity issues to hardware failure.
+occur for several reasons:
+
+* A manual node restart will cause a temporary unhealthy cluster state until the node recovers.
+
+* When a node becomes overloaded or fails, it can temporarily disrupt the cluster’s health, leading to an unhealthy state. Prolonged garbage collection (GC) pauses, caused by out-of-memory errors or high memory usage during intensive searches, can trigger this state. See <> for more JVM-related issues.
+
+* Network issues can prevent reliable node communication, causing shards to become out of sync. Check the logs for repeated messages about nodes leaving and rejoining the cluster.
+
After you resolve the issue and recover the node, it will rejoin the cluster.
{es} will then automatically allocate any unassigned shards.
+You can monitor this process by <>. The number of unallocated shards should progressively decrease until green status is reached.
+
To avoid wasting resources on temporary issues, {es} <> by one minute by default. If you've recovered a node and don’t want
to wait for the delay period, you can call the <> or add a delete phase. If you no longer need to search the data, you
@@ -219,11 +216,39 @@ watermark or set it to an explicit byte value.
PUT _cluster/settings
{
"persistent": {
- "cluster.routing.allocation.disk.watermark.low": "30gb"
+ "cluster.routing.allocation.disk.watermark.low": "90%",
+ "cluster.routing.allocation.disk.watermark.high": "95%"
}
}
----
-// TEST[s/"30gb"/null/]
+// TEST[s/"90%"/null/]
+// TEST[s/"95%"/null/]
+
+[IMPORTANT]
+====
+This is usually a temporary solution and may cause instability if disk space is not freed up.
+====
+
+[discrete]
+[[fix-cluster-status-reenable-allocation]]
+===== Re-enable shard allocation
+
+You typically disable allocation during a <> or other
+cluster maintenance. If you forgot to re-enable allocation afterward, {es} will
+be unable to assign shards. To re-enable allocation, reset the
+`cluster.routing.allocation.enable` cluster setting.
+
+[source,console]
+----
+PUT _cluster/settings
+{
+ "persistent" : {
+ "cluster.routing.allocation.enable" : null
+ }
+}
+----
+
+See https://www.youtube.com/watch?v=MiKKUdZvwnI[this video] for a walkthrough of troubleshooting "no allocations are allowed".
[discrete]
[[fix-cluster-status-jvm]]
@@ -271,4 +296,4 @@ POST _cluster/reroute
// TEST[s/^/PUT my-index\n/]
// TEST[catch:bad_request]
-See https://www.youtube.com/watch?v=6OAg9IyXFO4[this video] for a walkthrough of troubleshooting `no_valid_shard_copy`.
\ No newline at end of file
+See https://www.youtube.com/watch?v=6OAg9IyXFO4[this video] for a walkthrough of troubleshooting `no_valid_shard_copy`.
diff --git a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java
index 8b03aeb17858..1e03c61df98e 100644
--- a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java
+++ b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java
@@ -13,10 +13,22 @@ import java.io.InputStream;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.net.ContentHandlerFactory;
+import java.net.DatagramPacket;
+import java.net.DatagramSocket;
import java.net.DatagramSocketImplFactory;
import java.net.FileNameMap;
+import java.net.InetAddress;
+import java.net.MulticastSocket;
+import java.net.NetworkInterface;
+import java.net.Proxy;
+import java.net.ProxySelector;
+import java.net.ResponseCache;
+import java.net.ServerSocket;
+import java.net.Socket;
+import java.net.SocketAddress;
import java.net.SocketImplFactory;
import java.net.URL;
+import java.net.URLStreamHandler;
import java.net.URLStreamHandlerFactory;
import java.util.List;
@@ -167,4 +179,79 @@ public interface EntitlementChecker {
void check$java_net_URLConnection$$setContentHandlerFactory(Class> callerClass, ContentHandlerFactory fac);
+ ////////////////////
+ //
+ // Network access
+ //
+ void check$java_net_ProxySelector$$setDefault(Class> callerClass, ProxySelector ps);
+
+ void check$java_net_ResponseCache$$setDefault(Class> callerClass, ResponseCache rc);
+
+ void check$java_net_spi_InetAddressResolverProvider$(Class> callerClass);
+
+ void check$java_net_spi_URLStreamHandlerProvider$(Class> callerClass);
+
+ void check$java_net_URL$(Class> callerClass, String protocol, String host, int port, String file, URLStreamHandler handler);
+
+ void check$java_net_URL$(Class> callerClass, URL context, String spec, URLStreamHandler handler);
+
+ void check$java_net_DatagramSocket$bind(Class> callerClass, DatagramSocket that, SocketAddress addr);
+
+ void check$java_net_DatagramSocket$connect(Class> callerClass, DatagramSocket that, InetAddress addr);
+
+ void check$java_net_DatagramSocket$connect(Class> callerClass, DatagramSocket that, SocketAddress addr);
+
+ void check$java_net_DatagramSocket$send(Class> callerClass, DatagramSocket that, DatagramPacket p);
+
+ void check$java_net_DatagramSocket$receive(Class> callerClass, DatagramSocket that, DatagramPacket p);
+
+ void check$java_net_DatagramSocket$joinGroup(Class> callerClass, DatagramSocket that, SocketAddress addr, NetworkInterface ni);
+
+ void check$java_net_DatagramSocket$leaveGroup(Class> callerClass, DatagramSocket that, SocketAddress addr, NetworkInterface ni);
+
+ void check$java_net_MulticastSocket$joinGroup(Class> callerClass, MulticastSocket that, InetAddress addr);
+
+ void check$java_net_MulticastSocket$joinGroup(Class> callerClass, MulticastSocket that, SocketAddress addr, NetworkInterface ni);
+
+ void check$java_net_MulticastSocket$leaveGroup(Class> callerClass, MulticastSocket that, InetAddress addr);
+
+ void check$java_net_MulticastSocket$leaveGroup(Class> callerClass, MulticastSocket that, SocketAddress addr, NetworkInterface ni);
+
+ void check$java_net_MulticastSocket$send(Class> callerClass, MulticastSocket that, DatagramPacket p, byte ttl);
+
+ // Binding/connecting ctor
+ void check$java_net_ServerSocket$(Class> callerClass, int port);
+
+ void check$java_net_ServerSocket$(Class> callerClass, int port, int backlog);
+
+ void check$java_net_ServerSocket$(Class> callerClass, int port, int backlog, InetAddress bindAddr);
+
+ void check$java_net_ServerSocket$accept(Class> callerClass, ServerSocket that);
+
+ void check$java_net_ServerSocket$implAccept(Class> callerClass, ServerSocket that, Socket s);
+
+ void check$java_net_ServerSocket$bind(Class> callerClass, ServerSocket that, SocketAddress endpoint);
+
+ void check$java_net_ServerSocket$bind(Class> callerClass, ServerSocket that, SocketAddress endpoint, int backlog);
+
+ // Binding/connecting ctors
+ void check$java_net_Socket$(Class> callerClass, Proxy proxy);
+
+ void check$java_net_Socket$(Class> callerClass, String host, int port);
+
+ void check$java_net_Socket$(Class> callerClass, InetAddress address, int port);
+
+ void check$java_net_Socket$(Class> callerClass, String host, int port, InetAddress localAddr, int localPort);
+
+ void check$java_net_Socket$(Class> callerClass, InetAddress address, int port, InetAddress localAddr, int localPort);
+
+ void check$java_net_Socket$(Class> callerClass, String host, int port, boolean stream);
+
+ void check$java_net_Socket$(Class> callerClass, InetAddress host, int port, boolean stream);
+
+ void check$java_net_Socket$bind(Class> callerClass, Socket that, SocketAddress endpoint);
+
+ void check$java_net_Socket$connect(Class> callerClass, Socket that, SocketAddress endpoint);
+
+ void check$java_net_Socket$connect(Class> callerClass, Socket that, SocketAddress endpoint, int timeout);
}
diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/DummyImplementations.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/DummyImplementations.java
index 6dbb684c7151..304aead1e2bf 100644
--- a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/DummyImplementations.java
+++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/DummyImplementations.java
@@ -9,8 +9,19 @@
package org.elasticsearch.entitlement.qa.common;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.DatagramPacket;
+import java.net.DatagramSocket;
+import java.net.DatagramSocketImpl;
import java.net.InetAddress;
+import java.net.NetworkInterface;
+import java.net.ServerSocket;
import java.net.Socket;
+import java.net.SocketAddress;
+import java.net.SocketException;
+import java.net.SocketImpl;
import java.security.cert.Certificate;
import java.text.BreakIterator;
import java.text.Collator;
@@ -290,6 +301,81 @@ class DummyImplementations {
}
}
+ private static class DummySocketImpl extends SocketImpl {
+ @Override
+ protected void create(boolean stream) {}
+
+ @Override
+ protected void connect(String host, int port) {}
+
+ @Override
+ protected void connect(InetAddress address, int port) {}
+
+ @Override
+ protected void connect(SocketAddress address, int timeout) {}
+
+ @Override
+ protected void bind(InetAddress host, int port) {}
+
+ @Override
+ protected void listen(int backlog) {}
+
+ @Override
+ protected void accept(SocketImpl s) {}
+
+ @Override
+ protected InputStream getInputStream() {
+ return null;
+ }
+
+ @Override
+ protected OutputStream getOutputStream() {
+ return null;
+ }
+
+ @Override
+ protected int available() {
+ return 0;
+ }
+
+ @Override
+ protected void close() {}
+
+ @Override
+ protected void sendUrgentData(int data) {}
+
+ @Override
+ public void setOption(int optID, Object value) {}
+
+ @Override
+ public Object getOption(int optID) {
+ return null;
+ }
+ }
+
+ static class DummySocket extends Socket {
+ DummySocket() throws SocketException {
+ super(new DummySocketImpl());
+ }
+ }
+
+ static class DummyServerSocket extends ServerSocket {
+ DummyServerSocket() {
+ super(new DummySocketImpl());
+ }
+ }
+
+ static class DummyBoundServerSocket extends ServerSocket {
+ DummyBoundServerSocket() {
+ super(new DummySocketImpl());
+ }
+
+ @Override
+ public boolean isBound() {
+ return true;
+ }
+ }
+
static class DummySSLSocketFactory extends SSLSocketFactory {
@Override
public Socket createSocket(String host, int port) {
@@ -327,8 +413,77 @@ class DummyImplementations {
}
}
+ static class DummyDatagramSocket extends DatagramSocket {
+ DummyDatagramSocket() throws SocketException {
+ super(new DatagramSocketImpl() {
+ @Override
+ protected void create() throws SocketException {}
+
+ @Override
+ protected void bind(int lport, InetAddress laddr) throws SocketException {}
+
+ @Override
+ protected void send(DatagramPacket p) throws IOException {}
+
+ @Override
+ protected int peek(InetAddress i) throws IOException {
+ return 0;
+ }
+
+ @Override
+ protected int peekData(DatagramPacket p) throws IOException {
+ return 0;
+ }
+
+ @Override
+ protected void receive(DatagramPacket p) throws IOException {}
+
+ @Override
+ protected void setTTL(byte ttl) throws IOException {}
+
+ @Override
+ protected byte getTTL() throws IOException {
+ return 0;
+ }
+
+ @Override
+ protected void setTimeToLive(int ttl) throws IOException {}
+
+ @Override
+ protected int getTimeToLive() throws IOException {
+ return 0;
+ }
+
+ @Override
+ protected void join(InetAddress inetaddr) throws IOException {}
+
+ @Override
+ protected void leave(InetAddress inetaddr) throws IOException {}
+
+ @Override
+ protected void joinGroup(SocketAddress mcastaddr, NetworkInterface netIf) throws IOException {}
+
+ @Override
+ protected void leaveGroup(SocketAddress mcastaddr, NetworkInterface netIf) throws IOException {}
+
+ @Override
+ protected void close() {}
+
+ @Override
+ public void setOption(int optID, Object value) throws SocketException {}
+
+ @Override
+ public Object getOption(int optID) throws SocketException {
+ return null;
+ }
+
+ @Override
+ protected void connect(InetAddress address, int port) throws SocketException {}
+ });
+ }
+ }
+
private static RuntimeException unexpected() {
return new IllegalStateException("This method isn't supposed to be called");
}
-
}
diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/NetworkAccessCheckActions.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/NetworkAccessCheckActions.java
new file mode 100644
index 000000000000..c88d4ce2b11a
--- /dev/null
+++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/NetworkAccessCheckActions.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.entitlement.qa.common;
+
+import org.elasticsearch.core.SuppressForbidden;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.Proxy;
+import java.net.ServerSocket;
+import java.net.Socket;
+
+class NetworkAccessCheckActions {
+
+ static void serverSocketAccept() throws IOException {
+ try (ServerSocket socket = new DummyImplementations.DummyBoundServerSocket()) {
+ try {
+ socket.accept();
+ } catch (IOException e) {
+ // Our dummy socket cannot accept connections unless we tell the JDK how to create a socket for it.
+ // But Socket.setSocketImplFactory(); is one of the methods we always forbid, so we cannot use it.
+ // Still, we can check accept is called (allowed/denied), we don't care if it fails later for this
+ // known reason.
+ assert e.getMessage().contains("client socket implementation factory not set");
+ }
+ }
+ }
+
+ static void serverSocketBind() throws IOException {
+ try (ServerSocket socket = new DummyImplementations.DummyServerSocket()) {
+ socket.bind(null);
+ }
+ }
+
+ @SuppressForbidden(reason = "Testing entitlement check on forbidden action")
+ static void createSocketWithProxy() throws IOException {
+ try (Socket socket = new Socket(new Proxy(Proxy.Type.HTTP, new InetSocketAddress(0)))) {
+ assert socket.isBound() == false;
+ }
+ }
+
+ static void socketBind() throws IOException {
+ try (Socket socket = new DummyImplementations.DummySocket()) {
+ socket.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0));
+ }
+ }
+
+ @SuppressForbidden(reason = "Testing entitlement check on forbidden action")
+ static void socketConnect() throws IOException {
+ try (Socket socket = new DummyImplementations.DummySocket()) {
+ socket.connect(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0));
+ }
+ }
+}
diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java
index 9869af4d8525..a156d20e3686 100644
--- a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java
+++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java
@@ -11,6 +11,7 @@ package org.elasticsearch.entitlement.qa.common;
import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.common.Strings;
+import org.elasticsearch.core.CheckedRunnable;
import org.elasticsearch.core.SuppressForbidden;
import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyBreakIteratorProvider;
import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyCalendarDataProvider;
@@ -32,16 +33,25 @@ import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
-import java.io.UncheckedIOException;
+import java.net.DatagramPacket;
import java.net.DatagramSocket;
-import java.net.DatagramSocketImpl;
-import java.net.DatagramSocketImplFactory;
import java.net.HttpURLConnection;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.MalformedURLException;
+import java.net.NetworkInterface;
+import java.net.ProxySelector;
+import java.net.ResponseCache;
import java.net.ServerSocket;
import java.net.Socket;
+import java.net.SocketException;
import java.net.URL;
import java.net.URLClassLoader;
import java.net.URLConnection;
+import java.net.URLStreamHandler;
+import java.net.spi.InetAddressResolver;
+import java.net.spi.InetAddressResolverProvider;
+import java.net.spi.URLStreamHandlerProvider;
import java.security.NoSuchAlgorithmException;
import java.util.List;
import java.util.Map;
@@ -57,25 +67,26 @@ import static org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckActio
import static org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction.CheckAction.forPlugins;
import static org.elasticsearch.rest.RestRequest.Method.GET;
+@SuppressWarnings("unused")
public class RestEntitlementsCheckAction extends BaseRestHandler {
private static final Logger logger = LogManager.getLogger(RestEntitlementsCheckAction.class);
public static final Thread NO_OP_SHUTDOWN_HOOK = new Thread(() -> {}, "Shutdown hook for testing");
private final String prefix;
- record CheckAction(Runnable action, boolean isAlwaysDeniedToPlugins) {
+ record CheckAction(CheckedRunnable action, boolean isAlwaysDeniedToPlugins) {
/**
* These cannot be granted to plugins, so our test plugins cannot test the "allowed" case.
* Used both for always-denied entitlements as well as those granted only to the server itself.
*/
- static CheckAction deniedToPlugins(Runnable action) {
+ static CheckAction deniedToPlugins(CheckedRunnable action) {
return new CheckAction(action, true);
}
- static CheckAction forPlugins(Runnable action) {
+ static CheckAction forPlugins(CheckedRunnable action) {
return new CheckAction(action, false);
}
- static CheckAction alwaysDenied(Runnable action) {
+ static CheckAction alwaysDenied(CheckedRunnable action) {
return new CheckAction(action, true);
}
}
@@ -125,15 +136,81 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
entry("socket_setSocketImplFactory", alwaysDenied(RestEntitlementsCheckAction::socket$$setSocketImplFactory)),
entry("url_setURLStreamHandlerFactory", alwaysDenied(RestEntitlementsCheckAction::url$$setURLStreamHandlerFactory)),
entry("urlConnection_setFileNameMap", alwaysDenied(RestEntitlementsCheckAction::urlConnection$$setFileNameMap)),
- entry("urlConnection_setContentHandlerFactory", alwaysDenied(RestEntitlementsCheckAction::urlConnection$$setContentHandlerFactory))
+ entry("urlConnection_setContentHandlerFactory", alwaysDenied(RestEntitlementsCheckAction::urlConnection$$setContentHandlerFactory)),
+
+ entry("proxySelector_setDefault", alwaysDenied(RestEntitlementsCheckAction::setDefaultProxySelector)),
+ entry("responseCache_setDefault", alwaysDenied(RestEntitlementsCheckAction::setDefaultResponseCache)),
+ entry("createInetAddressResolverProvider", alwaysDenied(RestEntitlementsCheckAction::createInetAddressResolverProvider)),
+ entry("createURLStreamHandlerProvider", alwaysDenied(RestEntitlementsCheckAction::createURLStreamHandlerProvider)),
+ entry("createURLWithURLStreamHandler", alwaysDenied(RestEntitlementsCheckAction::createURLWithURLStreamHandler)),
+ entry("createURLWithURLStreamHandler2", alwaysDenied(RestEntitlementsCheckAction::createURLWithURLStreamHandler2)),
+ entry("datagram_socket_bind", forPlugins(RestEntitlementsCheckAction::bindDatagramSocket)),
+ entry("datagram_socket_connect", forPlugins(RestEntitlementsCheckAction::connectDatagramSocket)),
+ entry("datagram_socket_send", forPlugins(RestEntitlementsCheckAction::sendDatagramSocket)),
+ entry("datagram_socket_receive", forPlugins(RestEntitlementsCheckAction::receiveDatagramSocket)),
+ entry("datagram_socket_join_group", forPlugins(RestEntitlementsCheckAction::joinGroupDatagramSocket)),
+ entry("datagram_socket_leave_group", forPlugins(RestEntitlementsCheckAction::leaveGroupDatagramSocket)),
+
+ entry("create_socket_with_proxy", forPlugins(NetworkAccessCheckActions::createSocketWithProxy)),
+ entry("socket_bind", forPlugins(NetworkAccessCheckActions::socketBind)),
+ entry("socket_connect", forPlugins(NetworkAccessCheckActions::socketConnect)),
+ entry("server_socket_bind", forPlugins(NetworkAccessCheckActions::serverSocketBind)),
+ entry("server_socket_accept", forPlugins(NetworkAccessCheckActions::serverSocketAccept))
);
- private static void setDefaultSSLContext() {
- try {
- SSLContext.setDefault(SSLContext.getDefault());
- } catch (NoSuchAlgorithmException e) {
- throw new RuntimeException(e);
- }
+ private static void createURLStreamHandlerProvider() {
+ var x = new URLStreamHandlerProvider() {
+ @Override
+ public URLStreamHandler createURLStreamHandler(String protocol) {
+ return null;
+ }
+ };
+ }
+
+ @SuppressWarnings("deprecation")
+ private static void createURLWithURLStreamHandler() throws MalformedURLException {
+ var x = new URL("http", "host", 1234, "file", new URLStreamHandler() {
+ @Override
+ protected URLConnection openConnection(URL u) {
+ return null;
+ }
+ });
+ }
+
+ @SuppressWarnings("deprecation")
+ private static void createURLWithURLStreamHandler2() throws MalformedURLException {
+ var x = new URL(null, "spec", new URLStreamHandler() {
+ @Override
+ protected URLConnection openConnection(URL u) {
+ return null;
+ }
+ });
+ }
+
+ private static void createInetAddressResolverProvider() {
+ var x = new InetAddressResolverProvider() {
+ @Override
+ public InetAddressResolver get(Configuration configuration) {
+ return null;
+ }
+
+ @Override
+ public String name() {
+ return "TEST";
+ }
+ };
+ }
+
+ private static void setDefaultResponseCache() {
+ ResponseCache.setDefault(null);
+ }
+
+ private static void setDefaultProxySelector() {
+ ProxySelector.setDefault(null);
+ }
+
+ private static void setDefaultSSLContext() throws NoSuchAlgorithmException {
+ SSLContext.setDefault(SSLContext.getDefault());
}
private static void setDefaultHostnameVerifier() {
@@ -159,28 +236,18 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
System.exit(123);
}
- private static void createClassLoader() {
+ private static void createClassLoader() throws IOException {
try (var classLoader = new URLClassLoader("test", new URL[0], RestEntitlementsCheckAction.class.getClassLoader())) {
logger.info("Created URLClassLoader [{}]", classLoader.getName());
- } catch (IOException e) {
- throw new UncheckedIOException(e);
}
}
- private static void processBuilder_start() {
- try {
- new ProcessBuilder("").start();
- } catch (IOException e) {
- throw new IllegalStateException(e);
- }
+ private static void processBuilder_start() throws IOException {
+ new ProcessBuilder("").start();
}
- private static void processBuilder_startPipeline() {
- try {
- ProcessBuilder.startPipeline(List.of());
- } catch (IOException e) {
- throw new IllegalStateException(e);
- }
+ private static void processBuilder_startPipeline() throws IOException {
+ ProcessBuilder.startPipeline(List.of());
}
private static void setHttpsConnectionProperties() {
@@ -268,17 +335,8 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
@SuppressWarnings("deprecation")
@SuppressForbidden(reason = "We're required to prevent calls to this forbidden API")
- private static void datagramSocket$$setDatagramSocketImplFactory() {
- try {
- DatagramSocket.setDatagramSocketImplFactory(new DatagramSocketImplFactory() {
- @Override
- public DatagramSocketImpl createDatagramSocketImpl() {
- throw new IllegalStateException();
- }
- });
- } catch (IOException e) {
- throw new IllegalStateException(e);
- }
+ private static void datagramSocket$$setDatagramSocketImplFactory() throws IOException {
+ DatagramSocket.setDatagramSocketImplFactory(() -> { throw new IllegalStateException(); });
}
private static void httpURLConnection$$setFollowRedirects() {
@@ -287,22 +345,14 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
@SuppressWarnings("deprecation")
@SuppressForbidden(reason = "We're required to prevent calls to this forbidden API")
- private static void serverSocket$$setSocketFactory() {
- try {
- ServerSocket.setSocketFactory(() -> { throw new IllegalStateException(); });
- } catch (IOException e) {
- throw new IllegalStateException(e);
- }
+ private static void serverSocket$$setSocketFactory() throws IOException {
+ ServerSocket.setSocketFactory(() -> { throw new IllegalStateException(); });
}
@SuppressWarnings("deprecation")
@SuppressForbidden(reason = "We're required to prevent calls to this forbidden API")
- private static void socket$$setSocketImplFactory() {
- try {
- Socket.setSocketImplFactory(() -> { throw new IllegalStateException(); });
- } catch (IOException e) {
- throw new IllegalStateException(e);
- }
+ private static void socket$$setSocketImplFactory() throws IOException {
+ Socket.setSocketImplFactory(() -> { throw new IllegalStateException(); });
}
private static void url$$setURLStreamHandlerFactory() {
@@ -317,6 +367,51 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
URLConnection.setContentHandlerFactory(__ -> { throw new IllegalStateException(); });
}
+ private static void bindDatagramSocket() throws SocketException {
+ try (var socket = new DatagramSocket(null)) {
+ socket.bind(null);
+ }
+ }
+
+ @SuppressForbidden(reason = "testing entitlements")
+ private static void connectDatagramSocket() throws SocketException {
+ try (var socket = new DummyImplementations.DummyDatagramSocket()) {
+ socket.connect(new InetSocketAddress(1234));
+ }
+ }
+
+ private static void joinGroupDatagramSocket() throws IOException {
+ try (var socket = new DummyImplementations.DummyDatagramSocket()) {
+ socket.joinGroup(
+ new InetSocketAddress(InetAddress.getByAddress(new byte[] { (byte) 230, 0, 0, 1 }), 1234),
+ NetworkInterface.getByIndex(0)
+ );
+ }
+ }
+
+ private static void leaveGroupDatagramSocket() throws IOException {
+ try (var socket = new DummyImplementations.DummyDatagramSocket()) {
+ socket.leaveGroup(
+ new InetSocketAddress(InetAddress.getByAddress(new byte[] { (byte) 230, 0, 0, 1 }), 1234),
+ NetworkInterface.getByIndex(0)
+ );
+ }
+ }
+
+ @SuppressForbidden(reason = "testing entitlements")
+ private static void sendDatagramSocket() throws IOException {
+ try (var socket = new DummyImplementations.DummyDatagramSocket()) {
+ socket.send(new DatagramPacket(new byte[] { 0 }, 1, InetAddress.getLocalHost(), 1234));
+ }
+ }
+
+ @SuppressForbidden(reason = "testing entitlements")
+ private static void receiveDatagramSocket() throws IOException {
+ try (var socket = new DummyImplementations.DummyDatagramSocket()) {
+ socket.receive(new DatagramPacket(new byte[1], 1, InetAddress.getLocalHost(), 1234));
+ }
+ }
+
public RestEntitlementsCheckAction(String prefix) {
this.prefix = prefix;
}
diff --git a/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml b/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml
index 30fc9f0abeec..05a94f09264a 100644
--- a/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml
+++ b/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml
@@ -1,3 +1,8 @@
ALL-UNNAMED:
- create_class_loader
- set_https_connection_properties
+ - network:
+ actions:
+ - listen
+ - accept
+ - connect
diff --git a/libs/entitlement/qa/entitlement-allowed/src/main/plugin-metadata/entitlement-policy.yaml b/libs/entitlement/qa/entitlement-allowed/src/main/plugin-metadata/entitlement-policy.yaml
index 0a25570a9f62..0d2c66c2daa2 100644
--- a/libs/entitlement/qa/entitlement-allowed/src/main/plugin-metadata/entitlement-policy.yaml
+++ b/libs/entitlement/qa/entitlement-allowed/src/main/plugin-metadata/entitlement-policy.yaml
@@ -1,3 +1,8 @@
org.elasticsearch.entitlement.qa.common:
- create_class_loader
- set_https_connection_properties
+ - network:
+ actions:
+ - listen
+ - accept
+ - connect
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java
index ba5ccbafa70a..9b621461403d 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java
@@ -22,6 +22,7 @@ import org.elasticsearch.entitlement.runtime.api.ElasticsearchEntitlementChecker
import org.elasticsearch.entitlement.runtime.policy.CreateClassLoaderEntitlement;
import org.elasticsearch.entitlement.runtime.policy.Entitlement;
import org.elasticsearch.entitlement.runtime.policy.ExitVMEntitlement;
+import org.elasticsearch.entitlement.runtime.policy.NetworkEntitlement;
import org.elasticsearch.entitlement.runtime.policy.Policy;
import org.elasticsearch.entitlement.runtime.policy.PolicyManager;
import org.elasticsearch.entitlement.runtime.policy.PolicyParser;
@@ -44,6 +45,9 @@ import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
+import static org.elasticsearch.entitlement.runtime.policy.NetworkEntitlement.ACCEPT_ACTION;
+import static org.elasticsearch.entitlement.runtime.policy.NetworkEntitlement.CONNECT_ACTION;
+import static org.elasticsearch.entitlement.runtime.policy.NetworkEntitlement.LISTEN_ACTION;
import static org.elasticsearch.entitlement.runtime.policy.PolicyManager.ALL_UNNAMED;
/**
@@ -97,7 +101,15 @@ public class EntitlementInitialization {
List.of(
new Scope("org.elasticsearch.base", List.of(new CreateClassLoaderEntitlement())),
new Scope("org.elasticsearch.xcontent", List.of(new CreateClassLoaderEntitlement())),
- new Scope("org.elasticsearch.server", List.of(new ExitVMEntitlement(), new CreateClassLoaderEntitlement()))
+ new Scope(
+ "org.elasticsearch.server",
+ List.of(
+ new ExitVMEntitlement(),
+ new CreateClassLoaderEntitlement(),
+ new NetworkEntitlement(LISTEN_ACTION | CONNECT_ACTION | ACCEPT_ACTION)
+ )
+ ),
+ new Scope("org.apache.httpcomponents.httpclient", List.of(new NetworkEntitlement(CONNECT_ACTION)))
)
);
// agents run without a module, so this is a special hack for the apm agent
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java
index 686fb73e10bc..695d1c574c7c 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java
@@ -10,16 +10,29 @@
package org.elasticsearch.entitlement.runtime.api;
import org.elasticsearch.entitlement.bridge.EntitlementChecker;
+import org.elasticsearch.entitlement.runtime.policy.NetworkEntitlement;
import org.elasticsearch.entitlement.runtime.policy.PolicyManager;
import java.io.InputStream;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.net.ContentHandlerFactory;
+import java.net.DatagramPacket;
+import java.net.DatagramSocket;
import java.net.DatagramSocketImplFactory;
import java.net.FileNameMap;
+import java.net.InetAddress;
+import java.net.MulticastSocket;
+import java.net.NetworkInterface;
+import java.net.Proxy;
+import java.net.ProxySelector;
+import java.net.ResponseCache;
+import java.net.ServerSocket;
+import java.net.Socket;
+import java.net.SocketAddress;
import java.net.SocketImplFactory;
import java.net.URL;
+import java.net.URLStreamHandler;
import java.net.URLStreamHandlerFactory;
import java.util.List;
@@ -310,4 +323,185 @@ public class ElasticsearchEntitlementChecker implements EntitlementChecker {
public void check$javax_net_ssl_SSLContext$$setDefault(Class<?> callerClass, SSLContext context) {
policyManager.checkChangeJVMGlobalState(callerClass);
}
+
+ @Override
+ public void check$java_net_ProxySelector$$setDefault(Class<?> callerClass, ProxySelector ps) {
+ policyManager.checkChangeNetworkHandling(callerClass);
+ }
+
+ @Override
+ public void check$java_net_ResponseCache$$setDefault(Class<?> callerClass, ResponseCache rc) {
+ policyManager.checkChangeNetworkHandling(callerClass);
+ }
+
+ @Override
+ public void check$java_net_spi_InetAddressResolverProvider$(Class<?> callerClass) {
+ policyManager.checkChangeNetworkHandling(callerClass);
+ }
+
+ @Override
+ public void check$java_net_spi_URLStreamHandlerProvider$(Class<?> callerClass) {
+ policyManager.checkChangeNetworkHandling(callerClass);
+ }
+
+ @Override
+ public void check$java_net_URL$(Class<?> callerClass, String protocol, String host, int port, String file, URLStreamHandler handler) {
+ policyManager.checkChangeNetworkHandling(callerClass);
+ }
+
+ @Override
+ public void check$java_net_URL$(Class<?> callerClass, URL context, String spec, URLStreamHandler handler) {
+ policyManager.checkChangeNetworkHandling(callerClass);
+ }
+
+ @Override
+ public void check$java_net_DatagramSocket$bind(Class<?> callerClass, DatagramSocket that, SocketAddress addr) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION);
+ }
+
+ @Override
+ public void check$java_net_DatagramSocket$connect(Class<?> callerClass, DatagramSocket that, InetAddress addr) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_DatagramSocket$connect(Class<?> callerClass, DatagramSocket that, SocketAddress addr) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_DatagramSocket$send(Class<?> callerClass, DatagramSocket that, DatagramPacket p) {
+ var actions = NetworkEntitlement.CONNECT_ACTION;
+ if (p.getAddress().isMulticastAddress()) {
+ actions |= NetworkEntitlement.ACCEPT_ACTION;
+ }
+ policyManager.checkNetworkAccess(callerClass, actions);
+ }
+
+ @Override
+ public void check$java_net_DatagramSocket$receive(Class<?> callerClass, DatagramSocket that, DatagramPacket p) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.ACCEPT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_DatagramSocket$joinGroup(Class<?> caller, DatagramSocket that, SocketAddress addr, NetworkInterface ni) {
+ policyManager.checkNetworkAccess(caller, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_DatagramSocket$leaveGroup(Class<?> caller, DatagramSocket that, SocketAddress addr, NetworkInterface ni) {
+ policyManager.checkNetworkAccess(caller, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_MulticastSocket$joinGroup(Class<?> callerClass, MulticastSocket that, InetAddress addr) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_MulticastSocket$joinGroup(Class<?> caller, MulticastSocket that, SocketAddress addr, NetworkInterface ni) {
+ policyManager.checkNetworkAccess(caller, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_MulticastSocket$leaveGroup(Class<?> caller, MulticastSocket that, InetAddress addr) {
+ policyManager.checkNetworkAccess(caller, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_MulticastSocket$leaveGroup(Class<?> caller, MulticastSocket that, SocketAddress addr, NetworkInterface ni) {
+ policyManager.checkNetworkAccess(caller, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_MulticastSocket$send(Class<?> callerClass, MulticastSocket that, DatagramPacket p, byte ttl) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_ServerSocket$(Class<?> callerClass, int port) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION);
+ }
+
+ @Override
+ public void check$java_net_ServerSocket$(Class<?> callerClass, int port, int backlog) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION);
+ }
+
+ @Override
+ public void check$java_net_ServerSocket$(Class<?> callerClass, int port, int backlog, InetAddress bindAddr) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION);
+ }
+
+ @Override
+ public void check$java_net_ServerSocket$accept(Class<?> callerClass, ServerSocket that) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.ACCEPT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_ServerSocket$implAccept(Class<?> callerClass, ServerSocket that, Socket s) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.ACCEPT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_ServerSocket$bind(Class<?> callerClass, ServerSocket that, SocketAddress endpoint) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION);
+ }
+
+ @Override
+ public void check$java_net_ServerSocket$bind(Class<?> callerClass, ServerSocket that, SocketAddress endpoint, int backlog) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION);
+ }
+
+ @Override
+ public void check$java_net_Socket$(Class<?> callerClass, Proxy proxy) {
+ if (proxy.type() == Proxy.Type.SOCKS || proxy.type() == Proxy.Type.HTTP) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION);
+ }
+ }
+
+ @Override
+ public void check$java_net_Socket$(Class<?> callerClass, String host, int port) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_Socket$(Class<?> callerClass, InetAddress address, int port) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_Socket$(Class<?> callerClass, String host, int port, InetAddress localAddr, int localPort) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_Socket$(Class<?> callerClass, InetAddress address, int port, InetAddress localAddr, int localPort) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_Socket$(Class<?> callerClass, String host, int port, boolean stream) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_Socket$(Class<?> callerClass, InetAddress host, int port, boolean stream) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_Socket$bind(Class<?> callerClass, Socket that, SocketAddress endpoint) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION);
+ }
+
+ @Override
+ public void check$java_net_Socket$connect(Class<?> callerClass, Socket that, SocketAddress endpoint) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_Socket$connect(Class<?> callerClass, Socket that, SocketAddress endpoint, int backlog) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION);
+ }
}
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/NetworkEntitlement.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/NetworkEntitlement.java
new file mode 100644
index 000000000000..9b4035cee98d
--- /dev/null
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/NetworkEntitlement.java
@@ -0,0 +1,111 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.entitlement.runtime.policy;
+
+import org.elasticsearch.core.Strings;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.StringJoiner;
+
+import static java.util.Map.entry;
+
+/**
+ * Describes a network entitlement (sockets) with actions.
+ */
+public class NetworkEntitlement implements Entitlement {
+
+ public static final int LISTEN_ACTION = 0x1;
+ public static final int CONNECT_ACTION = 0x2;
+ public static final int ACCEPT_ACTION = 0x4;
+
+ static final String LISTEN = "listen";
+ static final String CONNECT = "connect";
+ static final String ACCEPT = "accept";
+
+ private static final Map<String, Integer> ACTION_MAP = Map.ofEntries(
+ entry(LISTEN, LISTEN_ACTION),
+ entry(CONNECT, CONNECT_ACTION),
+ entry(ACCEPT, ACCEPT_ACTION)
+ );
+
+ private final int actions;
+
+ @ExternalEntitlement(parameterNames = { "actions" }, esModulesOnly = false)
+ public NetworkEntitlement(List<String> actionsList) {
+
+ int actionsInt = 0;
+
+ for (String actionString : actionsList) {
+ var action = ACTION_MAP.get(actionString);
+ if (action == null) {
+ throw new IllegalArgumentException("unknown network action [" + actionString + "]");
+ }
+ if ((actionsInt & action) == action) {
+ throw new IllegalArgumentException(Strings.format("network action [%s] specified multiple times", actionString));
+ }
+ actionsInt |= action;
+ }
+
+ this.actions = actionsInt;
+ }
+
+ public NetworkEntitlement(int actions) {
+ this.actions = actions;
+ }
+
+ public static String printActions(int actions) {
+ var joiner = new StringJoiner(",");
+ for (var entry : ACTION_MAP.entrySet()) {
+ var action = entry.getValue();
+ if ((actions & action) == action) {
+ joiner.add(entry.getKey());
+ }
+ }
+ return joiner.toString();
+ }
+
+ /**
+ * For the actions to match, the actions present in this entitlement must be a superset
+ * of the actions required by a check.
+ * There is only one "negative" case (action required by the check but not present in the entitlement),
+ * and it can be expressed efficiently via this truth table:
+ * this.actions | requiredActions |
+ * 0 | 0 | 0
+ * 0 | 1 | 1 --> NOT this.action AND requiredActions
+ * 1 | 0 | 0
+ * 1 | 1 | 0
+ *
+ * @param requiredActions the actions required to be present for a check to pass
+ * @return true if requiredActions are present, false otherwise
+ */
+ public boolean matchActions(int requiredActions) {
+ return (~this.actions & requiredActions) == 0;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ NetworkEntitlement that = (NetworkEntitlement) o;
+ return actions == that.actions;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(actions);
+ }
+
+ @Override
+ public String toString() {
+ return "NetworkEntitlement{actions=" + actions + '}';
+ }
+}
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java
index 9c45f2d42f03..aeb54d5c1156 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java
@@ -52,7 +52,11 @@ public class PolicyManager {
}
public <E extends Entitlement> Stream<E> getEntitlements(Class<E> entitlementClass) {
- return entitlementsByType.get(entitlementClass).stream().map(entitlementClass::cast);
+ var entitlements = entitlementsByType.get(entitlementClass);
+ if (entitlements == null) {
+ return Stream.empty();
+ }
+ return entitlements.stream().map(entitlementClass::cast);
}
}
@@ -171,25 +175,67 @@ public class PolicyManager {
});
}
+ /**
+ * Check for operations that can modify the way network operations are handled
+ */
+ public void checkChangeNetworkHandling(Class<?> callerClass) {
+ checkChangeJVMGlobalState(callerClass);
+ }
+
+ /**
+ * Check for operations that can access sensitive network information, e.g. secrets, tokens or SSL sessions
+ */
+ public void checkReadSensitiveNetworkInformation(Class<?> callerClass) {
+ neverEntitled(callerClass, "access sensitive network information");
+ }
+
private String operationDescription(String methodName) {
// TODO: Use a more human-readable description. Perhaps share code with InstrumentationServiceImpl.parseCheckerMethodName
return methodName.substring(methodName.indexOf('$'));
}
+ public void checkNetworkAccess(Class<?> callerClass, int actions) {
+ var requestingClass = requestingClass(callerClass);
+ if (isTriviallyAllowed(requestingClass)) {
+ return;
+ }
+
+ ModuleEntitlements entitlements = getEntitlements(requestingClass, NetworkEntitlement.class);
+ if (entitlements.getEntitlements(NetworkEntitlement.class).anyMatch(n -> n.matchActions(actions))) {
+ logger.debug(
+ () -> Strings.format(
+ "Entitled: class [%s], module [%s], entitlement [network], actions [%s]",
+ requestingClass,
+ requestingClass.getModule().getName(),
+ NetworkEntitlement.printActions(actions)
+ )
+ );
+ return;
+ }
+ throw new NotEntitledException(
+ Strings.format(
+ "Missing entitlement: class [%s], module [%s], entitlement [network], actions [%s]",
+ requestingClass,
+ requestingClass.getModule().getName(),
+ NetworkEntitlement.printActions(actions)
+ )
+ );
+ }
+
private void checkEntitlementPresent(Class<?> callerClass, Class<? extends Entitlement> entitlementClass) {
var requestingClass = requestingClass(callerClass);
if (isTriviallyAllowed(requestingClass)) {
return;
}
- ModuleEntitlements entitlements = getEntitlements(requestingClass);
+ ModuleEntitlements entitlements = getEntitlements(requestingClass, entitlementClass);
if (entitlements.hasEntitlement(entitlementClass)) {
logger.debug(
() -> Strings.format(
"Entitled: class [%s], module [%s], entitlement [%s]",
requestingClass,
requestingClass.getModule().getName(),
- entitlementClass.getSimpleName()
+ PolicyParser.getEntitlementTypeName(entitlementClass)
)
);
return;
@@ -199,19 +245,22 @@ public class PolicyManager {
"Missing entitlement: class [%s], module [%s], entitlement [%s]",
requestingClass,
requestingClass.getModule().getName(),
- entitlementClass.getSimpleName()
+ PolicyParser.getEntitlementTypeName(entitlementClass)
)
);
}
- ModuleEntitlements getEntitlements(Class<?> requestingClass) {
- return moduleEntitlementsMap.computeIfAbsent(requestingClass.getModule(), m -> computeEntitlements(requestingClass));
+ ModuleEntitlements getEntitlements(Class<?> requestingClass, Class<? extends Entitlement> entitlementClass) {
+ return moduleEntitlementsMap.computeIfAbsent(
+ requestingClass.getModule(),
+ m -> computeEntitlements(requestingClass, entitlementClass)
+ );
}
- private ModuleEntitlements computeEntitlements(Class<?> requestingClass) {
+ private ModuleEntitlements computeEntitlements(Class<?> requestingClass, Class<? extends Entitlement> entitlementClass) {
Module requestingModule = requestingClass.getModule();
if (isServerModule(requestingModule)) {
- return getModuleScopeEntitlements(requestingClass, serverEntitlements, requestingModule.getName());
+ return getModuleScopeEntitlements(requestingClass, serverEntitlements, requestingModule.getName(), "server", entitlementClass);
}
// plugins
@@ -225,7 +274,7 @@ public class PolicyManager {
} else {
scopeName = requestingModule.getName();
}
- return getModuleScopeEntitlements(requestingClass, pluginEntitlements, scopeName);
+ return getModuleScopeEntitlements(requestingClass, pluginEntitlements, scopeName, pluginName, entitlementClass);
}
}
@@ -241,11 +290,19 @@ public class PolicyManager {
private ModuleEntitlements getModuleScopeEntitlements(
Class<?> callerClass,
Map<String, List<Entitlement>> scopeEntitlements,
- String moduleName
+ String moduleName,
+ String component,
+ Class<? extends Entitlement> entitlementClass
) {
var entitlements = scopeEntitlements.get(moduleName);
if (entitlements == null) {
- logger.warn("No applicable entitlement policy for module [{}], class [{}]", moduleName, callerClass);
+ logger.warn(
+ "No applicable entitlement policy for entitlement [{}] in [{}], module [{}], class [{}]",
+ PolicyParser.getEntitlementTypeName(entitlementClass),
+ component,
+ moduleName,
+ callerClass
+ );
return ModuleEntitlements.NONE;
}
return ModuleEntitlements.from(entitlements);
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java
index 013acf8f22fa..ac4d4afdd97f 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java
@@ -37,7 +37,8 @@ public class PolicyParser {
private static final Map<String, Class<?>> EXTERNAL_ENTITLEMENTS = Stream.of(
FileEntitlement.class,
CreateClassLoaderEntitlement.class,
- SetHttpsConnectionPropertiesEntitlement.class
+ SetHttpsConnectionPropertiesEntitlement.class,
+ NetworkEntitlement.class
).collect(Collectors.toUnmodifiableMap(PolicyParser::getEntitlementTypeName, Function.identity()));
protected final XContentParser policyParser;
diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/NetworkEntitlementTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/NetworkEntitlementTests.java
new file mode 100644
index 000000000000..91051d48c365
--- /dev/null
+++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/NetworkEntitlementTests.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.entitlement.runtime.policy;
+
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.List;
+
+import static org.hamcrest.Matchers.is;
+
+public class NetworkEntitlementTests extends ESTestCase {
+
+ public void testMatchesActions() {
+ var listenEntitlement = new NetworkEntitlement(List.of(NetworkEntitlement.LISTEN));
+ var emptyEntitlement = new NetworkEntitlement(List.of());
+ var connectAcceptEntitlement = new NetworkEntitlement(List.of(NetworkEntitlement.CONNECT, NetworkEntitlement.ACCEPT));
+
+ assertThat(listenEntitlement.matchActions(0), is(true));
+ assertThat(listenEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION), is(true));
+ assertThat(listenEntitlement.matchActions(NetworkEntitlement.ACCEPT_ACTION), is(false));
+ assertThat(listenEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION), is(false));
+ assertThat(listenEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(false));
+ assertThat(listenEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION), is(false));
+ assertThat(listenEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(false));
+
+ assertThat(connectAcceptEntitlement.matchActions(0), is(true));
+ assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION), is(false));
+ assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.ACCEPT_ACTION), is(true));
+ assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION), is(true));
+ assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(false));
+ assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION), is(false));
+ assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(true));
+
+ assertThat(emptyEntitlement.matchActions(0), is(true));
+ assertThat(emptyEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION), is(false));
+ assertThat(emptyEntitlement.matchActions(NetworkEntitlement.ACCEPT_ACTION), is(false));
+ assertThat(emptyEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION), is(false));
+ assertThat(emptyEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(false));
+ assertThat(emptyEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION), is(false));
+ assertThat(emptyEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(false));
+ }
+}
diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java
index d22c2f598e34..092813be75cc 100644
--- a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java
+++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java
@@ -38,7 +38,7 @@ import static org.hamcrest.Matchers.sameInstance;
public class PolicyManagerTests extends ESTestCase {
/**
* A module you can use for test cases that don't actually care about the
- * entitlements module.
+ * entitlement module.
*/
private static Module NO_ENTITLEMENTS_MODULE;
@@ -66,7 +66,11 @@ public class PolicyManagerTests extends ESTestCase {
var callerClass = this.getClass();
var requestingModule = callerClass.getModule();
- assertEquals("No policy for the unnamed module", ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass));
+ assertEquals(
+ "No policy for the unnamed module",
+ ModuleEntitlements.NONE,
+ policyManager.getEntitlements(callerClass, Entitlement.class)
+ );
assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap);
}
@@ -78,7 +82,7 @@ public class PolicyManagerTests extends ESTestCase {
var callerClass = this.getClass();
var requestingModule = callerClass.getModule();
- assertEquals("No policy for this plugin", ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass));
+ assertEquals("No policy for this plugin", ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass, Entitlement.class));
assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap);
}
@@ -90,11 +94,11 @@ public class PolicyManagerTests extends ESTestCase {
var callerClass = this.getClass();
var requestingModule = callerClass.getModule();
- assertEquals(ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass));
+ assertEquals(ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass, Entitlement.class));
assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap);
// A second time
- assertEquals(ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass));
+ assertEquals(ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass, Entitlement.class));
// Nothing new in the map
assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap);
@@ -112,7 +116,7 @@ public class PolicyManagerTests extends ESTestCase {
// Any class from the current module (unnamed) will do
var callerClass = this.getClass();
- var entitlements = policyManager.getEntitlements(callerClass);
+ var entitlements = policyManager.getEntitlements(callerClass, Entitlement.class);
assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true));
}
@@ -126,7 +130,11 @@ public class PolicyManagerTests extends ESTestCase {
var mockServerClass = ModuleLayer.boot().findLoader("jdk.httpserver").loadClass("com.sun.net.httpserver.HttpServer");
var requestingModule = mockServerClass.getModule();
- assertEquals("No policy for this module in server", ModuleEntitlements.NONE, policyManager.getEntitlements(mockServerClass));
+ assertEquals(
+ "No policy for this module in server",
+ ModuleEntitlements.NONE,
+ policyManager.getEntitlements(mockServerClass, Entitlement.class)
+ );
assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap);
}
@@ -145,9 +153,8 @@ public class PolicyManagerTests extends ESTestCase {
// So we use a random module in the boot layer, and a random class from that module (not java.base -- it is
// loaded too early) to mimic a class that would be in the server module.
var mockServerClass = ModuleLayer.boot().findLoader("jdk.httpserver").loadClass("com.sun.net.httpserver.HttpServer");
- var requestingModule = mockServerClass.getModule();
- var entitlements = policyManager.getEntitlements(mockServerClass);
+ var entitlements = policyManager.getEntitlements(mockServerClass, Entitlement.class);
assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true));
assertThat(entitlements.hasEntitlement(ExitVMEntitlement.class), is(true));
}
@@ -167,9 +174,8 @@ public class PolicyManagerTests extends ESTestCase {
var layer = createLayerForJar(jar, "org.example.plugin");
var mockPluginClass = layer.findLoader("org.example.plugin").loadClass("q.B");
- var requestingModule = mockPluginClass.getModule();
- var entitlements = policyManager.getEntitlements(mockPluginClass);
+ var entitlements = policyManager.getEntitlements(mockPluginClass, Entitlement.class);
assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true));
assertThat(
entitlements.getEntitlements(FileEntitlement.class).toList(),
@@ -189,11 +195,11 @@ public class PolicyManagerTests extends ESTestCase {
// Any class from the current module (unnamed) will do
var callerClass = this.getClass();
- var entitlements = policyManager.getEntitlements(callerClass);
+ var entitlements = policyManager.getEntitlements(callerClass, Entitlement.class);
assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true));
assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1));
- var cachedResult = policyManager.moduleEntitlementsMap.values().stream().findFirst().get();
- var entitlementsAgain = policyManager.getEntitlements(callerClass);
+ var cachedResult = policyManager.moduleEntitlementsMap.values().stream().findFirst().orElseThrow();
+ var entitlementsAgain = policyManager.getEntitlements(callerClass, Entitlement.class);
// Nothing new in the map
assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1));
diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java
index 4d17fc92e157..1e0c31d2280b 100644
--- a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java
+++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java
@@ -52,6 +52,22 @@ public class PolicyParserTests extends ESTestCase {
assertEquals(expected, parsedPolicy);
}
+ public void testParseNetwork() throws IOException {
+ Policy parsedPolicy = new PolicyParser(new ByteArrayInputStream("""
+ entitlement-module-name:
+ - network:
+ actions:
+ - listen
+ - accept
+ - connect
+ """.getBytes(StandardCharsets.UTF_8)), "test-policy.yaml", false).parsePolicy();
+ Policy expected = new Policy(
+ "test-policy.yaml",
+ List.of(new Scope("entitlement-module-name", List.of(new NetworkEntitlement(List.of("listen", "accept", "connect")))))
+ );
+ assertEquals(expected, parsedPolicy);
+ }
+
public void testParseCreateClassloader() throws IOException {
Policy parsedPolicy = new PolicyParser(new ByteArrayInputStream("""
entitlement-module-name:
diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java
index 339a4ec24ca1..43447cfa21a6 100644
--- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java
+++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java
@@ -92,14 +92,7 @@ public class APM extends Plugin implements NetworkPlugin, TelemetryPlugin {
APMAgentSettings.TELEMETRY_TRACING_ENABLED_SETTING,
APMAgentSettings.TELEMETRY_TRACING_NAMES_INCLUDE_SETTING,
APMAgentSettings.TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING,
- APMAgentSettings.TELEMETRY_TRACING_SANITIZE_FIELD_NAMES,
- // The settings below are deprecated and are currently kept as fallback.
- APMAgentSettings.TRACING_APM_SECRET_TOKEN_SETTING,
- APMAgentSettings.TRACING_APM_API_KEY_SETTING,
- APMAgentSettings.TRACING_APM_ENABLED_SETTING,
- APMAgentSettings.TRACING_APM_NAMES_INCLUDE_SETTING,
- APMAgentSettings.TRACING_APM_NAMES_EXCLUDE_SETTING,
- APMAgentSettings.TRACING_APM_SANITIZE_FIELD_NAMES
+ APMAgentSettings.TELEMETRY_TRACING_SANITIZE_FIELD_NAMES
);
}
}
diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java
index f66683a787bc..8647761e2def 100644
--- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java
+++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java
@@ -25,9 +25,7 @@ import java.security.PrivilegedAction;
import java.util.List;
import java.util.Objects;
import java.util.Set;
-import java.util.function.Function;
-import static org.elasticsearch.common.settings.Setting.Property.Deprecated;
import static org.elasticsearch.common.settings.Setting.Property.NodeScope;
import static org.elasticsearch.common.settings.Setting.Property.OperatorDynamic;
@@ -101,9 +99,6 @@ public class APMAgentSettings {
private static final String TELEMETRY_SETTING_PREFIX = "telemetry.";
- // The old legacy prefix
- private static final String LEGACY_TRACING_APM_SETTING_PREFIX = "tracing.apm.";
-
/**
* Allow-list of APM agent config keys users are permitted to configure.
* @see APM Java Agent Configuration
@@ -248,56 +243,24 @@ public class APMAgentSettings {
public static final Setting.AffixSetting<String> APM_AGENT_SETTINGS = Setting.prefixKeySetting(
TELEMETRY_SETTING_PREFIX + "agent.",
- LEGACY_TRACING_APM_SETTING_PREFIX + "agent.",
- (namespace, qualifiedKey) -> qualifiedKey.startsWith(LEGACY_TRACING_APM_SETTING_PREFIX)
- ? concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic, Deprecated)
- : concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic)
+ null, // no fallback
+ (namespace, qualifiedKey) -> concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic)
);
- /**
- * @deprecated in favor of TELEMETRY_TRACING_NAMES_INCLUDE_SETTING.
- */
- @Deprecated
- public static final Setting<List<String>> TRACING_APM_NAMES_INCLUDE_SETTING = Setting.stringListSetting(
- LEGACY_TRACING_APM_SETTING_PREFIX + "names.include",
- OperatorDynamic,
- NodeScope,
- Deprecated
- );
-
- public static final Setting<List<String>> TELEMETRY_TRACING_NAMES_INCLUDE_SETTING = Setting.listSetting(
+ public static final Setting<List<String>> TELEMETRY_TRACING_NAMES_INCLUDE_SETTING = Setting.stringListSetting(
TELEMETRY_SETTING_PREFIX + "tracing.names.include",
- TRACING_APM_NAMES_INCLUDE_SETTING,
- Function.identity(),
OperatorDynamic,
NodeScope
);
- /**
- * @deprecated in favor of TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING.
- */
- @Deprecated
- public static final Setting<List<String>> TRACING_APM_NAMES_EXCLUDE_SETTING = Setting.stringListSetting(
- LEGACY_TRACING_APM_SETTING_PREFIX + "names.exclude",
- OperatorDynamic,
- NodeScope,
- Deprecated
- );
-
- public static final Setting<List<String>> TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING = Setting.listSetting(
+ public static final Setting<List<String>> TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING = Setting.stringListSetting(
TELEMETRY_SETTING_PREFIX + "tracing.names.exclude",
- TRACING_APM_NAMES_EXCLUDE_SETTING,
- Function.identity(),
OperatorDynamic,
NodeScope
);
- /**
- * @deprecated in favor of TELEMETRY_TRACING_SANITIZE_FIELD_NAMES.
- */
- @Deprecated
- public static final Setting<List<String>> TRACING_APM_SANITIZE_FIELD_NAMES = Setting.stringListSetting(
- LEGACY_TRACING_APM_SETTING_PREFIX + "sanitize_field_names",
+ public static final Setting<List<String>> TELEMETRY_TRACING_SANITIZE_FIELD_NAMES = Setting.stringListSetting(
+ TELEMETRY_SETTING_PREFIX + "tracing.sanitize_field_names",
List.of(
"password",
"passwd",
@@ -313,33 +276,12 @@ public class APMAgentSettings {
"set-cookie"
),
OperatorDynamic,
- NodeScope,
- Deprecated
- );
-
- public static final Setting<List<String>> TELEMETRY_TRACING_SANITIZE_FIELD_NAMES = Setting.listSetting(
- TELEMETRY_SETTING_PREFIX + "tracing.sanitize_field_names",
- TRACING_APM_SANITIZE_FIELD_NAMES,
- Function.identity(),
- OperatorDynamic,
NodeScope
);
- /**
- * @deprecated in favor of TELEMETRY_TRACING_ENABLED_SETTING.
- */
- @Deprecated
- public static final Setting<Boolean> TRACING_APM_ENABLED_SETTING = Setting.boolSetting(
- LEGACY_TRACING_APM_SETTING_PREFIX + "enabled",
- false,
- OperatorDynamic,
- NodeScope,
- Deprecated
- );
-
public static final Setting<Boolean> TELEMETRY_TRACING_ENABLED_SETTING = Setting.boolSetting(
TELEMETRY_SETTING_PREFIX + "tracing.enabled",
- TRACING_APM_ENABLED_SETTING,
+ false,
OperatorDynamic,
NodeScope
);
@@ -351,33 +293,13 @@ public class APMAgentSettings {
NodeScope
);
- /**
- * @deprecated in favor of TELEMETRY_SECRET_TOKEN_SETTING.
- */
- @Deprecated
- public static final Setting<SecureString> TRACING_APM_SECRET_TOKEN_SETTING = SecureSetting.secureString(
- LEGACY_TRACING_APM_SETTING_PREFIX + "secret_token",
- null,
- Deprecated
- );
-
public static final Setting<SecureString> TELEMETRY_SECRET_TOKEN_SETTING = SecureSetting.secureString(
TELEMETRY_SETTING_PREFIX + "secret_token",
- TRACING_APM_SECRET_TOKEN_SETTING
- );
-
- /**
- * @deprecated in favor of TELEMETRY_API_KEY_SETTING.
- */
- @Deprecated
- public static final Setting<SecureString> TRACING_APM_API_KEY_SETTING = SecureSetting.secureString(
- LEGACY_TRACING_APM_SETTING_PREFIX + "api_key",
- null,
- Deprecated
+ null
);
public static final Setting<SecureString> TELEMETRY_API_KEY_SETTING = SecureSetting.secureString(
TELEMETRY_SETTING_PREFIX + "api_key",
- TRACING_APM_API_KEY_SETTING
+ null
);
}
diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java
index a60048c82a3c..551667242092 100644
--- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java
+++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java
@@ -11,8 +11,6 @@ package org.elasticsearch.telemetry.apm.internal;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.ClusterSettings;
-import org.elasticsearch.common.settings.MockSecureSettings;
-import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;
import org.mockito.Mockito;
@@ -21,21 +19,13 @@ import java.util.List;
import java.util.Set;
import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.APM_AGENT_SETTINGS;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_API_KEY_SETTING;
import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_SECRET_TOKEN_SETTING;
import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_ENABLED_SETTING;
import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING;
import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_NAMES_INCLUDE_SETTING;
import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_SANITIZE_FIELD_NAMES;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_API_KEY_SETTING;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_ENABLED_SETTING;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_NAMES_EXCLUDE_SETTING;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_NAMES_INCLUDE_SETTING;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_SANITIZE_FIELD_NAMES;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_SECRET_TOKEN_SETTING;
-import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.hasEntry;
import static org.hamcrest.Matchers.hasItem;
import static org.mockito.Mockito.clearInvocations;
import static org.mockito.Mockito.mock;
@@ -70,14 +60,6 @@ public class APMAgentSettingsTests extends ESTestCase {
}
}
- public void testEnableTracingUsingLegacySetting() {
- Settings settings = Settings.builder().put(TRACING_APM_ENABLED_SETTING.getKey(), true).build();
- apmAgentSettings.initAgentSystemProperties(settings);
-
- verify(apmAgentSettings).setAgentSetting("recording", "true");
- assertWarnings("[tracing.apm.enabled] setting was deprecated in Elasticsearch and will be removed in a future release.");
- }
-
public void testEnableMetrics() {
for (boolean tracingEnabled : List.of(true, false)) {
clearInvocations(apmAgentSettings, apmTelemetryProvider.getMeterService());
@@ -121,14 +103,6 @@ public class APMAgentSettingsTests extends ESTestCase {
}
}
- public void testDisableTracingUsingLegacySetting() {
- Settings settings = Settings.builder().put(TRACING_APM_ENABLED_SETTING.getKey(), false).build();
- apmAgentSettings.initAgentSystemProperties(settings);
-
- verify(apmAgentSettings).setAgentSetting("recording", "false");
- assertWarnings("[tracing.apm.enabled] setting was deprecated in Elasticsearch and will be removed in a future release.");
- }
-
public void testDisableMetrics() {
for (boolean tracingEnabled : List.of(true, false)) {
clearInvocations(apmAgentSettings, apmTelemetryProvider.getMeterService());
@@ -181,70 +155,18 @@ public class APMAgentSettingsTests extends ESTestCase {
verify(apmAgentSettings).setAgentSetting("span_compression_enabled", "true");
}
- public void testSetAgentsSettingsWithLegacyPrefix() {
- Settings settings = Settings.builder()
- .put(TELEMETRY_TRACING_ENABLED_SETTING.getKey(), true)
- .put("tracing.apm.agent.span_compression_enabled", "true")
- .build();
- apmAgentSettings.initAgentSystemProperties(settings);
-
- verify(apmAgentSettings).setAgentSetting("recording", "true");
- verify(apmAgentSettings).setAgentSetting("span_compression_enabled", "true");
- assertWarnings(
- "[tracing.apm.agent.span_compression_enabled] setting was deprecated in Elasticsearch and will be removed in a future release."
- );
- }
-
/**
* Check that invalid or forbidden APM agent settings are rejected.
*/
public void testRejectForbiddenOrUnknownAgentSettings() {
- List<String> prefixes = List.of(APM_AGENT_SETTINGS.getKey(), "tracing.apm.agent.");
- for (String prefix : prefixes) {
- Settings settings = Settings.builder().put(prefix + "unknown", "true").build();
- Exception exception = expectThrows(IllegalArgumentException.class, () -> APM_AGENT_SETTINGS.getAsMap(settings));
- assertThat(exception.getMessage(), containsString("[" + prefix + "unknown]"));
- }
+ String prefix = APM_AGENT_SETTINGS.getKey();
+ Settings settings = Settings.builder().put(prefix + "unknown", "true").build();
+ Exception exception = expectThrows(IllegalArgumentException.class, () -> APM_AGENT_SETTINGS.getAsMap(settings));
+ assertThat(exception.getMessage(), containsString("[" + prefix + "unknown]"));
+
// though, accept / ignore nested global_labels
- for (String prefix : prefixes) {
- Settings settings = Settings.builder().put(prefix + "global_labels.abc", "123").build();
- APMAgentSettings.APM_AGENT_SETTINGS.getAsMap(settings);
-
- if (prefix.startsWith("tracing.apm.agent.")) {
- assertWarnings(
- "[tracing.apm.agent.global_labels.abc] setting was deprecated in Elasticsearch and will be removed in a future release."
- );
- }
- }
- }
-
- public void testTelemetryTracingNamesIncludeFallback() {
- Settings settings = Settings.builder().put(TRACING_APM_NAMES_INCLUDE_SETTING.getKey(), "abc,xyz").build();
-
- List included = TELEMETRY_TRACING_NAMES_INCLUDE_SETTING.get(settings);
-
- assertThat(included, containsInAnyOrder("abc", "xyz"));
- assertWarnings("[tracing.apm.names.include] setting was deprecated in Elasticsearch and will be removed in a future release.");
- }
-
- public void testTelemetryTracingNamesExcludeFallback() {
- Settings settings = Settings.builder().put(TRACING_APM_NAMES_EXCLUDE_SETTING.getKey(), "abc,xyz").build();
-
- List included = TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING.get(settings);
-
- assertThat(included, containsInAnyOrder("abc", "xyz"));
- assertWarnings("[tracing.apm.names.exclude] setting was deprecated in Elasticsearch and will be removed in a future release.");
- }
-
- public void testTelemetryTracingSanitizeFieldNamesFallback() {
- Settings settings = Settings.builder().put(TRACING_APM_SANITIZE_FIELD_NAMES.getKey(), "abc,xyz").build();
-
- List included = TELEMETRY_TRACING_SANITIZE_FIELD_NAMES.get(settings);
-
- assertThat(included, containsInAnyOrder("abc", "xyz"));
- assertWarnings(
- "[tracing.apm.sanitize_field_names] setting was deprecated in Elasticsearch and will be removed in a future release."
- );
+ var map = APMAgentSettings.APM_AGENT_SETTINGS.getAsMap(Settings.builder().put(prefix + "global_labels.abc", "123").build());
+ assertThat(map, hasEntry("global_labels.abc", "123"));
}
public void testTelemetryTracingSanitizeFieldNamesFallbackDefault() {
@@ -252,28 +174,6 @@ public class APMAgentSettingsTests extends ESTestCase {
assertThat(included, hasItem("password")); // and more defaults
}
- public void testTelemetrySecretTokenFallback() {
- MockSecureSettings secureSettings = new MockSecureSettings();
- secureSettings.setString(TRACING_APM_SECRET_TOKEN_SETTING.getKey(), "verysecret");
- Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
-
- try (SecureString secureString = TELEMETRY_SECRET_TOKEN_SETTING.get(settings)) {
- assertEquals("verysecret", secureString.toString());
- }
- assertWarnings("[tracing.apm.secret_token] setting was deprecated in Elasticsearch and will be removed in a future release.");
- }
-
- public void testTelemetryApiKeyFallback() {
- MockSecureSettings secureSettings = new MockSecureSettings();
- secureSettings.setString(TRACING_APM_API_KEY_SETTING.getKey(), "abc");
- Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
-
- try (SecureString secureString = TELEMETRY_API_KEY_SETTING.get(settings)) {
- assertEquals("abc", secureString.toString());
- }
- assertWarnings("[tracing.apm.api_key] setting was deprecated in Elasticsearch and will be removed in a future release.");
- }
-
/**
* Check that invalid or forbidden APM agent settings are rejected if their last part resembles an allowed setting.
*/
diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java
index 58d3e6613290..fb6f99570bff 100644
--- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java
+++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java
@@ -182,7 +182,8 @@ public class DataStreamIT extends ESIntegTestCase {
String backingIndex = barDataStream.getIndices().get(0).getName();
backingIndices.add(backingIndex);
- GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet();
+ GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex))
+ .actionGet();
assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue());
assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true));
Map<?, ?> mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap();
@@ -190,7 +191,7 @@ public class DataStreamIT extends ESIntegTestCase {
backingIndex = fooDataStream.getIndices().get(0).getName();
backingIndices.add(backingIndex);
- getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet();
+ getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex)).actionGet();
assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue());
assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true));
mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap();
@@ -214,7 +215,7 @@ public class DataStreamIT extends ESIntegTestCase {
backingIndex = fooRolloverResponse.getNewIndex();
backingIndices.add(backingIndex);
- getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet();
+ getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex)).actionGet();
assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue());
assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true));
mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap();
@@ -222,7 +223,7 @@ public class DataStreamIT extends ESIntegTestCase {
backingIndex = barRolloverResponse.getNewIndex();
backingIndices.add(backingIndex);
- getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet();
+ getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex)).actionGet();
assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue());
assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true));
mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap();
@@ -245,7 +246,7 @@ public class DataStreamIT extends ESIntegTestCase {
expectThrows(
IndexNotFoundException.class,
"Backing index '" + index + "' should have been deleted.",
- () -> indicesAdmin().getIndex(new GetIndexRequest().indices(index)).actionGet()
+ () -> indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(index)).actionGet()
);
}
}
@@ -479,7 +480,8 @@ public class DataStreamIT extends ESIntegTestCase {
String backingIndex = getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().get(0).getName();
assertThat(backingIndex, backingIndexEqualTo(dataStreamName, 1));
- GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(dataStreamName)).actionGet();
+ GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(dataStreamName))
+ .actionGet();
assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue());
assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true));
assertThat(
@@ -492,7 +494,7 @@ public class DataStreamIT extends ESIntegTestCase {
assertThat(backingIndex, backingIndexEqualTo(dataStreamName, 2));
assertTrue(rolloverResponse.isRolledOver());
- getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet();
+ getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex)).actionGet();
assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue());
assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true));
assertThat(
@@ -518,7 +520,7 @@ public class DataStreamIT extends ESIntegTestCase {
expectThrows(
IndexNotFoundException.class,
"Backing index '" + index.getName() + "' should have been deleted.",
- () -> indicesAdmin().getIndex(new GetIndexRequest().indices(index.getName())).actionGet()
+ () -> indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(index.getName())).actionGet()
);
}
}
@@ -596,7 +598,7 @@ public class DataStreamIT extends ESIntegTestCase {
verifyResolvability(dataStreamName, indicesAdmin().prepareGetFieldMappings(dataStreamName), false);
verifyResolvability(dataStreamName, indicesAdmin().preparePutMapping(dataStreamName).setSource("""
{"_doc":{"properties": {"my_field":{"type":"keyword"}}}}""", XContentType.JSON), false);
- verifyResolvability(dataStreamName, indicesAdmin().prepareGetMappings(dataStreamName), false);
+ verifyResolvability(dataStreamName, indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, dataStreamName), false);
verifyResolvability(
dataStreamName,
indicesAdmin().prepareUpdateSettings(dataStreamName).setSettings(Settings.builder().put("index.number_of_replicas", 0)),
@@ -606,7 +608,7 @@ public class DataStreamIT extends ESIntegTestCase {
verifyResolvability(dataStreamName, clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, dataStreamName), false);
verifyResolvability(dataStreamName, clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setIndices(dataStreamName), false);
verifyResolvability(dataStreamName, client().prepareFieldCaps(dataStreamName).setFields("*"), false);
- verifyResolvability(dataStreamName, indicesAdmin().prepareGetIndex().addIndices(dataStreamName), false);
+ verifyResolvability(dataStreamName, indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(dataStreamName), false);
verifyResolvability(dataStreamName, indicesAdmin().prepareOpen(dataStreamName), false);
verifyResolvability(dataStreamName, indicesAdmin().prepareClose(dataStreamName), true);
verifyResolvability(aliasToDataStream, indicesAdmin().prepareClose(aliasToDataStream), true);
@@ -643,7 +645,7 @@ public class DataStreamIT extends ESIntegTestCase {
verifyResolvability(wildcardExpression, indicesAdmin().prepareGetFieldMappings(wildcardExpression), false);
verifyResolvability(wildcardExpression, indicesAdmin().preparePutMapping(wildcardExpression).setSource("""
{"_doc":{"properties": {"my_field":{"type":"keyword"}}}}""", XContentType.JSON), false);
- verifyResolvability(wildcardExpression, indicesAdmin().prepareGetMappings(wildcardExpression), false);
+ verifyResolvability(wildcardExpression, indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, wildcardExpression), false);
verifyResolvability(wildcardExpression, indicesAdmin().prepareGetSettings(wildcardExpression), false);
verifyResolvability(
wildcardExpression,
@@ -653,7 +655,7 @@ public class DataStreamIT extends ESIntegTestCase {
verifyResolvability(wildcardExpression, clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, wildcardExpression), false);
verifyResolvability(wildcardExpression, clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setIndices(wildcardExpression), false);
verifyResolvability(wildcardExpression, client().prepareFieldCaps(wildcardExpression).setFields("*"), false);
- verifyResolvability(wildcardExpression, indicesAdmin().prepareGetIndex().addIndices(wildcardExpression), false);
+ verifyResolvability(wildcardExpression, indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(wildcardExpression), false);
verifyResolvability(wildcardExpression, indicesAdmin().prepareOpen(wildcardExpression), false);
verifyResolvability(wildcardExpression, indicesAdmin().prepareClose(wildcardExpression), false);
verifyResolvability(
@@ -1180,7 +1182,7 @@ public class DataStreamIT extends ESIntegTestCase {
DataStreamTimestampFieldMapper.NAME,
Map.of("enabled", true)
);
- GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("logs-foobar").get();
+ GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "logs-foobar").get();
assertThat(getMappingsResponse.getMappings().size(), equalTo(2));
assertThat(getMappingsResponse.getMappings().get(backingIndex1).getSourceAsMap(), equalTo(expectedMapping));
assertThat(getMappingsResponse.getMappings().get(backingIndex2).getSourceAsMap(), equalTo(expectedMapping));
@@ -1195,7 +1197,7 @@ public class DataStreamIT extends ESIntegTestCase {
.setSource("{\"properties\":{\"my_field\":{\"type\":\"keyword\"}}}", XContentType.JSON)
.get();
// The mappings of all backing indices should be updated:
- getMappingsResponse = indicesAdmin().prepareGetMappings("logs-foobar").get();
+ getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "logs-foobar").get();
assertThat(getMappingsResponse.getMappings().size(), equalTo(2));
assertThat(getMappingsResponse.getMappings().get(backingIndex1).getSourceAsMap(), equalTo(expectedMapping));
assertThat(getMappingsResponse.getMappings().get(backingIndex2).getSourceAsMap(), equalTo(expectedMapping));
@@ -1401,7 +1403,8 @@ public class DataStreamIT extends ESIntegTestCase {
}
private static void assertBackingIndex(String backingIndex, String timestampFieldPathInMapping, Map, ?> expectedMapping) {
- GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet();
+ GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex))
+ .actionGet();
assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue());
assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true));
Map, ?> mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap();
@@ -1488,7 +1491,8 @@ public class DataStreamIT extends ESIntegTestCase {
assertThat(getDataStreamsResponse.getDataStreams().get(2).getDataStream().getName(), equalTo("logs-foobaz2"));
assertThat(getDataStreamsResponse.getDataStreams().get(3).getDataStream().getName(), equalTo("logs-foobaz3"));
- GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices("logs-bar*")).actionGet();
+ GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices("logs-bar*"))
+ .actionGet();
assertThat(getIndexResponse.getIndices(), arrayWithSize(4));
assertThat(getIndexResponse.getIndices(), hasItemInArray("logs-barbaz"));
assertThat(getIndexResponse.getIndices(), hasItemInArray("logs-barfoo"));
@@ -1521,7 +1525,8 @@ public class DataStreamIT extends ESIntegTestCase {
.actionGet();
assertThat(getDataStreamsResponse.getDataStreams(), hasSize(0));
- GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices("logs-foobar")).actionGet();
+ GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices("logs-foobar"))
+ .actionGet();
assertThat(getIndexResponse.getIndices(), arrayWithSize(1));
assertThat(getIndexResponse.getIndices(), hasItemInArray("logs-foobar"));
assertThat(getIndexResponse.getSettings().get("logs-foobar").get(IndexMetadata.SETTING_NUMBER_OF_REPLICAS), equalTo("0"));
@@ -1657,7 +1662,7 @@ public class DataStreamIT extends ESIntegTestCase {
.actionGet();
String newBackingIndexName = getDataStreamResponse.getDataStreams().get(0).getDataStream().getWriteIndex().getName();
assertThat(newBackingIndexName, backingIndexEqualTo("potato-biscuit", 2));
- indicesAdmin().prepareGetIndex().addIndices(newBackingIndexName).get();
+ indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(newBackingIndexName).get();
} catch (Exception e) {
logger.info("--> expecting second index to be created but it has not yet been created");
fail("expecting second index to exist");
diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java
index f6c703b96888..40bde501f0bf 100644
--- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java
+++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java
@@ -1304,7 +1304,7 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {
assertEquals(RestStatus.OK, restoreSnapshotResponse.status());
assertThat(getDataStreamInfo("*"), hasSize(3));
- assertNotNull(client.admin().indices().prepareGetIndex().setIndices(indexName).get());
+ assertNotNull(client.admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices(indexName).get());
}
public void testRestoreDataStreamAliasWithConflictingDataStream() throws Exception {
diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java
index c08a3548127e..c02c7ea25b12 100644
--- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java
+++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java
@@ -50,7 +50,7 @@ public class DataTierDataStreamIT extends ESIntegTestCase {
.setWaitForActiveShards(0)
.get()
.getIndex();
- var idxSettings = indicesAdmin().prepareGetIndex().addIndices(index).get().getSettings().get(dsIndexName);
+ var idxSettings = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(index).get().getSettings().get(dsIndexName);
assertThat(DataTier.TIER_PREFERENCE_SETTING.get(idxSettings), equalTo(DataTier.DATA_HOT));
logger.info("--> waiting for {} to be yellow", index);
@@ -62,7 +62,7 @@ public class DataTierDataStreamIT extends ESIntegTestCase {
// new index name should have the rolled over name
assertNotEquals(dsIndexName, rolledOverIndexName);
- idxSettings = indicesAdmin().prepareGetIndex().addIndices(index).get().getSettings().get(rolledOverIndexName);
+ idxSettings = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(index).get().getSettings().get(rolledOverIndexName);
assertThat(DataTier.TIER_PREFERENCE_SETTING.get(idxSettings), equalTo(DataTier.DATA_HOT));
}
diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java
index aa6ecf35e06f..0bba93ee6ec3 100644
--- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java
+++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java
@@ -341,7 +341,10 @@ public class ResolveClusterDataStreamIT extends AbstractMultiClustersTestCase {
DataStream fooDataStream = getDataStreamResponse.getDataStreams().get(0).getDataStream();
String backingIndex = fooDataStream.getIndices().get(0).getName();
backingIndices.add(backingIndex);
- GetIndexResponse getIndexResponse = client.admin().indices().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet();
+ GetIndexResponse getIndexResponse = client.admin()
+ .indices()
+ .getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex))
+ .actionGet();
assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue());
assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true));
Map, ?> mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap();
@@ -377,7 +380,10 @@ public class ResolveClusterDataStreamIT extends AbstractMultiClustersTestCase {
DataStream barDataStream = getDataStreamResponse.getDataStreams().get(0).getDataStream();
String backingIndex = barDataStream.getIndices().get(0).getName();
backingIndices.add(backingIndex);
- GetIndexResponse getIndexResponse = client.admin().indices().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet();
+ GetIndexResponse getIndexResponse = client.admin()
+ .indices()
+ .getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex))
+ .actionGet();
assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue());
assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true));
Map, ?> mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap();
diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java
index 2083807b1227..855644a09e0e 100644
--- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java
+++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java
@@ -104,7 +104,7 @@ public class SystemDataStreamSnapshotIT extends AbstractSnapshotIntegTestCase {
}
{
- GetIndexResponse indicesRemaining = indicesAdmin().prepareGetIndex().addIndices("_all").get();
+ GetIndexResponse indicesRemaining = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("_all").get();
assertThat(indicesRemaining.indices(), arrayWithSize(0));
assertSystemDataStreamDoesNotExist();
}
@@ -236,7 +236,7 @@ public class SystemDataStreamSnapshotIT extends AbstractSnapshotIntegTestCase {
assertAcked(indicesAdmin().prepareDelete("my-index"));
{
- GetIndexResponse indicesRemaining = indicesAdmin().prepareGetIndex().addIndices("_all").get();
+ GetIndexResponse indicesRemaining = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("_all").get();
assertThat(indicesRemaining.indices(), arrayWithSize(0));
}
diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java
index aad68660d2e4..434a8bced889 100644
--- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java
+++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java
@@ -155,7 +155,7 @@ public class TSDBIndexingIT extends ESSingleNodeTestCase {
}
// fetch end time
- var getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndexName)).actionGet();
+ var getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndexName)).actionGet();
Instant endTime = IndexSettings.TIME_SERIES_END_TIME.get(getIndexResponse.getSettings().get(backingIndexName));
// index another doc and verify index
@@ -194,7 +194,7 @@ public class TSDBIndexingIT extends ESSingleNodeTestCase {
var newBackingIndexName = rolloverResponse.getNewIndex();
// index and check target index is new
- getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(newBackingIndexName)).actionGet();
+ getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(newBackingIndexName)).actionGet();
Instant newStartTime = IndexSettings.TIME_SERIES_START_TIME.get(getIndexResponse.getSettings().get(newBackingIndexName));
Instant newEndTime = IndexSettings.TIME_SERIES_END_TIME.get(getIndexResponse.getSettings().get(newBackingIndexName));
diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBPassthroughIndexingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBPassthroughIndexingIT.java
index 17e9cca07a05..a76dac5db454 100644
--- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBPassthroughIndexingIT.java
+++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBPassthroughIndexingIT.java
@@ -183,7 +183,7 @@ public class TSDBPassthroughIndexingIT extends ESSingleNodeTestCase {
}
// validate index:
- var getIndexResponse = client().admin().indices().getIndex(new GetIndexRequest().indices(index)).actionGet();
+ var getIndexResponse = client().admin().indices().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(index)).actionGet();
assertThat(getIndexResponse.getSettings().get(index).get("index.routing_path"), equalTo("[attributes.*]"));
// validate mapping
var mapping = getIndexResponse.mappings().get(index).getSourceAsMap();
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java
index f090186480b7..8026ec641d04 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java
@@ -9,10 +9,6 @@
package org.elasticsearch.datastreams;
-import org.elasticsearch.action.admin.indices.rollover.LazyRolloverAction;
-import org.elasticsearch.action.datastreams.autosharding.DataStreamAutoShardingService;
-import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention;
-import org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthInfoPublisher;
import org.elasticsearch.features.FeatureSpecification;
import org.elasticsearch.features.NodeFeature;
@@ -27,12 +23,7 @@ public class DataStreamFeatures implements FeatureSpecification {
@Override
public Set getFeatures() {
- return Set.of(
- DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE, // Added in 8.12
- LazyRolloverAction.DATA_STREAM_LAZY_ROLLOVER, // Added in 8.13
- DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE,
- DataStreamGlobalRetention.GLOBAL_RETENTION // Added in 8.14
- );
+ return Set.of();
}
@Override
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java
index cb7445705537..7d5f4bbee32b 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java
@@ -197,8 +197,7 @@ public class DataStreamsPlugin extends Plugin implements ActionPlugin, HealthPlu
settings,
services.client(),
services.clusterService(),
- errorStoreInitialisationService.get(),
- services.featureService()
+ errorStoreInitialisationService.get()
)
);
dataLifecycleInitialisationService.set(
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisher.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisher.java
index 642fa4923e07..71575ee88aa7 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisher.java
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisher.java
@@ -19,8 +19,6 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore;
-import org.elasticsearch.features.FeatureService;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.health.node.DataStreamLifecycleHealthInfo;
import org.elasticsearch.health.node.DslErrorInfo;
import org.elasticsearch.health.node.UpdateHealthInfoCacheAction;
@@ -45,12 +43,10 @@ public class DataStreamLifecycleHealthInfoPublisher {
Setting.Property.Dynamic,
Setting.Property.NodeScope
);
- public static final NodeFeature DSL_HEALTH_INFO_FEATURE = new NodeFeature("health.dsl.info", true);
private final Client client;
private final ClusterService clusterService;
private final DataStreamLifecycleErrorStore errorStore;
- private final FeatureService featureService;
private volatile int signallingErrorRetryInterval;
private volatile int maxNumberOfErrorsToPublish;
@@ -58,13 +54,11 @@ public class DataStreamLifecycleHealthInfoPublisher {
Settings settings,
Client client,
ClusterService clusterService,
- DataStreamLifecycleErrorStore errorStore,
- FeatureService featureService
+ DataStreamLifecycleErrorStore errorStore
) {
this.client = client;
this.clusterService = clusterService;
this.errorStore = errorStore;
- this.featureService = featureService;
this.signallingErrorRetryInterval = DATA_STREAM_SIGNALLING_ERROR_RETRY_INTERVAL_SETTING.get(settings);
this.maxNumberOfErrorsToPublish = DATA_STREAM_LIFECYCLE_MAX_ERRORS_TO_PUBLISH_SETTING.get(settings);
}
@@ -89,9 +83,6 @@ public class DataStreamLifecycleHealthInfoPublisher {
* {@link org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService#DATA_STREAM_SIGNALLING_ERROR_RETRY_INTERVAL_SETTING}
*/
public void publishDslErrorEntries(ActionListener actionListener) {
- if (featureService.clusterHasFeature(clusterService.state(), DSL_HEALTH_INFO_FEATURE) == false) {
- return;
- }
// fetching the entries that persist in the error store for more than the signalling retry interval
// note that we're reporting this view into the error store on every publishing iteration
List errorEntriesToSignal = errorStore.getErrorsInfo(
diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java
index fc6a6ffa6ad3..9779a151c829 100644
--- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java
+++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java
@@ -67,9 +67,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.core.Tuple;
-import org.elasticsearch.datastreams.DataStreamFeatures;
import org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthInfoPublisher;
-import org.elasticsearch.features.FeatureService;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexMode;
import org.elasticsearch.index.IndexSettings;
@@ -183,13 +181,7 @@ public class DataStreamLifecycleServiceTests extends ESTestCase {
() -> now,
errorStore,
allocationService,
- new DataStreamLifecycleHealthInfoPublisher(
- Settings.EMPTY,
- client,
- clusterService,
- errorStore,
- new FeatureService(List.of(new DataStreamFeatures()))
- ),
+ new DataStreamLifecycleHealthInfoPublisher(Settings.EMPTY, client, clusterService, errorStore),
globalRetentionSettings
);
clientDelegate = null;
@@ -1487,13 +1479,7 @@ public class DataStreamLifecycleServiceTests extends ESTestCase {
() -> now.getAndAdd(delta),
errorStore,
mock(AllocationService.class),
- new DataStreamLifecycleHealthInfoPublisher(
- Settings.EMPTY,
- getTransportRequestsRecordingClient(),
- clusterService,
- errorStore,
- new FeatureService(List.of(new DataStreamFeatures()))
- ),
+ new DataStreamLifecycleHealthInfoPublisher(Settings.EMPTY, getTransportRequestsRecordingClient(), clusterService, errorStore),
globalRetentionSettings
);
assertThat(service.getLastRunDuration(), is(nullValue()));
diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisherTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisherTests.java
index cff6127e0729..f8a2ac3c6102 100644
--- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisherTests.java
+++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisherTests.java
@@ -24,10 +24,8 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.datastreams.DataStreamFeatures;
import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore;
import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService;
-import org.elasticsearch.features.FeatureService;
import org.elasticsearch.health.node.DataStreamLifecycleHealthInfo;
import org.elasticsearch.health.node.DslErrorInfo;
import org.elasticsearch.health.node.UpdateHealthInfoCacheAction;
@@ -40,7 +38,6 @@ import org.junit.Before;
import java.util.HashSet;
import java.util.List;
-import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
@@ -83,13 +80,7 @@ public class DataStreamLifecycleHealthInfoPublisherTests extends ESTestCase {
final Client client = getTransportRequestsRecordingClient();
errorStore = new DataStreamLifecycleErrorStore(() -> now);
- dslHealthInfoPublisher = new DataStreamLifecycleHealthInfoPublisher(
- Settings.EMPTY,
- client,
- clusterService,
- errorStore,
- new FeatureService(List.of(new DataStreamFeatures()))
- );
+ dslHealthInfoPublisher = new DataStreamLifecycleHealthInfoPublisher(Settings.EMPTY, client, clusterService, errorStore);
}
@After
@@ -105,16 +96,6 @@ public class DataStreamLifecycleHealthInfoPublisherTests extends ESTestCase {
}
errorStore.recordError("testIndex", new IllegalStateException("bad state"));
ClusterState stateWithHealthNode = ClusterStateCreationUtils.state(node1, node1, node1, allNodes);
- stateWithHealthNode = ClusterState.builder(stateWithHealthNode)
- .nodeFeatures(
- Map.of(
- node1.getId(),
- Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id()),
- node2.getId(),
- Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id())
- )
- )
- .build();
ClusterServiceUtils.setState(clusterService, stateWithHealthNode);
dslHealthInfoPublisher.publishDslErrorEntries(new ActionListener<>() {
@Override
@@ -143,16 +124,6 @@ public class DataStreamLifecycleHealthInfoPublisherTests extends ESTestCase {
errorStore.recordError("testIndex", new IllegalStateException("bad state"));
ClusterState stateNoHealthNode = ClusterStateCreationUtils.state(node1, node1, null, allNodes);
- stateNoHealthNode = ClusterState.builder(stateNoHealthNode)
- .nodeFeatures(
- Map.of(
- node1.getId(),
- Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id()),
- node2.getId(),
- Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id())
- )
- )
- .build();
ClusterServiceUtils.setState(clusterService, stateNoHealthNode);
dslHealthInfoPublisher.publishDslErrorEntries(new ActionListener<>() {
@Override
@@ -170,16 +141,6 @@ public class DataStreamLifecycleHealthInfoPublisherTests extends ESTestCase {
public void testPublishDslErrorEntriesEmptyErrorStore() {
// publishes the empty error store (this is the "back to healthy" state where all errors have been fixed)
ClusterState state = ClusterStateCreationUtils.state(node1, node1, node1, allNodes);
- state = ClusterState.builder(state)
- .nodeFeatures(
- Map.of(
- node1.getId(),
- Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id()),
- node2.getId(),
- Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id())
- )
- )
- .build();
ClusterServiceUtils.setState(clusterService, state);
dslHealthInfoPublisher.publishDslErrorEntries(new ActionListener<>() {
@Override
diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml
index 9ea3bfefabdf..884adb545810 100644
--- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml
+++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml
@@ -300,9 +300,6 @@ index without timestamp with pipeline:
---
dynamic templates:
- - requires:
- cluster_features: ["mapper.pass_through_priority"]
- reason: support for priority in passthrough objects
- do:
allowed_warnings:
- "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
@@ -450,9 +447,6 @@ dynamic templates:
---
dynamic templates - conflicting aliases:
- - requires:
- cluster_features: ["mapper.pass_through_priority"]
- reason: support for priority in passthrough objects
- do:
allowed_warnings:
- "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
@@ -549,9 +543,6 @@ dynamic templates - conflicting aliases:
---
dynamic templates - conflicting aliases with top-level field:
- - requires:
- cluster_features: ["mapper.pass_through_priority"]
- reason: support for priority in passthrough objects
- do:
allowed_warnings:
- "index template [my-dynamic-template] has index patterns [otel] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
@@ -632,9 +623,6 @@ dynamic templates - conflicting aliases with top-level field:
---
dynamic templates with nesting:
- - requires:
- cluster_features: ["mapper.pass_through_priority"]
- reason: support for priority in passthrough objects
- do:
allowed_warnings:
- "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
@@ -810,10 +798,6 @@ dynamic templates with nesting:
---
dynamic templates with incremental indexing:
- - requires:
- cluster_features: ["mapper.pass_through_priority"]
- reason: support for priority in passthrough objects
-
- do:
allowed_warnings:
- "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
@@ -1038,9 +1022,6 @@ dynamic templates with incremental indexing:
---
subobject in passthrough object auto flatten:
- - requires:
- cluster_features: ["mapper.pass_through_priority"]
- reason: support for priority in passthrough objects
- do:
allowed_warnings:
- "index template [my-passthrough-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-passthrough-template] will take precedence during new index creation"
@@ -1108,9 +1089,6 @@ enable subobjects in passthrough object:
---
passthrough objects with duplicate priority:
- - requires:
- cluster_features: ["mapper.pass_through_priority"]
- reason: support for priority in passthrough objects
- do:
catch: /has a conflicting param/
indices.put_index_template:
@@ -1135,9 +1113,6 @@ passthrough objects with duplicate priority:
---
dimensions with ignore_malformed and ignore_above:
- - requires:
- cluster_features: ["mapper.keyword_dimension_ignore_above"]
- reason: support for ignore_above on keyword dimensions
- do:
allowed_warnings:
- "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
@@ -1229,9 +1204,6 @@ dimensions with ignore_malformed and ignore_above:
---
non string dimension fields:
- - requires:
- cluster_features: ["mapper.pass_through_priority", "routing.boolean_routing_path", "mapper.boolean_dimension"]
- reason: support for priority in passthrough objects
- do:
allowed_warnings:
- "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
@@ -1339,10 +1311,6 @@ non string dimension fields:
---
multi value dimensions:
- - requires:
- cluster_features: ["routing.multi_value_routing_path"]
- reason: support for multi-value dimensions
-
- do:
allowed_warnings:
- "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java
index aa48c73cf1d7..08efe87e6fde 100644
--- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java
+++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java
@@ -160,11 +160,6 @@ public record DatabaseConfiguration(String id, String name, Provider provider) i
if (provider instanceof Maxmind maxmind) {
out.writeString(maxmind.accountId);
} else {
- /*
- * The existence of a non-Maxmind providers is gated on the feature get_database_configuration_action.multi_node, and
- * get_database_configuration_action.multi_node is only available on or after
- * TransportVersions.INGEST_GEO_DATABASE_PROVIDERS.
- */
assert false : "non-maxmind DatabaseConfiguration.Provider [" + provider.getWriteableName() + "]";
}
}
diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java
index a2705655bc20..7233765bfeda 100644
--- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java
+++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java
@@ -17,7 +17,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.regex.Regex;
-import org.elasticsearch.features.FeatureService;
import org.elasticsearch.ingest.geoip.DatabaseNodeService;
import org.elasticsearch.ingest.geoip.GeoIpTaskState;
import org.elasticsearch.ingest.geoip.IngestGeoIpMetadata;
@@ -41,8 +40,6 @@ import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
-import static org.elasticsearch.ingest.IngestGeoIpFeatures.GET_DATABASE_CONFIGURATION_ACTION_MULTI_NODE;
-
public class TransportGetDatabaseConfigurationAction extends TransportNodesAction<
GetDatabaseConfigurationAction.Request,
GetDatabaseConfigurationAction.Response,
@@ -50,7 +47,6 @@ public class TransportGetDatabaseConfigurationAction extends TransportNodesActio
GetDatabaseConfigurationAction.NodeResponse,
List> {
- private final FeatureService featureService;
private final DatabaseNodeService databaseNodeService;
@Inject
@@ -59,7 +55,6 @@ public class TransportGetDatabaseConfigurationAction extends TransportNodesActio
ClusterService clusterService,
ThreadPool threadPool,
ActionFilters actionFilters,
- FeatureService featureService,
DatabaseNodeService databaseNodeService
) {
super(
@@ -70,39 +65,9 @@ public class TransportGetDatabaseConfigurationAction extends TransportNodesActio
GetDatabaseConfigurationAction.NodeRequest::new,
threadPool.executor(ThreadPool.Names.MANAGEMENT)
);
- this.featureService = featureService;
this.databaseNodeService = databaseNodeService;
}
- @Override
- protected void doExecute(
- Task task,
- GetDatabaseConfigurationAction.Request request,
- ActionListener listener
- ) {
- if (featureService.clusterHasFeature(clusterService.state(), GET_DATABASE_CONFIGURATION_ACTION_MULTI_NODE) == false) {
- /*
- * TransportGetDatabaseConfigurationAction used to be a TransportMasterNodeAction, and not all nodes in the cluster have been
- * updated. So we don't want to send node requests to the other nodes because they will blow up. Instead, we just return
- * the information that we used to return from the master node (it doesn't make any difference that this might not be the master
- * node, because we're only reading the cluster state). Because older nodes only know about the Maxmind provider type, we filter
- * out all others here to avoid causing problems on those nodes.
- */
- newResponseAsync(
- task,
- request,
- createActionContext(task, request).stream()
- .filter(database -> database.database().provider() instanceof DatabaseConfiguration.Maxmind)
- .toList(),
- List.of(),
- List.of(),
- listener
- );
- } else {
- super.doExecute(task, request, listener);
- }
- }
-
protected List createActionContext(Task task, GetDatabaseConfigurationAction.Request request) {
final Set ids;
if (request.getDatabaseIds().length == 0) {
diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java
index 5dda55799209..6e3910f9d5e9 100644
--- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java
+++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java
@@ -29,7 +29,6 @@ import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.Strings;
import org.elasticsearch.core.Tuple;
-import org.elasticsearch.features.FeatureService;
import org.elasticsearch.ingest.geoip.IngestGeoIpMetadata;
import org.elasticsearch.ingest.geoip.direct.PutDatabaseConfigurationAction.Request;
import org.elasticsearch.injection.guice.Inject;
@@ -42,8 +41,6 @@ import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
-import static org.elasticsearch.ingest.IngestGeoIpFeatures.PUT_DATABASE_CONFIGURATION_ACTION_IPINFO;
-
public class TransportPutDatabaseConfigurationAction extends TransportMasterNodeAction {
private static final Logger logger = LogManager.getLogger(TransportPutDatabaseConfigurationAction.class);
@@ -61,7 +58,6 @@ public class TransportPutDatabaseConfigurationAction extends TransportMasterNode
}
};
- private final FeatureService featureService;
private final MasterServiceTaskQueue updateDatabaseConfigurationTaskQueue;
@Inject
@@ -70,8 +66,7 @@ public class TransportPutDatabaseConfigurationAction extends TransportMasterNode
ClusterService clusterService,
ThreadPool threadPool,
ActionFilters actionFilters,
- IndexNameExpressionResolver indexNameExpressionResolver,
- FeatureService featureService
+ IndexNameExpressionResolver indexNameExpressionResolver
) {
super(
PutDatabaseConfigurationAction.NAME,
@@ -84,7 +79,6 @@ public class TransportPutDatabaseConfigurationAction extends TransportMasterNode
AcknowledgedResponse::readFrom,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
- this.featureService = featureService;
this.updateDatabaseConfigurationTaskQueue = clusterService.createTaskQueue(
"update-geoip-database-configuration-state-update",
Priority.NORMAL,
@@ -96,18 +90,6 @@ public class TransportPutDatabaseConfigurationAction extends TransportMasterNode
protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) {
final String id = request.getDatabase().id();
- // if this is an ipinfo configuration, then make sure the whole cluster supports that feature
- if (request.getDatabase().provider() instanceof DatabaseConfiguration.Ipinfo
- && featureService.clusterHasFeature(clusterService.state(), PUT_DATABASE_CONFIGURATION_ACTION_IPINFO) == false) {
- listener.onFailure(
- new IllegalArgumentException(
- "Unable to use ipinfo database configurations in mixed-clusters with nodes that do not support feature "
- + PUT_DATABASE_CONFIGURATION_ACTION_IPINFO.id()
- )
- );
- return;
- }
-
updateDatabaseConfigurationTaskQueue.submitTask(
Strings.format("update-geoip-database-configuration-[%s]", id),
new UpdateDatabaseConfigurationTask(listener, request.getDatabase()),
diff --git a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml
index a1104505bc24..007c82db4c92 100644
--- a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml
+++ b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml
@@ -1,9 +1,3 @@
----
-setup:
- - requires:
- cluster_features: ["geoip.downloader.database.configuration", "get_database_configuration_action.multi_node"]
- reason: "geoip downloader database configuration APIs added in 8.15, and updated in 8.16 to return more results"
-
---
teardown:
- do:
diff --git a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/50_ip_lookup_processor.yml b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/50_ip_lookup_processor.yml
index fd73c715a5ac..094798476952 100644
--- a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/50_ip_lookup_processor.yml
+++ b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/50_ip_lookup_processor.yml
@@ -1,9 +1,3 @@
-setup:
- - requires:
- cluster_features:
- - "put_database_configuration_action.ipinfo"
- reason: "ipinfo support added in 8.16"
-
---
"Test ip_location processor with defaults":
- do:
diff --git a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/60_ip_location_databases.yml b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/60_ip_location_databases.yml
index e2e9a1fdb5e2..47f09392df60 100644
--- a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/60_ip_location_databases.yml
+++ b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/60_ip_location_databases.yml
@@ -1,10 +1,3 @@
----
-setup:
- - requires:
- cluster_features:
- - "put_database_configuration_action.ipinfo"
- reason: "ip location downloader database configuration APIs added in 8.16 to support more types"
-
---
teardown:
- do:
diff --git a/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java b/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java
index 553e4696af31..a9ab0c02612f 100644
--- a/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java
+++ b/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java
@@ -16,6 +16,8 @@ import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.client.internal.Client;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor;
@@ -23,7 +25,6 @@ import org.elasticsearch.index.IndexingPressure;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.ThreadPoolStats;
@@ -49,10 +50,6 @@ import static org.hamcrest.Matchers.startsWith;
* threads that wait on a phaser. This lets us verify that operations on system indices
* are being directed to other thread pools.
*/
-@TestLogging(
- reason = "investigate",
- value = "org.elasticsearch.kibana.KibanaThreadPoolIT:DEBUG,org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor:TRACE"
-)
public class KibanaThreadPoolIT extends ESIntegTestCase {
private static final Logger logger = LogManager.getLogger(KibanaThreadPoolIT.class);
@@ -68,6 +65,8 @@ public class KibanaThreadPoolIT extends ESIntegTestCase {
.put("thread_pool.write.queue_size", 1)
.put("thread_pool.get.size", 1)
.put("thread_pool.get.queue_size", 1)
+ // a rejected GET may retry on an INITIALIZING shard (the target of a relocation) and unexpectedly succeed, so block rebalancing
+ .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)
.build();
}
@@ -112,7 +111,12 @@ public class KibanaThreadPoolIT extends ESIntegTestCase {
}
public void testBlockedThreadPoolsRejectUserRequests() throws Exception {
- assertAcked(client().admin().indices().prepareCreate(USER_INDEX));
+ assertAcked(
+ client().admin()
+ .indices()
+ .prepareCreate(USER_INDEX)
+ .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)) // avoid retrying rejected actions
+ );
runWithBlockedThreadPools(this::assertThreadPoolsBlocked);
diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml
index 25088f51e2b5..1434450b65a6 100644
--- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml
+++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml
@@ -221,9 +221,6 @@ setup:
- close_to: {hits.hits.2._score: {value: 186.34454, error: 0.01}}
---
"Test hamming distance fails on float":
- - requires:
- cluster_features: ["script.hamming"]
- reason: "support for hamming distance added in 8.15"
- do:
headers:
Content-Type: application/json
diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml
index cdd65ca0eb29..05a10ffdbccd 100644
--- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml
+++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml
@@ -1,7 +1,5 @@
setup:
- requires:
- cluster_features: ["mapper.vectors.bit_vectors"]
- reason: "support for bit vectors added in 8.15"
test_runner_features: headers
- do:
diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/151_dense_vector_byte_hamming.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/151_dense_vector_byte_hamming.yml
index 373f048e7be7..a6c111be681f 100644
--- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/151_dense_vector_byte_hamming.yml
+++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/151_dense_vector_byte_hamming.yml
@@ -1,7 +1,5 @@
setup:
- requires:
- cluster_features: ["script.hamming"]
- reason: "support for hamming distance added in 8.15"
test_runner_features: headers
- do:
diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/190_term_statistics_script_score.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/190_term_statistics_script_score.yml
index f82b844f0158..3a869640993f 100644
--- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/190_term_statistics_script_score.yml
+++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/190_term_statistics_script_score.yml
@@ -1,8 +1,4 @@
setup:
- - requires:
- cluster_features: ["script.term_stats"]
- reason: "support for term stats has been added in 8.16"
-
- do:
indices.create:
index: test-index
diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/191_term_statistics_function_score.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/191_term_statistics_function_score.yml
index de4d6530f4a9..3a9c71e3c2ba 100644
--- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/191_term_statistics_function_score.yml
+++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/191_term_statistics_function_score.yml
@@ -1,8 +1,4 @@
setup:
- - requires:
- cluster_features: ["script.term_stats"]
- reason: "support for term stats has been added in 8.16"
-
- do:
indices.create:
index: test-index
diff --git a/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml b/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml
index 968e93cf9fc5..175abe183106 100644
--- a/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml
+++ b/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml
@@ -251,11 +251,6 @@ setup:
---
"Usage stats":
- - requires:
- cluster_features:
- - repositories.supports_usage_stats
- reason: requires this feature
-
- do:
cluster.stats: {}
diff --git a/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml b/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml
index e8c34a4b6a20..d2370919297a 100644
--- a/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml
+++ b/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml
@@ -234,11 +234,6 @@ setup:
---
"Usage stats":
- - requires:
- cluster_features:
- - repositories.supports_usage_stats
- reason: requires this feature
-
- do:
cluster.stats: {}
diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioBasicCredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioBasicCredentialsRestIT.java
index 93915e8491d5..3d7c8dd15061 100644
--- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioBasicCredentialsRestIT.java
+++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioBasicCredentialsRestIT.java
@@ -19,13 +19,15 @@ import org.junit.ClassRule;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
+import java.util.Locale;
+
@ThreadLeakFilters(filters = { TestContainersThreadFilter.class })
@ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482
public class RepositoryS3MinioBasicCredentialsRestIT extends AbstractRepositoryS3RestTestCase {
- private static final String PREFIX = getIdentifierPrefix("RepositoryS3MinioBasicCredentialsRestIT");
+ private static final String PREFIX = getIdentifierPrefix("RepositoryS3MinioBasicCredentialsRestIT").toLowerCase(Locale.ROOT);
private static final String BUCKET = PREFIX + "bucket";
- private static final String BASE_PATH = PREFIX + "base_path";
+ private static final String BASE_PATH = PREFIX + "base-path";
private static final String ACCESS_KEY = PREFIX + "access-key";
private static final String SECRET_KEY = PREFIX + "secret-key";
private static final String CLIENT = "minio_client";
diff --git a/modules/repository-s3/src/main/plugin-metadata/entitlement-policy.yaml b/modules/repository-s3/src/main/plugin-metadata/entitlement-policy.yaml
new file mode 100644
index 000000000000..4c42ec110a25
--- /dev/null
+++ b/modules/repository-s3/src/main/plugin-metadata/entitlement-policy.yaml
@@ -0,0 +1,4 @@
+ALL-UNNAMED:
+ - network:
+ actions:
+ - connect
diff --git a/modules/repository-url/src/main/plugin-metadata/entitlement-policy.yaml b/modules/repository-url/src/main/plugin-metadata/entitlement-policy.yaml
new file mode 100644
index 000000000000..f1dc1fc7755e
--- /dev/null
+++ b/modules/repository-url/src/main/plugin-metadata/entitlement-policy.yaml
@@ -0,0 +1,4 @@
+org.apache.httpcomponents.httpclient:
+ - network:
+ actions:
+ - connect # for URLHttpClient
diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java
index cad839bed955..5876945cf93b 100644
--- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java
+++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java
@@ -79,7 +79,7 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase
if (doHandshake) {
super.executeHandshake(node, channel, profile, listener);
} else {
- assert getVersion().equals(TransportVersion.current());
+ assert version.equals(TransportVersion.current());
listener.onResponse(TransportVersions.MINIMUM_COMPATIBLE);
}
}
diff --git a/muted-tests.yml b/muted-tests.yml
index 2f652f87ab28..69e6ba22b84b 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -52,9 +52,6 @@ tests:
- class: org.elasticsearch.xpack.transform.integration.TransformIT
method: testStopWaitForCheckpoint
issue: https://github.com/elastic/elasticsearch/issues/106113
-- class: org.elasticsearch.kibana.KibanaThreadPoolIT
- method: testBlockedThreadPoolsRejectUserRequests
- issue: https://github.com/elastic/elasticsearch/issues/113939
- class: org.elasticsearch.xpack.inference.TextEmbeddingCrudIT
method: testPutE5Small_withPlatformAgnosticVariant
issue: https://github.com/elastic/elasticsearch/issues/113983
@@ -216,9 +213,6 @@ tests:
- class: org.elasticsearch.smoketest.MlWithSecurityIT
method: test {yaml=ml/sparse_vector_search/Test sparse_vector search with query vector and pruning config}
issue: https://github.com/elastic/elasticsearch/issues/119548
-- class: org.elasticsearch.index.engine.LuceneSyntheticSourceChangesSnapshotTests
- method: testSkipNonRootOfNestedDocuments
- issue: https://github.com/elastic/elasticsearch/issues/119553
- class: org.elasticsearch.xpack.ml.integration.ForecastIT
method: testOverflowToDisk
issue: https://github.com/elastic/elasticsearch/issues/117740
@@ -227,50 +221,39 @@ tests:
- class: org.elasticsearch.search.profile.dfs.DfsProfilerIT
method: testProfileDfs
issue: https://github.com/elastic/elasticsearch/issues/119711
-- class: org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizerTests
- method: testSingleMatchFunctionFilterPushdownWithStringValues {default}
- issue: https://github.com/elastic/elasticsearch/issues/119720
-- class: org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizerTests
- method: testSingleMatchFunctionPushdownWithCasting {default}
- issue: https://github.com/elastic/elasticsearch/issues/119722
-- class: org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizerTests
- method: testSingleMatchOperatorFilterPushdownWithStringValues {default}
- issue: https://github.com/elastic/elasticsearch/issues/119721
-- class: org.elasticsearch.xpack.inference.action.filter.ShardBulkInferenceActionFilterIT
- method: testBulkOperations {p0=false}
- issue: https://github.com/elastic/elasticsearch/issues/119901
-- class: org.elasticsearch.xpack.inference.InferenceCrudIT
- method: testGetServicesWithCompletionTaskType
- issue: https://github.com/elastic/elasticsearch/issues/119959
-- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT
- method: testSearchableSnapshotUpgrade {p0=[9.0.0, 8.18.0, 8.18.0]}
- issue: https://github.com/elastic/elasticsearch/issues/119978
-- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT
- method: testSearchableSnapshotUpgrade {p0=[9.0.0, 9.0.0, 8.18.0]}
- issue: https://github.com/elastic/elasticsearch/issues/119979
-- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT
- method: testMountSearchableSnapshot {p0=[9.0.0, 8.18.0, 8.18.0]}
- issue: https://github.com/elastic/elasticsearch/issues/119550
-- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT
- method: testMountSearchableSnapshot {p0=[9.0.0, 9.0.0, 8.18.0]}
- issue: https://github.com/elastic/elasticsearch/issues/119980
-- class: org.elasticsearch.index.codec.vectors.es816.ES816HnswBinaryQuantizedVectorsFormatTests
- method: testRandomExceptions
- issue: https://github.com/elastic/elasticsearch/issues/119981
- class: org.elasticsearch.multi_cluster.MultiClusterYamlTestSuiteIT
issue: https://github.com/elastic/elasticsearch/issues/119983
-- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT
- method: testMountSearchableSnapshot {p0=[9.0.0, 9.0.0, 9.0.0]}
- issue: https://github.com/elastic/elasticsearch/issues/119989
-- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT
- method: testSearchableSnapshotUpgrade {p0=[9.0.0, 9.0.0, 9.0.0]}
- issue: https://github.com/elastic/elasticsearch/issues/119990
- class: org.elasticsearch.xpack.test.rest.XPackRestIT
method: test {p0=transform/transforms_unattended/Test unattended put and start}
issue: https://github.com/elastic/elasticsearch/issues/120019
- class: org.elasticsearch.index.mapper.IntervalThrottlerTests
method: testThrottling
issue: https://github.com/elastic/elasticsearch/issues/120023
+- class: org.elasticsearch.xpack.ilm.actions.SearchableSnapshotActionIT
+ method: testUpdatePolicyToAddPhasesYieldsInvalidActionsToBeSkipped
+ issue: https://github.com/elastic/elasticsearch/issues/118406
+- class: org.elasticsearch.xpack.ml.integration.DatafeedJobsIT
+ issue: https://github.com/elastic/elasticsearch/issues/120088
+- class: org.elasticsearch.xpack.searchablesnapshots.minio.MinioSearchableSnapshotsIT
+ issue: https://github.com/elastic/elasticsearch/issues/120101
+- class: org.elasticsearch.repositories.s3.S3RepositoryThirdPartyTests
+ issue: https://github.com/elastic/elasticsearch/issues/120115
+- class: org.elasticsearch.repositories.s3.RepositoryS3MinioBasicCredentialsRestIT
+ issue: https://github.com/elastic/elasticsearch/issues/120117
+- class: org.elasticsearch.repositories.blobstore.testkit.analyze.MinioRepositoryAnalysisRestIT
+ issue: https://github.com/elastic/elasticsearch/issues/118548
+- class: org.elasticsearch.xpack.security.QueryableReservedRolesIT
+ method: testConfiguredReservedRolesAfterClosingAndOpeningIndex
+ issue: https://github.com/elastic/elasticsearch/issues/120127
+- class: org.elasticsearch.oldrepos.OldRepositoryAccessIT
+ method: testOldRepoAccess
+ issue: https://github.com/elastic/elasticsearch/issues/120148
+- class: org.elasticsearch.oldrepos.OldRepositoryAccessIT
+ method: testOldSourceOnlyRepoAccess
+ issue: https://github.com/elastic/elasticsearch/issues/120080
+- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT
+ method: test {lookup-join.MvJoinKeyFromRow ASYNC}
+ issue: https://github.com/elastic/elasticsearch/issues/120242
# Examples:
#
diff --git a/plugins/discovery-ec2/src/main/plugin-metadata/entitlement-policy.yaml b/plugins/discovery-ec2/src/main/plugin-metadata/entitlement-policy.yaml
new file mode 100644
index 000000000000..4c42ec110a25
--- /dev/null
+++ b/plugins/discovery-ec2/src/main/plugin-metadata/entitlement-policy.yaml
@@ -0,0 +1,4 @@
+ALL-UNNAMED:
+ - network:
+ actions:
+ - connect
diff --git a/plugins/mapper-annotated-text/src/main/java/module-info.java b/plugins/mapper-annotated-text/src/main/java/module-info.java
index 13f2bd66418b..58aca0d2857f 100644
--- a/plugins/mapper-annotated-text/src/main/java/module-info.java
+++ b/plugins/mapper-annotated-text/src/main/java/module-info.java
@@ -15,6 +15,4 @@ module org.elasticsearch.index.mapper.annotatedtext {
requires org.apache.lucene.highlighter;
// exports nothing
-
- provides org.elasticsearch.features.FeatureSpecification with org.elasticsearch.index.mapper.annotatedtext.Features;
}
diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java
index 33b5db1c4662..4b2006430b89 100644
--- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java
+++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java
@@ -22,7 +22,6 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.IndexOptions;
import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.analysis.AnalyzerScope;
import org.elasticsearch.index.analysis.IndexAnalyzers;
@@ -64,8 +63,6 @@ import java.util.regex.Pattern;
**/
public class AnnotatedTextFieldMapper extends FieldMapper {
- public static final NodeFeature SYNTHETIC_SOURCE_SUPPORT = new NodeFeature("mapper.annotated_text.synthetic_source", true);
-
public static final String CONTENT_TYPE = "annotated_text";
private static Builder builder(FieldMapper in) {
diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/Features.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/Features.java
deleted file mode 100644
index 51a2d2bbe1d4..000000000000
--- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/Features.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.index.mapper.annotatedtext;
-
-import org.elasticsearch.features.FeatureSpecification;
-import org.elasticsearch.features.NodeFeature;
-
-import java.util.Set;
-
-/**
- * Provides features for annotated text mapper.
- */
-public class Features implements FeatureSpecification {
- @Override
- public Set getFeatures() {
- return Set.of(
- AnnotatedTextFieldMapper.SYNTHETIC_SOURCE_SUPPORT // Added in 8.15
- );
- }
-}
diff --git a/plugins/mapper-annotated-text/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/plugins/mapper-annotated-text/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification
deleted file mode 100644
index 1fc11da18fc3..000000000000
--- a/plugins/mapper-annotated-text/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification
+++ /dev/null
@@ -1,10 +0,0 @@
-#
- # Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- # or more contributor license agreements. Licensed under the "Elastic License
- # 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- # Public License v 1"; you may not use this file except in compliance with, at
- # your election, the "Elastic License 2.0", the "GNU Affero General Public
- # License v3.0 only", or the "Server Side Public License, v 1".
-#
-
-org.elasticsearch.index.mapper.annotatedtext.Features
diff --git a/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java b/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java
index c2251910c312..435849821691 100644
--- a/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java
+++ b/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java
@@ -91,7 +91,7 @@ public class SizeMappingIT extends ESIntegTestCase {
"Expected size field mapping to be " + (enabled ? "enabled" : "disabled") + " for %s",
index
);
- GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(index).get();
+ GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, index).get();
Map mappingSource = getMappingsResponse.getMappings().get(index).getSourceAsMap();
assertThat(errMsg, mappingSource, hasKey("_size"));
String sizeAsString = mappingSource.get("_size").toString();
diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java
index d9adec47ff48..30367bf55d8c 100644
--- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java
+++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java
@@ -11,7 +11,6 @@ package org.elasticsearch.upgrades;
import com.carrotsearch.randomizedtesting.annotations.Name;
-import org.elasticsearch.Build;
import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesRequest;
import org.elasticsearch.client.Request;
import org.elasticsearch.cluster.metadata.DesiredNode;
@@ -84,7 +83,7 @@ public class DesiredNodesUpgradeIT extends AbstractRollingUpgradeTestCase {
randomDoubleProcessorCount(),
ByteSizeValue.ofGb(randomIntBetween(10, 24)),
ByteSizeValue.ofGb(randomIntBetween(128, 256)),
- clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version()
+ null
)
)
.toList();
@@ -96,7 +95,7 @@ public class DesiredNodesUpgradeIT extends AbstractRollingUpgradeTestCase {
new DesiredNode.ProcessorsRange(minProcessors, minProcessors + randomIntBetween(10, 20)),
ByteSizeValue.ofGb(randomIntBetween(10, 24)),
ByteSizeValue.ofGb(randomIntBetween(128, 256)),
- clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version()
+ null
);
}).toList();
}
diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestActionCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestActionCancellationIT.java
index ce514c5f1b1e..c48ae9ba1843 100644
--- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestActionCancellationIT.java
+++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestActionCancellationIT.java
@@ -12,12 +12,15 @@ package org.elasticsearch.http;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.elasticsearch.action.admin.cluster.health.TransportClusterHealthAction;
+import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsAction;
import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction;
import org.elasticsearch.action.admin.indices.recovery.RecoveryAction;
import org.elasticsearch.action.admin.indices.template.get.GetComponentTemplateAction;
import org.elasticsearch.action.admin.indices.template.get.GetComposableIndexTemplateAction;
import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction;
+import org.elasticsearch.action.admin.indices.template.post.SimulateIndexTemplateAction;
+import org.elasticsearch.action.admin.indices.template.post.SimulateTemplateAction;
import org.elasticsearch.action.support.CancellableActionTestPlugin;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.support.RefCountingListener;
@@ -81,6 +84,25 @@ public class RestActionCancellationIT extends HttpSmokeTestCase {
runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_index_template"), GetComposableIndexTemplateAction.NAME);
}
+ public void testSimulateTemplateCancellation() {
+ runRestActionCancellationTest(
+ new Request(HttpPost.METHOD_NAME, "/_index_template/_simulate/random_index_template"),
+ SimulateTemplateAction.NAME
+ );
+ }
+
+ public void testSimulateIndexTemplateCancellation() {
+ createIndex("test");
+ runRestActionCancellationTest(
+ new Request(HttpPost.METHOD_NAME, "/_index_template/_simulate_index/test"),
+ SimulateIndexTemplateAction.NAME
+ );
+ }
+
+ public void testClusterGetSettingsCancellation() {
+ runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_cluster/settings"), ClusterGetSettingsAction.NAME);
+ }
+
private void runRestActionCancellationTest(Request request, String actionName) {
final var node = usually() ? internalCluster().getRandomNodeName() : internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);
diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml
index d4843fb15288..4a5ceeb66f66 100644
--- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml
+++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml
@@ -222,10 +222,6 @@ setup:
- headers
- allowed_warnings
- - requires:
- cluster_features: ["simulate.mapping.validation.templates"]
- reason: "ingest simulate index mapping validation added in 8.16"
-
- do:
headers:
Content-Type: application/json
@@ -313,10 +309,6 @@ setup:
- headers
- allowed_warnings
- - requires:
- cluster_features: ["simulate.component.template.substitutions"]
- reason: "ingest simulate component template substitutions added in 8.16"
-
- do:
headers:
Content-Type: application/json
@@ -494,10 +486,6 @@ setup:
- headers
- allowed_warnings
- - requires:
- cluster_features: ["simulate.component.template.substitutions"]
- reason: "ingest simulate component template substitutions added in 8.16"
-
- do:
headers:
Content-Type: application/json
@@ -617,10 +605,6 @@ setup:
- headers
- allowed_warnings
- - requires:
- cluster_features: ["simulate.component.template.substitutions"]
- reason: "ingest simulate component template substitutions added in 8.16"
-
- do:
headers:
Content-Type: application/json
@@ -816,10 +800,6 @@ setup:
- headers
- allowed_warnings
- - requires:
- cluster_features: ["simulate.index.template.substitutions"]
- reason: "ingest simulate index template substitutions added in 8.16"
-
- do:
headers:
Content-Type: application/json
@@ -1010,10 +990,6 @@ setup:
- headers
- allowed_warnings
- - requires:
- cluster_features: ["simulate.index.template.substitutions"]
- reason: "ingest simulate component template substitutions added in 8.16"
-
- do:
headers:
Content-Type: application/json
@@ -1227,10 +1203,6 @@ setup:
- headers
- allowed_warnings
- - requires:
- cluster_features: ["simulate.mapping.addition"]
- reason: "ingest simulate mapping addition added in 8.17"
-
- do:
headers:
Content-Type: application/json
@@ -1463,10 +1435,6 @@ setup:
- headers
- allowed_warnings
- - requires:
- cluster_features: ["simulate.mapping.addition"]
- reason: "ingest simulate mapping addition added in 8.17"
-
- do:
indices.put_template:
name: my-legacy-template
@@ -1584,10 +1552,6 @@ setup:
- headers
- allowed_warnings
- - requires:
- cluster_features: ["simulate.support.non.template.mapping"]
- reason: "ingest simulate support for indices with mappings that didn't come from templates added in 8.17"
-
# A global match-everything legacy template is added to the cluster sometimes (rarely). We have to get rid of this template if it exists
# because this test is making sure we get correct behavior when an index matches *no* template:
- do:
diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle
index 1da8e906582b..e4b46b98cedd 100644
--- a/rest-api-spec/build.gradle
+++ b/rest-api-spec/build.gradle
@@ -85,4 +85,6 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task ->
task.skipTest("search.vectors/110_knn_query_with_filter/PRE_FILTER: pre-filter across multiple aliases", "waiting for #118774 backport")
task.skipTest("search.vectors/160_knn_query_missing_params/kNN search in a dis_max query - missing num_candidates", "waiting for #118774 backport")
task.skipTest("search.highlight/30_max_analyzed_offset/Plain highlighter with max_analyzed_offset < 0 should FAIL", "semantics of test has changed")
+ task.skipTest("indices.create/10_basic/Create lookup index", "default auto_expand_replicas was removed")
+ task.skipTest("indices.create/10_basic/Create lookup index with one shard", "default auto_expand_replicas was removed")
})
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json
index 5862804257c6..5004ab8de697 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json
@@ -26,7 +26,7 @@
},
"master_timeout":{
"type":"time",
- "description":"Explicit operation timeout for connection to master node"
+ "description":"Timeout for waiting for new cluster state in case it is blocked"
},
"timeout":{
"type":"time",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.delete.json
index 96d477160b27..aa5a3dc0a791 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.delete.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.delete.json
@@ -28,6 +28,11 @@
]
},
"params": {
+ "hard": {
+ "type": "boolean",
+ "default": false,
+ "description": "If true, the connector doc is deleted. If false, connector doc is marked as deleted (soft-deleted)."
+ },
"delete_sync_jobs": {
"type": "boolean",
"default": false,
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.unfreeze.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.unfreeze.json
deleted file mode 100644
index 2327519ff281..000000000000
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.unfreeze.json
+++ /dev/null
@@ -1,67 +0,0 @@
-{
- "indices.unfreeze":{
- "documentation":{
- "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/unfreeze-index-api.html",
- "description":"Unfreezes an index. When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again."
- },
- "stability":"stable",
- "visibility":"public",
- "headers":{
- "accept": [ "application/json"]
- },
- "url":{
- "paths":[
- {
- "path":"/{index}/_unfreeze",
- "methods":[
- "POST"
- ],
- "parts":{
- "index":{
- "type":"string",
- "description":"The name of the index to unfreeze"
- }
- },
- "deprecated":{
- "version":"7.14.0",
- "description":"Frozen indices are deprecated because they provide no benefit given improvements in heap memory utilization. They will be removed in a future release."
- }
- }
- ]
- },
- "params":{
- "timeout":{
- "type":"time",
- "description":"Explicit operation timeout"
- },
- "master_timeout":{
- "type":"time",
- "description":"Specify timeout for connection to master"
- },
- "ignore_unavailable":{
- "type":"boolean",
- "description":"Whether specified concrete indices should be ignored when unavailable (missing or closed)"
- },
- "allow_no_indices":{
- "type":"boolean",
- "description":"Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
- },
- "expand_wildcards":{
- "type":"enum",
- "options":[
- "open",
- "closed",
- "hidden",
- "none",
- "all"
- ],
- "default":"closed",
- "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both."
- },
- "wait_for_active_shards":{
- "type":"string",
- "description":"Sets the number of active shards to wait for before the operation returns."
- }
- }
- }
-}
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/70_index_mode.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/70_index_mode.yml
index 9da6d2c5f086..ce3f7f019839 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/70_index_mode.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/70_index_mode.yml
@@ -1,9 +1,5 @@
---
setup:
- - requires:
- cluster_features: "mapper.query_index_mode"
- reason: "require index_mode"
-
- do:
indices.create:
index: test_metrics
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml
index 13f6ca58ea29..a0061272a2c2 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml
@@ -1014,10 +1014,6 @@ flattened field:
---
flattened field with ignore_above:
- - requires:
- cluster_features: ["mapper.track_ignored_source"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -1070,10 +1066,6 @@ flattened field with ignore_above:
---
flattened field with ignore_above and arrays:
- - requires:
- cluster_features: ["mapper.flattened.ignore_above_with_arrays_support"]
- reason: requires support of ignore_above synthetic source with arrays
-
- do:
indices.create:
index: test
@@ -1127,10 +1119,6 @@ flattened field with ignore_above and arrays:
---
completion:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_fallback"]
- reason: introduced in 8.15.0
-
- do:
indices.create:
index: test
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml
index 414c24cfffd7..7b8f785a2cb9 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml
@@ -2,7 +2,6 @@
"Metrics object indexing":
- requires:
test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ]
- cluster_features: ["mapper.subobjects_auto"]
reason: requires supporting subobjects auto setting
- do:
@@ -69,7 +68,6 @@
"Root with metrics":
- requires:
test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ]
- cluster_features: ["mapper.subobjects_auto"]
reason: requires supporting subobjects auto setting
- do:
@@ -131,7 +129,6 @@
"Metrics object indexing with synthetic source":
- requires:
test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ]
- cluster_features: ["mapper.subobjects_auto"]
reason: added in 8.4.0
- do:
@@ -201,7 +198,6 @@
"Root without subobjects with synthetic source":
- requires:
test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ]
- cluster_features: ["mapper.subobjects_auto"]
reason: added in 8.4.0
- do:
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml
index d0e1759073e1..8645c91a51ad 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml
@@ -171,7 +171,6 @@
index: test_lookup
- match: { test_lookup.settings.index.number_of_shards: "1"}
- - match: { test_lookup.settings.index.auto_expand_replicas: "0-all"}
---
"Create lookup index with one shard":
@@ -196,7 +195,6 @@
index: test_lookup
- match: { test_lookup.settings.index.number_of_shards: "1"}
- - match: { test_lookup.settings.index.auto_expand_replicas: "0-all"}
---
"Create lookup index with two shards":
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml
index 5003f6df79a1..096ccbce9a58 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml
@@ -5,10 +5,6 @@ setup:
---
object with unmapped fields:
- - requires:
- cluster_features: ["mapper.track_ignored_source", "mapper.bwc_workaround_9_0"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -56,10 +52,6 @@ object with unmapped fields:
---
unmapped arrays:
- - requires:
- cluster_features: ["mapper.track_ignored_source"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -102,10 +94,6 @@ unmapped arrays:
---
nested object with unmapped fields:
- - requires:
- cluster_features: ["mapper.track_ignored_source", "mapper.bwc_workaround_9_0"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -153,10 +141,6 @@ nested object with unmapped fields:
---
empty object with unmapped fields:
- - requires:
- cluster_features: ["mapper.track_ignored_source", "mapper.bwc_workaround_9_0"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -314,10 +298,6 @@ disabled object contains array:
---
disabled subobject:
- - requires:
- cluster_features: ["mapper.track_ignored_source"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -353,10 +333,6 @@ disabled subobject:
---
disabled subobject with array:
- - requires:
- cluster_features: ["mapper.track_ignored_source"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -393,10 +369,6 @@ disabled subobject with array:
---
mixed disabled and enabled objects:
- - requires:
- cluster_features: ["mapper.track_ignored_source"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -441,7 +413,7 @@ mixed disabled and enabled objects:
---
object with dynamic override:
- requires:
- cluster_features: ["mapper.ignored_source.dont_expand_dots", "mapper.bwc_workaround_9_0"]
+ cluster_features: ["mapper.ignored_source.dont_expand_dots"]
reason: requires tracking ignored source
- do:
@@ -488,10 +460,6 @@ object with dynamic override:
---
subobject with dynamic override:
- - requires:
- cluster_features: ["mapper.track_ignored_source"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -537,10 +505,6 @@ subobject with dynamic override:
---
object array in object with dynamic override:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -592,10 +556,6 @@ object array in object with dynamic override:
---
value array in object with dynamic override:
- - requires:
- cluster_features: ["mapper.track_ignored_source"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -636,10 +596,6 @@ value array in object with dynamic override:
---
nested object:
- - requires:
- cluster_features: ["mapper.track_ignored_source"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -682,10 +638,6 @@ nested object:
---
nested object next to regular:
- - requires:
- cluster_features: ["mapper.track_ignored_source"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -729,10 +681,6 @@ nested object next to regular:
---
nested object with disabled:
- - requires:
- cluster_features: ["mapper.track_ignored_source"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -818,10 +766,6 @@ nested object with disabled:
---
doubly nested object:
- - requires:
- cluster_features: ["mapper.track_ignored_source"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -914,10 +858,6 @@ doubly nested object:
---
subobjects auto:
- - requires:
- cluster_features: ["mapper.subobjects_auto", "mapper.bwc_workaround_9_0"]
- reason: requires tracking ignored source and supporting subobjects auto setting
-
- do:
indices.create:
index: test
@@ -1003,10 +943,6 @@ subobjects auto:
---
synthetic_source with copy_to:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_copy_to_fix"]
- reason: requires copy_to support in synthetic source
-
- do:
indices.create:
index: test
@@ -1140,10 +1076,6 @@ synthetic_source with copy_to:
---
synthetic_source with disabled doc_values:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_with_copy_to_and_doc_values_false"]
- reason: requires disabled doc_values support in synthetic source
-
- do:
indices.create:
index: test
@@ -1224,10 +1156,6 @@ synthetic_source with disabled doc_values:
---
fallback synthetic_source for text field:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_with_copy_to_and_doc_values_false"]
- reason: requires disabled doc_values support in synthetic source
-
- do:
indices.create:
index: test
@@ -1259,10 +1187,6 @@ fallback synthetic_source for text field:
---
synthetic_source with copy_to and ignored values:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_copy_to_fix"]
- reason: requires copy_to support in synthetic source
-
- do:
indices.create:
index: test
@@ -1328,10 +1252,6 @@ synthetic_source with copy_to and ignored values:
---
synthetic_source with copy_to field having values in source:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_copy_to_fix"]
- reason: requires copy_to support in synthetic source
-
- do:
indices.create:
index: test
@@ -1392,10 +1312,6 @@ synthetic_source with copy_to field having values in source:
---
synthetic_source with ignored source field using copy_to:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_copy_to_fix"]
- reason: requires copy_to support in synthetic source
-
- do:
indices.create:
index: test
@@ -1457,10 +1373,6 @@ synthetic_source with ignored source field using copy_to:
---
synthetic_source with copy_to field from dynamic template having values in source:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_copy_to_fix"]
- reason: requires copy_to support in synthetic source
-
- do:
indices.create:
index: test
@@ -1555,7 +1467,6 @@ synthetic_source with copy_to field from dynamic template having values in sourc
---
synthetic_source with copy_to and invalid values for copy:
- requires:
- cluster_features: ["mapper.source.synthetic_source_copy_to_fix"]
reason: requires copy_to support in synthetic source
test_runner_features: "contains"
@@ -1592,10 +1503,6 @@ synthetic_source with copy_to and invalid values for copy:
---
synthetic_source with copy_to pointing inside object:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"]
- reason: requires copy_to support in synthetic source
-
- do:
indices.create:
index: test
@@ -1697,10 +1604,6 @@ synthetic_source with copy_to pointing inside object:
---
synthetic_source with copy_to pointing to ambiguous field:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"]
- reason: requires copy_to support in synthetic source
-
- do:
indices.create:
index: test
@@ -1745,10 +1648,6 @@ synthetic_source with copy_to pointing to ambiguous field:
---
synthetic_source with copy_to pointing to ambiguous field and subobjects false:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"]
- reason: requires copy_to support in synthetic source
-
- do:
indices.create:
index: test
@@ -1794,10 +1693,6 @@ synthetic_source with copy_to pointing to ambiguous field and subobjects false:
---
synthetic_source with copy_to pointing to ambiguous field and subobjects auto:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"]
- reason: requires copy_to support in synthetic source
-
- do:
indices.create:
index: test
@@ -1845,7 +1740,6 @@ synthetic_source with copy_to pointing to ambiguous field and subobjects auto:
synthetic_source with copy_to pointing at dynamic field:
- requires:
test_runner_features: contains
- cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"]
reason: requires copy_to support in synthetic source
- do:
@@ -1931,10 +1825,6 @@ synthetic_source with copy_to pointing at dynamic field:
---
synthetic_source with copy_to pointing inside dynamic object:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"]
- reason: requires copy_to support in synthetic source
-
- do:
indices.create:
index: test
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml
index 095665e9337b..e51d527593d4 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml
@@ -5,10 +5,6 @@ setup:
---
object param - store complex object:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -70,10 +66,6 @@ object param - store complex object:
---
object param - object array:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -135,10 +127,6 @@ object param - object array:
---
object param - object array within array:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -179,10 +167,6 @@ object param - object array within array:
---
object param - no object array:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -222,10 +206,6 @@ object param - no object array:
---
object param - field ordering in object array:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -272,10 +252,6 @@ object param - field ordering in object array:
---
object param - nested object array next to other fields:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -330,7 +306,7 @@ object param - nested object array next to other fields:
---
object param - nested object with stored array:
- requires:
- cluster_features: ["mapper.ignored_source.always_store_object_arrays_in_nested", "mapper.bwc_workaround_9_0"]
+ cluster_features: ["mapper.ignored_source.always_store_object_arrays_in_nested"]
reason: requires fix to object array handling
- do:
@@ -379,10 +355,6 @@ object param - nested object with stored array:
---
index param - nested array within array:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -427,10 +399,6 @@ index param - nested array within array:
---
# 112156
stored field under object with store_array_source:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_stored_fields_advance_fix", "mapper.bwc_workaround_9_0"]
- reason: requires bug fix to be implemented
-
- do:
indices.create:
index: test
@@ -477,10 +445,6 @@ stored field under object with store_array_source:
---
field param - keep root array:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep"]
- reason: requires keeping array source
-
- do:
indices.create:
index: test
@@ -535,10 +499,6 @@ field param - keep root array:
---
field param - keep nested array:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep"]
- reason: requires keeping array source
-
- do:
indices.create:
index: test
@@ -605,7 +565,6 @@ field param - keep nested array:
field param - keep root singleton fields:
- requires:
test_runner_features: close_to
- cluster_features: ["mapper.synthetic_source_keep"]
reason: requires keeping singleton source
- do:
@@ -695,7 +654,6 @@ field param - keep root singleton fields:
field param - keep nested singleton fields:
- requires:
test_runner_features: close_to
- cluster_features: ["mapper.synthetic_source_keep"]
reason: requires keeping singleton source
- do:
@@ -776,10 +734,6 @@ field param - keep nested singleton fields:
---
field param - nested array within array:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -820,10 +774,6 @@ field param - nested array within array:
---
index param - root arrays:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"]
- reason: requires keeping array source
-
- do:
indices.create:
index: test
@@ -900,10 +850,6 @@ index param - root arrays:
---
index param - dynamic root arrays:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep"]
- reason: requires keeping array source
-
- do:
indices.create:
index: test
@@ -952,10 +898,6 @@ index param - dynamic root arrays:
---
index param - object array within array:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep"]
- reason: requires keeping array source
-
- do:
indices.create:
index: test
@@ -1001,10 +943,6 @@ index param - object array within array:
---
index param - no object array:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep"]
- reason: requires keeping array source
-
- do:
indices.create:
index: test
@@ -1045,10 +983,6 @@ index param - no object array:
---
index param - field ordering:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep"]
- reason: requires keeping array source
-
- do:
indices.create:
index: test
@@ -1095,10 +1029,6 @@ index param - field ordering:
---
index param - nested arrays:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep"]
- reason: requires keeping array source
-
- do:
indices.create:
index: test
@@ -1162,10 +1092,6 @@ index param - nested arrays:
---
index param - nested object with stored array:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep"]
- reason: requires keeping array source
-
- do:
indices.create:
index: test
@@ -1213,10 +1139,6 @@ index param - nested object with stored array:
---
index param - flattened fields:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"]
- reason: requires keeping array source
-
- do:
indices.create:
index: test
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml
index 3d82539944a9..89816be5ca8e 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml
@@ -453,8 +453,6 @@
---
"Composable index templates that include subobjects: auto at root":
- requires:
- cluster_features: ["mapper.subobjects_auto"]
- reason: "https://github.com/elastic/elasticsearch/issues/96768 fixed at 8.11.0"
test_runner_features: "allowed_warnings"
- do:
@@ -504,8 +502,6 @@
---
"Composable index templates that include subobjects: auto on arbitrary field":
- requires:
- cluster_features: ["mapper.subobjects_auto"]
- reason: "https://github.com/elastic/elasticsearch/issues/96768 fixed at 8.11.0"
test_runner_features: "allowed_warnings"
- do:
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/20_nested.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/20_nested.yml
index c88d638199db..d07d03cb7146 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/20_nested.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/20_nested.yml
@@ -1,8 +1,5 @@
---
sort doc with nested object:
- - requires:
- cluster_features: ["mapper.index_sorting_on_nested"]
- reason: uses index sorting on nested fields
- do:
indices.create:
index: test
@@ -66,9 +63,6 @@ sort doc with nested object:
---
sort doc on nested field:
- - requires:
- cluster_features: [ "mapper.index_sorting_on_nested" ]
- reason: uses index sorting on nested fields
- do:
catch: /cannot apply index sort to field \[nested_field\.foo\] under nested object \[nested_field\]/
indices.create:
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml
index 07af3fb52b92..2a31b3bd387c 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml
@@ -312,7 +312,6 @@ override sort mode settings:
---
override sort field using nested field type in sorting:
- requires:
- cluster_features: ["mapper.index_sorting_on_nested"]
test_runner_features: [ capabilities ]
capabilities:
- method: PUT
@@ -358,9 +357,6 @@ override sort field using nested field type in sorting:
---
override sort field using nested field type:
- - requires:
- cluster_features: ["mapper.index_sorting_on_nested"]
- reason: "Support for index sorting on indexes with nested objects required"
- do:
indices.create:
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml
index 084f104932d9..8485aba0ecc6 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml
@@ -55,9 +55,6 @@ keyword:
---
keyword with normalizer:
- - requires:
- cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ]
- reason: support for normalizer on keyword fields
- do:
indices.create:
index: test-keyword-with-normalizer
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml
index 9d6e8da8c1e1..2a14c291d5d3 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml
@@ -417,7 +417,6 @@
- requires:
test_runner_features: [arbitrary_key]
- cluster_features: ["mapper.query_index_mode"]
reason: "_ignored_source added to mappings"
- do:
@@ -511,10 +510,6 @@
---
"Lucene segment level fields stats":
- - requires:
- cluster_features: ["mapper.segment_level_fields_stats"]
- reason: "segment level fields stats"
-
- do:
indices.create:
index: index1
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/90_fs_watermark_stats.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/90_fs_watermark_stats.yml
index 3ec854e93d82..20e9d92a3608 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/90_fs_watermark_stats.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/90_fs_watermark_stats.yml
@@ -1,8 +1,6 @@
---
"Allocation stats":
- requires:
- cluster_features: ["stats.include_disk_thresholds"]
- reason: "fs watermark stats was added in 8.15.0"
test_runner_features: [arbitrary_key]
- do:
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/10_basic.yml
index 3432a1e34c01..6ca17cc9cdce 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/10_basic.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/10_basic.yml
@@ -520,10 +520,6 @@ setup:
---
"Null bounds":
- - requires:
- cluster_features: ["mapper.range.null_values_off_by_one_fix"]
- reason: fixed in 8.15.0
-
- do:
index:
index: test
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/60_unified_matched_fields.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/60_unified_matched_fields.yml
index bd14fb182ac5..94db54d15294 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/60_unified_matched_fields.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/60_unified_matched_fields.yml
@@ -1,8 +1,4 @@
setup:
- - requires:
- cluster_features: 'unified_highlighter_matched_fields'
- reason: 'test requires unified highlighter to support matched_fields'
-
- do:
indices.create:
index: index1
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/101_knn_nested_search_bits.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/101_knn_nested_search_bits.yml
index a3d920d903ae..bc4e262ea53c 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/101_knn_nested_search_bits.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/101_knn_nested_search_bits.yml
@@ -1,8 +1,6 @@
setup:
- requires:
- cluster_features: "mapper.vectors.bit_vectors"
test_runner_features: close_to
- reason: 'bit vectors added in 8.15'
- do:
indices.create:
index: test
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/110_knn_query_with_filter.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/110_knn_query_with_filter.yml
index 3d4841a16d82..cffc12a8d24a 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/110_knn_query_with_filter.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/110_knn_query_with_filter.yml
@@ -59,9 +59,6 @@ setup:
---
"Simple knn query":
- - requires:
- cluster_features: "search.vectors.k_param_supported"
- reason: 'k param for knn as query is required'
- do:
search:
index: my_index
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/190_knn_query-with-k-param.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/190_knn_query-with-k-param.yml
index f6538b573809..c92c88df9164 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/190_knn_query-with-k-param.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/190_knn_query-with-k-param.yml
@@ -1,8 +1,6 @@
# test how knn query interact with other queries
setup:
- requires:
- cluster_features: "search.vectors.k_param_supported"
- reason: 'k param for knn as query is required'
test_runner_features: close_to
- do:
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_bbq_hnsw.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_bbq_hnsw.yml
index 3f81c0044d17..abde3e86dd05 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_bbq_hnsw.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_bbq_hnsw.yml
@@ -1,7 +1,4 @@
setup:
- - requires:
- cluster_features: "mapper.vectors.bbq"
- reason: 'kNN float to better-binary quantization is required'
- do:
indices.create:
index: bbq_hnsw
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml
index baf568762dd1..9b27aea4b1db 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml
@@ -1,7 +1,4 @@
setup:
- - requires:
- cluster_features: "mapper.vectors.int4_quantization"
- reason: 'kNN float to half-byte quantization is required'
- do:
indices.create:
index: hnsw_byte_quantized
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_bbq_flat.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_bbq_flat.yml
index 0bc111576c2a..2541de7023bf 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_bbq_flat.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_bbq_flat.yml
@@ -1,7 +1,4 @@
setup:
- - requires:
- cluster_features: "mapper.vectors.bbq"
- reason: 'kNN float to better-binary quantization is required'
- do:
indices.create:
index: bbq_flat
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int4_flat.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int4_flat.yml
index 0e0180e58fd9..f9f8d56e1d9c 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int4_flat.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int4_flat.yml
@@ -1,7 +1,4 @@
setup:
- - requires:
- cluster_features: "mapper.vectors.int4_quantization"
- reason: 'kNN float to half-byte quantization is required'
- do:
indices.create:
index: int4_flat
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml
index 680433a5945f..ef2ae3ba7ee0 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml
@@ -1,8 +1,4 @@
setup:
- - requires:
- cluster_features: "mapper.vectors.bit_vectors"
- reason: 'mapper.vectors.bit_vectors'
-
- do:
indices.create:
index: test
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml
index 783f08a5d4ff..07261e6a30c7 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml
@@ -1,8 +1,4 @@
setup:
- - requires:
- cluster_features: "mapper.vectors.bit_vectors"
- reason: 'mapper.vectors.bit_vectors'
-
- do:
indices.create:
index: test
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml
index 44d966b76f34..8915325c3a67 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml
@@ -1128,10 +1128,6 @@ fetch geo_point:
---
"Test with subobjects: auto":
- - requires:
- cluster_features: "mapper.subobjects_auto"
- reason: requires support for subobjects auto setting
-
- do:
indices.create:
index: test
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/530_ignore_above_stored_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/530_ignore_above_stored_source.yml
index 1730a49f743d..7e00cbb01c58 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/530_ignore_above_stored_source.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/530_ignore_above_stored_source.yml
@@ -1,8 +1,5 @@
---
ignore_above mapping level setting:
- - requires:
- cluster_features: [ "mapper.ignore_above_index_level_setting" ]
- reason: introduce ignore_above index level setting
- do:
indices.create:
index: test
@@ -42,9 +39,6 @@ ignore_above mapping level setting:
---
ignore_above mapping level setting on arrays:
- - requires:
- cluster_features: [ "mapper.ignore_above_index_level_setting" ]
- reason: introduce ignore_above index level setting
- do:
indices.create:
index: test
@@ -84,9 +78,6 @@ ignore_above mapping level setting on arrays:
---
ignore_above mapping overrides setting:
- - requires:
- cluster_features: [ "mapper.ignore_above_index_level_setting" ]
- reason: introduce ignore_above index level setting
- do:
indices.create:
index: test
@@ -128,9 +119,6 @@ ignore_above mapping overrides setting:
---
ignore_above mapping overrides setting on arrays:
- - requires:
- cluster_features: [ "mapper.ignore_above_index_level_setting" ]
- reason: introduce ignore_above index level setting
- do:
indices.create:
index: test
@@ -172,9 +160,6 @@ ignore_above mapping overrides setting on arrays:
---
date ignore_above index level setting:
- - requires:
- cluster_features: [ "mapper.ignore_above_index_level_setting" ]
- reason: introduce ignore_above index level setting
- do:
indices.create:
index: test
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml
index 772c3c24170c..045f757b0830 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml
@@ -5,9 +5,6 @@ setup:
---
ignore_above mapping level setting:
- - requires:
- cluster_features: [ "mapper.ignore_above_index_level_setting" ]
- reason: introduce ignore_above index level setting
- do:
indices.create:
index: test
@@ -47,9 +44,6 @@ ignore_above mapping level setting:
---
ignore_above mapping level setting on arrays:
- - requires:
- cluster_features: [ "mapper.flattened.ignore_above_with_arrays_support" ]
- reason: requires support of ignore_above with arrays for flattened fields
- do:
indices.create:
index: test
@@ -90,9 +84,6 @@ ignore_above mapping level setting on arrays:
---
ignore_above mapping overrides setting:
- - requires:
- cluster_features: [ "mapper.ignore_above_index_level_setting" ]
- reason: introduce ignore_above index level setting
- do:
indices.create:
index: test
@@ -135,9 +126,6 @@ ignore_above mapping overrides setting:
---
ignore_above mapping overrides setting on arrays:
- - requires:
- cluster_features: [ "mapper.ignore_above_index_level_setting" ]
- reason: introduce ignore_above index level setting
- do:
indices.create:
index: test
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/550_ignore_above_invalid.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/550_ignore_above_invalid.yml
index 3c29845871fe..6e711ee143b0 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/550_ignore_above_invalid.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/550_ignore_above_invalid.yml
@@ -16,9 +16,6 @@ ignore_above index setting negative value:
---
keyword ignore_above mapping setting negative value:
- - requires:
- cluster_features: [ "mapper.ignore_above_index_level_setting" ]
- reason: introduce ignore_above index level setting
- do:
catch: bad_request
indices.create:
@@ -32,9 +29,6 @@ keyword ignore_above mapping setting negative value:
---
flattened ignore_above mapping setting negative value:
- - requires:
- cluster_features: [ "mapper.ignore_above_index_level_setting" ]
- reason: introduce ignore_above index level setting
- do:
catch: bad_request
indices.create:
@@ -48,9 +42,6 @@ flattened ignore_above mapping setting negative value:
---
wildcard ignore_above mapping setting negative value:
- - requires:
- cluster_features: [ "mapper.ignore_above_index_level_setting" ]
- reason: introduce ignore_above index level setting
- do:
catch: bad_request
indices.create:
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/600_flattened_ignore_above.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/600_flattened_ignore_above.yml
index a4a9b1aaecb2..71e0c2d147c1 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/600_flattened_ignore_above.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/600_flattened_ignore_above.yml
@@ -1,8 +1,5 @@
---
flattened ignore_above single-value field:
- - requires:
- cluster_features: [ "flattened.ignore_above_support" ]
- reason: introduce ignore_above support in flattened fields
- do:
indices.create:
index: test
@@ -65,9 +62,6 @@ flattened ignore_above single-value field:
---
flattened ignore_above multi-value field:
- - requires:
- cluster_features: [ "flattened.ignore_above_support" ]
- reason: introduce ignore_above support in flattened fields
- do:
indices.create:
index: test
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml
index da0f00d96053..70a3b0253c78 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml
@@ -119,10 +119,6 @@ setup:
- skip:
features: headers
- - requires:
- cluster_features: ["simulate.mapping.validation"]
- reason: "ingest simulate index mapping validation added in 8.16"
-
- do:
headers:
Content-Type: application/json
@@ -265,10 +261,6 @@ setup:
- headers
- allowed_warnings
- - requires:
- cluster_features: ["simulate.mapping.validation.templates"]
- reason: "ingest simulate index mapping validation added in 8.16"
-
- do:
indices.put_template:
name: v1_template
@@ -401,10 +393,6 @@ setup:
- headers
- allowed_warnings
- - requires:
- cluster_features: ["simulate.mapping.validation.templates"]
- reason: "ingest simulate index mapping validation added in 8.16"
-
- do:
allowed_warnings:
- "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation"
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/140_routing_path.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/140_routing_path.yml
index 616afd3cf67a..1e841c8893fc 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/140_routing_path.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/140_routing_path.yml
@@ -122,8 +122,6 @@ missing dimension on routing path field:
multi-value routing path field succeeds:
- requires:
test_runner_features: close_to
- cluster_features: ["routing.multi_value_routing_path"]
- reason: support for multi-value dimensions
- do:
indices.create:
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml
index beba6f2752a1..5a5ae03ab938 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml
@@ -65,9 +65,6 @@ setup:
---
generates a consistent id:
- - requires:
- cluster_features: "tsdb.ts_routing_hash_doc_value_parse_byte_ref"
- reason: _tsid routing hash doc value parsing has been fixed
- do:
bulk:
refresh: true
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml
index dae50704dd0d..a8d256bbc097 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml
@@ -340,9 +340,6 @@ sort by tsid:
---
aggs by index_mode:
- - requires:
- cluster_features: ["mapper.query_index_mode"]
- reason: require _index_mode metadata field
- do:
search:
index: test
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java
index 420f6427a55e..68e65b16aa3a 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java
@@ -519,7 +519,7 @@ public class IndicesRequestIT extends ESIntegTestCase {
public void testGetMappings() {
interceptTransportActions(GetMappingsAction.NAME);
- GetMappingsRequest getMappingsRequest = new GetMappingsRequest().indices(randomIndicesOrAliases());
+ GetMappingsRequest getMappingsRequest = new GetMappingsRequest(TEST_REQUEST_TIMEOUT).indices(randomIndicesOrAliases());
internalCluster().coordOnlyNodeClient().admin().indices().getMappings(getMappingsRequest).actionGet();
clearInterceptedActions();
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java
index ea7cec710e31..e46a0e2ab65e 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java
@@ -71,7 +71,7 @@ public class AutoCreateSystemIndexIT extends ESIntegTestCase {
CreateIndexRequest request = new CreateIndexRequest(PRIMARY_INDEX_NAME);
client().execute(AutoCreateAction.INSTANCE, request).get();
- GetIndexResponse response = indicesAdmin().prepareGetIndex().addIndices(PRIMARY_INDEX_NAME).get();
+ GetIndexResponse response = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(PRIMARY_INDEX_NAME).get();
assertThat(response.indices().length, is(1));
assertThat(response.aliases().size(), is(1));
assertThat(response.aliases().get(PRIMARY_INDEX_NAME).size(), is(1));
@@ -85,7 +85,7 @@ public class AutoCreateSystemIndexIT extends ESIntegTestCase {
CreateIndexRequest request = new CreateIndexRequest(INDEX_NAME);
client().execute(AutoCreateAction.INSTANCE, request).get();
- GetIndexResponse response = indicesAdmin().prepareGetIndex().addIndices(PRIMARY_INDEX_NAME).get();
+ GetIndexResponse response = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(PRIMARY_INDEX_NAME).get();
assertThat(response.indices().length, is(1));
assertThat(response.aliases().size(), is(1));
assertThat(response.aliases().get(PRIMARY_INDEX_NAME).size(), is(1));
@@ -99,7 +99,7 @@ public class AutoCreateSystemIndexIT extends ESIntegTestCase {
CreateIndexRequest request = new CreateIndexRequest(INDEX_NAME + "-2");
client().execute(AutoCreateAction.INSTANCE, request).get();
- GetIndexResponse response = indicesAdmin().prepareGetIndex().addIndices(INDEX_NAME + "-2").get();
+ GetIndexResponse response = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(INDEX_NAME + "-2").get();
assertThat(response.indices().length, is(1));
assertThat(response.aliases().size(), is(1));
assertThat(response.aliases().get(INDEX_NAME + "-2").size(), is(1));
@@ -144,7 +144,9 @@ public class AutoCreateSystemIndexIT extends ESIntegTestCase {
CreateIndexRequest request = new CreateIndexRequest(UnmanagedSystemIndexTestPlugin.SYSTEM_INDEX_NAME);
client().execute(AutoCreateAction.INSTANCE, request).get();
- GetIndexResponse response = indicesAdmin().prepareGetIndex().addIndices(UnmanagedSystemIndexTestPlugin.SYSTEM_INDEX_NAME).get();
+ GetIndexResponse response = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT)
+ .addIndices(UnmanagedSystemIndexTestPlugin.SYSTEM_INDEX_NAME)
+ .get();
assertThat(response.indices().length, is(1));
Settings settings = response.settings().get(UnmanagedSystemIndexTestPlugin.SYSTEM_INDEX_NAME);
assertThat(settings, notNullValue());
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java
index 152d8405c312..20ef20867337 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java
@@ -120,7 +120,7 @@ public class CreateIndexIT extends ESIntegTestCase {
)
);
- GetMappingsResponse response = indicesAdmin().prepareGetMappings("test").get();
+ GetMappingsResponse response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test").get();
MappingMetadata mappings = response.mappings().get("test");
assertNotNull(mappings);
@@ -130,7 +130,7 @@ public class CreateIndexIT extends ESIntegTestCase {
public void testEmptyNestedMappings() throws Exception {
assertAcked(prepareCreate("test").setMapping(XContentFactory.jsonBuilder().startObject().endObject()));
- GetMappingsResponse response = indicesAdmin().prepareGetMappings("test").get();
+ GetMappingsResponse response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test").get();
MappingMetadata mappings = response.mappings().get("test");
assertNotNull(mappings);
@@ -150,7 +150,7 @@ public class CreateIndexIT extends ESIntegTestCase {
prepareCreate("test").setMapping(XContentFactory.jsonBuilder().startObject().startObject("_doc").endObject().endObject())
);
- GetMappingsResponse response = indicesAdmin().prepareGetMappings("test").get();
+ GetMappingsResponse response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test").get();
MappingMetadata mappings = response.mappings().get("test");
assertNotNull(mappings);
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java
index 867ca89f9e7f..b9dadf86c334 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java
@@ -357,8 +357,9 @@ public class CreateSystemIndicesIT extends ESIntegTestCase {
* Fetch the mappings and settings for {@link TestSystemIndexDescriptor#INDEX_NAME} and verify that they match the expected values.
*/
private void assertMappingsAndSettings(String expectedMappings, String concreteIndex) {
- final GetMappingsResponse getMappingsResponse = indicesAdmin().getMappings(new GetMappingsRequest().indices(INDEX_NAME))
- .actionGet();
+ final GetMappingsResponse getMappingsResponse = indicesAdmin().getMappings(
+ new GetMappingsRequest(TEST_REQUEST_TIMEOUT).indices(INDEX_NAME)
+ ).actionGet();
final Map mappings = getMappingsResponse.getMappings();
assertThat(
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java
index 3945c3a48f0a..ddd3c8e53773 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java
@@ -46,7 +46,7 @@ public class GetIndexIT extends ESIntegTestCase {
}
public void testSimple() {
- GetIndexResponse response = indicesAdmin().prepareGetIndex().addIndices("idx").get();
+ GetIndexResponse response = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("idx").get();
String[] indices = response.indices();
assertThat(indices, notNullValue());
assertThat(indices.length, equalTo(1));
@@ -58,7 +58,7 @@ public class GetIndexIT extends ESIntegTestCase {
public void testSimpleUnknownIndex() {
try {
- indicesAdmin().prepareGetIndex().addIndices("missing_idx").get();
+ indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("missing_idx").get();
fail("Expected IndexNotFoundException");
} catch (IndexNotFoundException e) {
assertThat(e.getMessage(), is("no such index [missing_idx]"));
@@ -66,7 +66,7 @@ public class GetIndexIT extends ESIntegTestCase {
}
public void testUnknownIndexWithAllowNoIndices() {
- GetIndexResponse response = indicesAdmin().prepareGetIndex()
+ GetIndexResponse response = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT)
.addIndices("missing_idx")
.setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN)
.get();
@@ -77,7 +77,7 @@ public class GetIndexIT extends ESIntegTestCase {
}
public void testEmpty() {
- GetIndexResponse response = indicesAdmin().prepareGetIndex().addIndices("empty_idx").get();
+ GetIndexResponse response = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("empty_idx").get();
String[] indices = response.indices();
assertThat(indices, notNullValue());
assertThat(indices.length, equalTo(1));
@@ -88,7 +88,10 @@ public class GetIndexIT extends ESIntegTestCase {
}
public void testSimpleMapping() {
- GetIndexResponse response = runWithRandomFeatureMethod(indicesAdmin().prepareGetIndex().addIndices("idx"), Feature.MAPPINGS);
+ GetIndexResponse response = runWithRandomFeatureMethod(
+ indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("idx"),
+ Feature.MAPPINGS
+ );
String[] indices = response.indices();
assertThat(indices, notNullValue());
assertThat(indices.length, equalTo(1));
@@ -99,7 +102,10 @@ public class GetIndexIT extends ESIntegTestCase {
}
public void testSimpleAlias() {
- GetIndexResponse response = runWithRandomFeatureMethod(indicesAdmin().prepareGetIndex().addIndices("idx"), Feature.ALIASES);
+ GetIndexResponse response = runWithRandomFeatureMethod(
+ indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("idx"),
+ Feature.ALIASES
+ );
String[] indices = response.indices();
assertThat(indices, notNullValue());
assertThat(indices.length, equalTo(1));
@@ -110,7 +116,10 @@ public class GetIndexIT extends ESIntegTestCase {
}
public void testSimpleSettings() {
- GetIndexResponse response = runWithRandomFeatureMethod(indicesAdmin().prepareGetIndex().addIndices("idx"), Feature.SETTINGS);
+ GetIndexResponse response = runWithRandomFeatureMethod(
+ indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("idx"),
+ Feature.SETTINGS
+ );
String[] indices = response.indices();
assertThat(indices, notNullValue());
assertThat(indices.length, equalTo(1));
@@ -127,7 +136,7 @@ public class GetIndexIT extends ESIntegTestCase {
features.add(randomFrom(Feature.values()));
}
GetIndexResponse response = runWithRandomFeatureMethod(
- indicesAdmin().prepareGetIndex().addIndices("idx"),
+ indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("idx"),
features.toArray(new Feature[features.size()])
);
String[] indices = response.indices();
@@ -158,7 +167,7 @@ public class GetIndexIT extends ESIntegTestCase {
features.add(randomFrom(Feature.values()));
}
GetIndexResponse response = runWithRandomFeatureMethod(
- indicesAdmin().prepareGetIndex().addIndices("empty_idx"),
+ indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("empty_idx"),
features.toArray(new Feature[features.size()])
);
String[] indices = response.indices();
@@ -182,7 +191,7 @@ public class GetIndexIT extends ESIntegTestCase {
for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY, SETTING_READ_ONLY_ALLOW_DELETE)) {
try {
enableIndexBlock("idx", block);
- GetIndexResponse response = indicesAdmin().prepareGetIndex()
+ GetIndexResponse response = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT)
.addIndices("idx")
.addFeatures(Feature.MAPPINGS, Feature.ALIASES)
.get();
@@ -200,7 +209,7 @@ public class GetIndexIT extends ESIntegTestCase {
try {
enableIndexBlock("idx", SETTING_BLOCKS_METADATA);
assertBlocked(
- indicesAdmin().prepareGetIndex().addIndices("idx").addFeatures(Feature.MAPPINGS, Feature.ALIASES),
+ indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("idx").addFeatures(Feature.MAPPINGS, Feature.ALIASES),
INDEX_METADATA_BLOCK
);
} finally {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java
index 1f34646269f1..9c6189f35874 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java
@@ -838,7 +838,7 @@ public class RolloverIT extends ESIntegTestCase {
assertBusy(() -> {
try {
- indicesAdmin().prepareGetIndex().addIndices(writeIndexPrefix + "000002").get();
+ indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(writeIndexPrefix + "000002").get();
} catch (Exception e) {
logger.info("--> expecting second index to be created but it has not yet been created");
fail("expecting second index to exist");
@@ -857,7 +857,7 @@ public class RolloverIT extends ESIntegTestCase {
});
// We should *NOT* have a third index, it should have rolled over *exactly* once
- expectThrows(Exception.class, indicesAdmin().prepareGetIndex().addIndices(writeIndexPrefix + "000003"));
+ expectThrows(Exception.class, indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(writeIndexPrefix + "000003"));
}
public void testRolloverConcurrently() throws Exception {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java
index e45555b1dec1..2cd319d14832 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java
@@ -55,7 +55,7 @@ public class BulkIntegrationIT extends ESIntegTestCase {
bulkBuilder.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON);
bulkBuilder.get();
assertBusy(() -> {
- GetMappingsResponse mappingsResponse = indicesAdmin().prepareGetMappings().get();
+ GetMappingsResponse mappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT).get();
assertTrue(mappingsResponse.getMappings().containsKey("logstash-2014.03.30"));
});
}
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java
index e0ecc0815945..11dbe38a0824 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java
@@ -258,11 +258,14 @@ public class SimpleClusterStateIT extends ESIntegTestCase {
.setTimeout(TimeValue.timeValueMinutes(1))
);
ensureGreen(); // wait for green state, so its both green, and there are no more pending events
- MappingMetadata masterMappingMetadata = indicesAdmin().prepareGetMappings("test").get().getMappings().get("test");
+ MappingMetadata masterMappingMetadata = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test")
+ .get()
+ .getMappings()
+ .get("test");
for (Client client : clients()) {
MappingMetadata mappingMetadata = client.admin()
.indices()
- .prepareGetMappings("test")
+ .prepareGetMappings(TEST_REQUEST_TIMEOUT, "test")
.setLocal(true)
.get()
.getMappings()
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/MetadataNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/MetadataNodesIT.java
index 584653e6220d..f8f88fadd074 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/MetadataNodesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/MetadataNodesIT.java
@@ -118,7 +118,7 @@ public class MetadataNodesIT extends ESIntegTestCase {
)
.get();
- GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(index).get();
+ GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, index).get();
assertNotNull(
((Map) (getMappingsResponse.getMappings().get(index).getSourceAsMap().get("properties"))).get("integer_field")
);
@@ -149,7 +149,7 @@ public class MetadataNodesIT extends ESIntegTestCase {
)
.get();
- getMappingsResponse = indicesAdmin().prepareGetMappings(index).get();
+ getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, index).get();
assertNotNull(
((Map) (getMappingsResponse.getMappings().get(index).getSourceAsMap().get("properties"))).get("float_field")
);
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java
index 6c7754932af6..f06810377771 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java
@@ -24,6 +24,7 @@ import org.elasticsearch.action.get.MultiGetRequestBuilder;
import org.elasticsearch.action.get.MultiGetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
@@ -33,8 +34,10 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.CheckedFunction;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.index.IndexModule;
+import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.engine.EngineTestCase;
import org.elasticsearch.index.engine.VersionConflictEngineException;
+import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.ESIntegTestCase;
@@ -932,6 +935,102 @@ public class GetActionIT extends ESIntegTestCase {
);
}
+ public void testRealTimeGetNestedFields() {
+ String index = "test";
+ SourceFieldMapper.Mode sourceMode = randomFrom(SourceFieldMapper.Mode.values());
+ assertAcked(
+ prepareCreate(index).setMapping("title", "type=keyword", "author", "type=nested")
+ .setSettings(
+ indexSettings(1, 0).put("index.refresh_interval", -1)
+ .put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), sourceMode)
+ )
+ );
+ ensureGreen();
+ String source0 = """
+ {
+ "title": "t0",
+ "author": [
+ {
+ "name": "a0"
+ }
+ ]
+ }
+ """;
+ prepareIndex(index).setRefreshPolicy(WriteRequest.RefreshPolicy.NONE).setId("0").setSource(source0, XContentType.JSON).get();
+ // start tracking translog locations
+ assertTrue(client().prepareGet(index, "0").setRealtime(true).get().isExists());
+ String source1 = """
+ {
+ "title": ["t1"],
+ "author": [
+ {
+ "name": "a1"
+ }
+ ]
+ }
+ """;
+ prepareIndex(index).setRefreshPolicy(WriteRequest.RefreshPolicy.NONE).setId("1").setSource(source1, XContentType.JSON).get();
+ String source2 = """
+ {
+ "title": ["t1", "t2"],
+ "author": [
+ {
+ "name": "a1"
+ },
+ {
+ "name": "a2"
+ }
+ ]
+ }
+ """;
+ prepareIndex(index).setRefreshPolicy(WriteRequest.RefreshPolicy.NONE).setId("2").setSource(source2, XContentType.JSON).get();
+ String source3 = """
+ {
+ "title": ["t1", "t3", "t2"]
+ }
+ """;
+ prepareIndex(index).setRefreshPolicy(WriteRequest.RefreshPolicy.NONE).setId("3").setSource(source3, XContentType.JSON).get();
+ GetResponse translog1 = client().prepareGet(index, "1").setRealtime(true).get();
+ GetResponse translog2 = client().prepareGet(index, "2").setRealtime(true).get();
+ GetResponse translog3 = client().prepareGet(index, "3").setRealtime(true).get();
+ assertTrue(translog1.isExists());
+ assertTrue(translog2.isExists());
+ assertTrue(translog3.isExists());
+ switch (sourceMode) {
+ case STORED -> {
+ assertThat(translog1.getSourceAsBytesRef().utf8ToString(), equalTo(source1));
+ assertThat(translog2.getSourceAsBytesRef().utf8ToString(), equalTo(source2));
+ assertThat(translog3.getSourceAsBytesRef().utf8ToString(), equalTo(source3));
+ }
+ case SYNTHETIC -> {
+ assertThat(translog1.getSourceAsBytesRef().utf8ToString(), equalTo("""
+ {"author":{"name":"a1"},"title":"t1"}"""));
+ assertThat(translog2.getSourceAsBytesRef().utf8ToString(), equalTo("""
+ {"author":[{"name":"a1"},{"name":"a2"}],"title":["t1","t2"]}"""));
+ assertThat(translog3.getSourceAsBytesRef().utf8ToString(), equalTo("""
+ {"title":["t1","t2","t3"]}"""));
+ }
+ case DISABLED -> {
+ assertNull(translog1.getSourceAsBytesRef());
+ assertNull(translog2.getSourceAsBytesRef());
+ assertNull(translog3.getSourceAsBytesRef());
+ }
+ }
+ assertFalse(client().prepareGet(index, "1").setRealtime(false).get().isExists());
+ assertFalse(client().prepareGet(index, "2").setRealtime(false).get().isExists());
+ assertFalse(client().prepareGet(index, "3").setRealtime(false).get().isExists());
+ refresh(index);
+ GetResponse lucene1 = client().prepareGet(index, "1").setRealtime(randomBoolean()).get();
+ GetResponse lucene2 = client().prepareGet(index, "2").setRealtime(randomBoolean()).get();
+ GetResponse lucene3 = client().prepareGet(index, "3").setRealtime(randomBoolean()).get();
+ assertTrue(lucene1.isExists());
+ assertTrue(lucene2.isExists());
+ assertTrue(lucene3.isExists());
+ assertThat(translog1.getSourceAsBytesRef(), equalTo(lucene1.getSourceAsBytesRef()));
+ assertThat(translog2.getSourceAsBytesRef(), equalTo(lucene2.getSourceAsBytesRef()));
+ assertThat(translog3.getSourceAsBytesRef(), equalTo(lucene3.getSourceAsBytesRef()));
+ }
+
private void assertGetFieldsAlwaysWorks(String index, String docId, String[] fields) {
assertGetFieldsAlwaysWorks(index, docId, fields, null);
}
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java
index 802ba04375c4..24af560f608d 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java
@@ -97,7 +97,7 @@ public class HiddenIndexIT extends ESIntegTestCase {
assertAcked(indicesAdmin().prepareCreate("a_hidden_index").setSettings(Settings.builder().put("index.hidden", true).build()));
- GetMappingsResponse mappingsResponse = indicesAdmin().prepareGetMappings("a_hidden_index").get();
+ GetMappingsResponse mappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "a_hidden_index").get();
assertThat(mappingsResponse.mappings().size(), is(1));
MappingMetadata mappingMetadata = mappingsResponse.mappings().get("a_hidden_index");
assertNotNull(mappingMetadata);
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java
index 960ee2fd7ca6..6bca87ebd6e3 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java
@@ -49,7 +49,7 @@ public class LookupIndexModeIT extends ESIntegTestCase {
assertAcked(client().admin().indices().execute(TransportCreateIndexAction.TYPE, createRequest));
Settings settings = client().admin().indices().prepareGetSettings("hosts").get().getIndexToSettings().get("hosts");
assertThat(settings.get("index.mode"), equalTo("lookup"));
- assertThat(settings.get("index.auto_expand_replicas"), equalTo("0-all"));
+ assertNull(settings.get("index.auto_expand_replicas"));
Map allHosts = Map.of(
"192.168.1.2",
"Windows",
@@ -141,7 +141,6 @@ public class LookupIndexModeIT extends ESIntegTestCase {
Settings settings = client().admin().indices().prepareGetSettings("lookup-2").get().getIndexToSettings().get("lookup-2");
assertThat(settings.get("index.mode"), equalTo("lookup"));
assertThat(settings.get("index.number_of_shards"), equalTo("1"));
- assertThat(settings.get("index.auto_expand_replicas"), equalTo("0-all"));
ResizeRequest split = new ResizeRequest("lookup-3", "lookup-1");
split.setResizeType(ResizeType.SPLIT);
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java
index 2dad8500a309..71b5ba58b578 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java
@@ -96,7 +96,11 @@ public class DynamicMappingIT extends ESIntegTestCase {
client().prepareIndex("index").setId("1").setSource("a.x", 1).get();
client().prepareIndex("index").setId("2").setSource("a.y", 2).get();
- Map mappings = indicesAdmin().prepareGetMappings("index").get().mappings().get("index").sourceAsMap();
+ Map mappings = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "index")
+ .get()
+ .mappings()
+ .get("index")
+ .sourceAsMap();
assertTrue(new WriteField("properties.a", () -> mappings).exists());
assertTrue(new WriteField("properties.a.properties.x", () -> mappings).exists());
}
@@ -183,7 +187,7 @@ public class DynamicMappingIT extends ESIntegTestCase {
for (int i = 0; i < numberOfFieldsToCreate; ++i) {
assertTrue(client().prepareGet("index", Integer.toString(i)).get().isExists());
}
- GetMappingsResponse mappings = indicesAdmin().prepareGetMappings("index").get();
+ GetMappingsResponse mappings = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "index").get();
MappingMetadata indexMappings = mappings.getMappings().get("index");
assertNotNull(indexMappings);
Map typeMappingsMap = indexMappings.getSourceAsMap();
@@ -214,7 +218,11 @@ public class DynamicMappingIT extends ESIntegTestCase {
for (int i = 0; i < numberOfDocsToCreate; ++i) {
assertTrue(client().prepareGet("index", Integer.toString(i)).get().isExists());
}
- Map index = indicesAdmin().prepareGetMappings("index").get().getMappings().get("index").getSourceAsMap();
+ Map index = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "index")
+ .get()
+ .getMappings()
+ .get("index")
+ .getSourceAsMap();
for (int i = 0, j = 1; i < numberOfDocsToCreate; i++, j++) {
assertThat(new WriteField("properties.field" + i + ".type", () -> index).get(null), is(oneOf("long", "float")));
assertThat(new WriteField("properties.field" + j + ".type", () -> index).get(null), is(oneOf("long", "float")));
@@ -808,7 +816,11 @@ public class DynamicMappingIT extends ESIntegTestCase {
assertEquals(RestStatus.CREATED, indexResponse.status());
assertBusy(() -> {
- Map mappings = indicesAdmin().prepareGetMappings("test").get().mappings().get("test").sourceAsMap();
+ Map mappings = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test")
+ .get()
+ .mappings()
+ .get("test")
+ .sourceAsMap();
@SuppressWarnings("unchecked")
Map properties = (Map) mappings.get("properties");
assertEquals(4, properties.size());
@@ -853,7 +865,11 @@ public class DynamicMappingIT extends ESIntegTestCase {
assertEquals(RestStatus.CREATED, indexResponse.status());
assertBusy(() -> {
- Map mappings = indicesAdmin().prepareGetMappings("test").get().mappings().get("test").sourceAsMap();
+ Map mappings = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test")
+ .get()
+ .mappings()
+ .get("test")
+ .sourceAsMap();
Map properties = (Map) mappings.get("properties");
Map foo = (Map) properties.get("foo");
properties = (Map) foo.get("properties");
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java
index 4a7de4b0ebc2..1a51fc12fed8 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java
@@ -37,7 +37,7 @@ public class MultiFieldsIntegrationIT extends ESIntegTestCase {
public void testMultiFields() throws Exception {
assertAcked(indicesAdmin().prepareCreate("my-index").setMapping(createTypeSource()));
- GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("my-index").get();
+ GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "my-index").get();
MappingMetadata mappingMetadata = getMappingsResponse.mappings().get("my-index");
assertThat(mappingMetadata, not(nullValue()));
Map mappingSource = mappingMetadata.sourceAsMap();
@@ -53,7 +53,7 @@ public class MultiFieldsIntegrationIT extends ESIntegTestCase {
assertAcked(indicesAdmin().preparePutMapping("my-index").setSource(createPutMappingSource()));
- getMappingsResponse = indicesAdmin().prepareGetMappings("my-index").get();
+ getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "my-index").get();
mappingMetadata = getMappingsResponse.mappings().get("my-index");
assertThat(mappingMetadata, not(nullValue()));
mappingSource = mappingMetadata.sourceAsMap();
@@ -74,7 +74,7 @@ public class MultiFieldsIntegrationIT extends ESIntegTestCase {
public void testGeoPointMultiField() throws Exception {
assertAcked(indicesAdmin().prepareCreate("my-index").setMapping(createMappingSource("geo_point")));
- GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("my-index").get();
+ GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "my-index").get();
MappingMetadata mappingMetadata = getMappingsResponse.mappings().get("my-index");
assertThat(mappingMetadata, not(nullValue()));
Map mappingSource = mappingMetadata.sourceAsMap();
@@ -102,7 +102,7 @@ public class MultiFieldsIntegrationIT extends ESIntegTestCase {
public void testCompletionMultiField() throws Exception {
assertAcked(indicesAdmin().prepareCreate("my-index").setMapping(createMappingSource("completion")));
- GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("my-index").get();
+ GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "my-index").get();
MappingMetadata mappingMetadata = getMappingsResponse.mappings().get("my-index");
assertThat(mappingMetadata, not(nullValue()));
Map mappingSource = mappingMetadata.sourceAsMap();
@@ -123,7 +123,7 @@ public class MultiFieldsIntegrationIT extends ESIntegTestCase {
public void testIpMultiField() throws Exception {
assertAcked(indicesAdmin().prepareCreate("my-index").setMapping(createMappingSource("ip")));
- GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("my-index").get();
+ GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "my-index").get();
MappingMetadata mappingMetadata = getMappingsResponse.mappings().get("my-index");
assertThat(mappingMetadata, not(nullValue()));
Map mappingSource = mappingMetadata.sourceAsMap();
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java
index 92e5eb8e046b..88cca3308ac4 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java
@@ -54,7 +54,7 @@ public class PeerRecoveryRetentionLeaseCreationIT extends ESIntegTestCase {
ensureGreen(INDEX_NAME);
IndicesService service = internalCluster().getInstance(IndicesService.class, dataNode);
- String uuid = indicesAdmin().getIndex(new GetIndexRequest().indices(INDEX_NAME))
+ String uuid = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(INDEX_NAME))
.actionGet()
.getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID);
Path path = service.indexService(new Index(INDEX_NAME, uuid)).getShard(0).shardPath().getShardStatePath();
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java
index 652f4e02ffbc..c53cf3b56f65 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java
@@ -537,26 +537,26 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
}
verify(indicesAdmin().preparePutMapping("foo").setSource("field", "type=text"), false);
- assertThat(indicesAdmin().prepareGetMappings("foo").get().mappings().get("foo"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "foo").get().mappings().get("foo"), notNullValue());
verify(indicesAdmin().preparePutMapping("b*").setSource("field", "type=text"), false);
- assertThat(indicesAdmin().prepareGetMappings("bar").get().mappings().get("bar"), notNullValue());
- assertThat(indicesAdmin().prepareGetMappings("barbaz").get().mappings().get("barbaz"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "bar").get().mappings().get("bar"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "barbaz").get().mappings().get("barbaz"), notNullValue());
verify(indicesAdmin().preparePutMapping("_all").setSource("field", "type=text"), false);
- assertThat(indicesAdmin().prepareGetMappings("foo").get().mappings().get("foo"), notNullValue());
- assertThat(indicesAdmin().prepareGetMappings("foobar").get().mappings().get("foobar"), notNullValue());
- assertThat(indicesAdmin().prepareGetMappings("bar").get().mappings().get("bar"), notNullValue());
- assertThat(indicesAdmin().prepareGetMappings("barbaz").get().mappings().get("barbaz"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "foo").get().mappings().get("foo"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "foobar").get().mappings().get("foobar"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "bar").get().mappings().get("bar"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "barbaz").get().mappings().get("barbaz"), notNullValue());
verify(indicesAdmin().preparePutMapping().setSource("field", "type=text"), false);
- assertThat(indicesAdmin().prepareGetMappings("foo").get().mappings().get("foo"), notNullValue());
- assertThat(indicesAdmin().prepareGetMappings("foobar").get().mappings().get("foobar"), notNullValue());
- assertThat(indicesAdmin().prepareGetMappings("bar").get().mappings().get("bar"), notNullValue());
- assertThat(indicesAdmin().prepareGetMappings("barbaz").get().mappings().get("barbaz"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "foo").get().mappings().get("foo"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "foobar").get().mappings().get("foobar"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "bar").get().mappings().get("bar"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "barbaz").get().mappings().get("barbaz"), notNullValue());
verify(indicesAdmin().preparePutMapping("c*").setSource("field", "type=text"), true);
assertAcked(indicesAdmin().prepareClose("barbaz").get());
verify(indicesAdmin().preparePutMapping("barbaz").setSource("field", "type=text"), false);
- assertThat(indicesAdmin().prepareGetMappings("barbaz").get().mappings().get("barbaz"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "barbaz").get().mappings().get("barbaz"), notNullValue());
}
public static final class TestPlugin extends Plugin {
@@ -664,7 +664,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
}
static GetMappingsRequestBuilder getMapping(String... indices) {
- return indicesAdmin().prepareGetMappings(indices);
+ return indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, indices);
}
static GetSettingsRequestBuilder getSettings(String... indices) {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceIT.java
index de565605ff58..7264585337fc 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceIT.java
@@ -123,8 +123,9 @@ public class SystemIndexMappingUpdateServiceIT extends ESIntegTestCase {
* Fetch the mappings and settings for {@link TestSystemIndexDescriptor#INDEX_NAME} and verify that they match the expected values.
*/
private void assertMappingsAndSettings(String expectedMappings) {
- final GetMappingsResponse getMappingsResponse = indicesAdmin().getMappings(new GetMappingsRequest().indices(INDEX_NAME))
- .actionGet();
+ final GetMappingsResponse getMappingsResponse = indicesAdmin().getMappings(
+ new GetMappingsRequest(TEST_REQUEST_TIMEOUT).indices(INDEX_NAME)
+ ).actionGet();
final Map mappings = getMappingsResponse.getMappings();
assertThat(
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java
index 71fcb25c2e0b..e3092bda185f 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java
@@ -198,7 +198,7 @@ public class SimpleGetFieldMappingsIT extends ESIntegTestCase {
try {
enableIndexBlock("test", SETTING_BLOCKS_METADATA);
- assertBlocked(indicesAdmin().prepareGetMappings(), INDEX_METADATA_BLOCK);
+ assertBlocked(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT), INDEX_METADATA_BLOCK);
} finally {
disableIndexBlock("test", SETTING_BLOCKS_METADATA);
}
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java
index 20e59fab3bd0..023aa402b733 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java
@@ -42,7 +42,7 @@ public class SimpleGetMappingsIT extends ESIntegTestCase {
public void testGetMappingsWhereThereAreNone() {
createIndex("index");
- GetMappingsResponse response = indicesAdmin().prepareGetMappings().get();
+ GetMappingsResponse response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT).get();
assertThat(response.mappings().containsKey("index"), equalTo(true));
assertEquals(MappingMetadata.EMPTY_MAPPINGS, response.mappings().get("index"));
}
@@ -70,19 +70,19 @@ public class SimpleGetMappingsIT extends ESIntegTestCase {
assertThat(clusterHealth.isTimedOut(), equalTo(false));
// Get all mappings
- GetMappingsResponse response = indicesAdmin().prepareGetMappings().get();
+ GetMappingsResponse response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT).get();
assertThat(response.mappings().size(), equalTo(2));
assertThat(response.mappings().get("indexa"), notNullValue());
assertThat(response.mappings().get("indexb"), notNullValue());
// Get all mappings, via wildcard support
- response = indicesAdmin().prepareGetMappings("*").get();
+ response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "*").get();
assertThat(response.mappings().size(), equalTo(2));
assertThat(response.mappings().get("indexa"), notNullValue());
assertThat(response.mappings().get("indexb"), notNullValue());
// Get mappings in indexa
- response = indicesAdmin().prepareGetMappings("indexa").get();
+ response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "indexa").get();
assertThat(response.mappings().size(), equalTo(1));
assertThat(response.mappings().get("indexa"), notNullValue());
}
@@ -94,7 +94,7 @@ public class SimpleGetMappingsIT extends ESIntegTestCase {
for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) {
try {
enableIndexBlock("test", block);
- GetMappingsResponse response = indicesAdmin().prepareGetMappings().get();
+ GetMappingsResponse response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT).get();
assertThat(response.mappings().size(), equalTo(1));
assertNotNull(response.mappings().get("test"));
} finally {
@@ -104,7 +104,7 @@ public class SimpleGetMappingsIT extends ESIntegTestCase {
try {
enableIndexBlock("test", SETTING_BLOCKS_METADATA);
- assertBlocked(indicesAdmin().prepareGetMappings(), INDEX_METADATA_BLOCK);
+ assertBlocked(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT), INDEX_METADATA_BLOCK);
} finally {
disableIndexBlock("test", SETTING_BLOCKS_METADATA);
}
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
index 6f6e488d46b2..fa2598348a1c 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
@@ -109,7 +109,7 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase {
assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
- GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("test").get();
+ GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test").get();
assertThat(getMappingsResponse.mappings().get("test").source().toString(), equalTo("""
{"_doc":{"properties":{"body":{"type":"text"},"date":{"type":"integer"}}}}"""));
}
@@ -123,7 +123,7 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase {
assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
- GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("test").get();
+ GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test").get();
assertThat(getMappingsResponse.mappings().get("test").source().toString(), equalTo("""
{"_doc":{"properties":{"date":{"type":"integer"}}}}"""));
}
@@ -215,7 +215,10 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase {
.get();
assertThat(response.isAcknowledged(), equalTo(true));
- GetMappingsResponse getMappingResponse = client2.admin().indices().prepareGetMappings(indexName).get();
+ GetMappingsResponse getMappingResponse = client2.admin()
+ .indices()
+ .prepareGetMappings(TEST_REQUEST_TIMEOUT, indexName)
+ .get();
MappingMetadata mappings = getMappingResponse.getMappings().get(indexName);
@SuppressWarnings("unchecked")
Map properties = (Map) mappings.getSourceAsMap().get("properties");
@@ -284,7 +287,7 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase {
* Waits for the given mapping type to exists on the master node.
*/
private void assertMappingOnMaster(final String index, final String... fieldNames) {
- GetMappingsResponse response = indicesAdmin().prepareGetMappings(index).get();
+ GetMappingsResponse response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, index).get();
MappingMetadata mappings = response.getMappings().get(index);
assertThat(mappings, notNullValue());
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java
index c3eda84ee9e5..b1fd483cb9be 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java
@@ -87,16 +87,25 @@ public class FeatureStateResetApiIT extends ESIntegTestCase {
);
// verify that both indices are gone
- Exception e1 = expectThrows(IndexNotFoundException.class, indicesAdmin().prepareGetIndex().addIndices(systemIndex1));
+ Exception e1 = expectThrows(
+ IndexNotFoundException.class,
+ indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(systemIndex1)
+ );
assertThat(e1.getMessage(), containsString("no such index"));
- Exception e2 = expectThrows(IndexNotFoundException.class, indicesAdmin().prepareGetIndex().addIndices(associatedIndex));
+ Exception e2 = expectThrows(
+ IndexNotFoundException.class,
+ indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(associatedIndex)
+ );
assertThat(e2.getMessage(), containsString("no such index"));
- Exception e3 = expectThrows(IndexNotFoundException.class, indicesAdmin().prepareGetIndex().addIndices(systemIndex2));
+ Exception e3 = expectThrows(
+ IndexNotFoundException.class,
+ indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(systemIndex2)
+ );
assertThat(e3.getMessage(), containsString("no such index"));
- GetIndexResponse response = indicesAdmin().prepareGetIndex().addIndices("my_index").get();
+ GetIndexResponse response = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("my_index").get();
assertThat(response.getIndices(), arrayContaining("my_index"));
}
diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java
index ffd3d3d883f3..a26632d5b796 100644
--- a/server/src/main/java/module-info.java
+++ b/server/src/main/java/module-info.java
@@ -427,22 +427,14 @@ module org.elasticsearch.server {
provides org.elasticsearch.features.FeatureSpecification
with
- org.elasticsearch.action.admin.indices.stats.IndicesStatsFeatures,
org.elasticsearch.action.bulk.BulkFeatures,
org.elasticsearch.features.FeatureInfrastructureFeatures,
- org.elasticsearch.health.HealthFeatures,
- org.elasticsearch.cluster.metadata.MetadataFeatures,
- org.elasticsearch.rest.RestFeatures,
- org.elasticsearch.repositories.RepositoriesFeatures,
- org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures,
org.elasticsearch.rest.action.admin.cluster.ClusterRerouteFeatures,
org.elasticsearch.index.mapper.MapperFeatures,
org.elasticsearch.index.IndexFeatures,
- org.elasticsearch.ingest.IngestGeoIpFeatures,
org.elasticsearch.search.SearchFeatures,
org.elasticsearch.script.ScriptFeatures,
org.elasticsearch.search.retriever.RetrieversFeatures,
- org.elasticsearch.reservedstate.service.FileSettingsFeatures,
org.elasticsearch.action.admin.cluster.stats.ClusterStatsFeatures;
uses org.elasticsearch.plugins.internal.SettingsExtension;
diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java
index 237ad37982da..e735b3701ff0 100644
--- a/server/src/main/java/org/elasticsearch/TransportVersions.java
+++ b/server/src/main/java/org/elasticsearch/TransportVersions.java
@@ -157,6 +157,7 @@ public class TransportVersions {
public static final TransportVersion REPLACE_FAILURE_STORE_OPTIONS_WITH_SELECTOR_SYNTAX = def(8_821_00_0);
public static final TransportVersion ELASTIC_INFERENCE_SERVICE_UNIFIED_CHAT_COMPLETIONS_INTEGRATION = def(8_822_00_0);
public static final TransportVersion KQL_QUERY_TECH_PREVIEW = def(8_823_00_0);
+ public static final TransportVersion ESQL_PROFILE_ROWS_PROCESSED = def(8_824_00_0);
/*
* WARNING: DO NOT MERGE INTO MAIN!
diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java
index 28ef18e98727..7c6abbe7c788 100644
--- a/server/src/main/java/org/elasticsearch/action/ActionModule.java
+++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java
@@ -1016,7 +1016,7 @@ public class ActionModule extends AbstractModule {
// Desired nodes
registerHandler.accept(new RestGetDesiredNodesAction());
- registerHandler.accept(new RestUpdateDesiredNodesAction(clusterSupportsFeature));
+ registerHandler.accept(new RestUpdateDesiredNodesAction());
registerHandler.accept(new RestDeleteDesiredNodesAction());
for (ActionPlugin plugin : actionPlugins) {
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/AllocationStatsFeatures.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/AllocationStatsFeatures.java
deleted file mode 100644
index 164fc816ad36..000000000000
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/AllocationStatsFeatures.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.action.admin.cluster.allocation;
-
-import org.elasticsearch.features.FeatureSpecification;
-import org.elasticsearch.features.NodeFeature;
-
-import java.util.Set;
-
-public class AllocationStatsFeatures implements FeatureSpecification {
- public static final NodeFeature INCLUDE_DISK_THRESHOLD_SETTINGS = new NodeFeature("stats.include_disk_thresholds", true);
-
- @Override
- public Set getFeatures() {
- return Set.of(INCLUDE_DISK_THRESHOLD_SETTINGS);
- }
-}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java
index d929fb457d5d..23bf22e08985 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java
@@ -30,7 +30,6 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.features.FeatureService;
import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
@@ -49,7 +48,6 @@ public class TransportGetAllocationStatsAction extends TransportMasterNodeReadAc
private final AllocationStatsService allocationStatsService;
private final DiskThresholdSettings diskThresholdSettings;
- private final FeatureService featureService;
@Inject
public TransportGetAllocationStatsAction(
@@ -58,8 +56,7 @@ public class TransportGetAllocationStatsAction extends TransportMasterNodeReadAc
ThreadPool threadPool,
ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
- AllocationStatsService allocationStatsService,
- FeatureService featureService
+ AllocationStatsService allocationStatsService
) {
super(
TYPE.name(),
@@ -74,7 +71,6 @@ public class TransportGetAllocationStatsAction extends TransportMasterNodeReadAc
);
this.allocationStatsService = allocationStatsService;
this.diskThresholdSettings = new DiskThresholdSettings(clusterService.getSettings(), clusterService.getClusterSettings());
- this.featureService = featureService;
}
@Override
@@ -92,10 +88,7 @@ public class TransportGetAllocationStatsAction extends TransportMasterNodeReadAc
listener.onResponse(
new Response(
request.metrics().contains(Metric.ALLOCATIONS) ? allocationStatsService.stats() : Map.of(),
- request.metrics().contains(Metric.FS)
- && featureService.clusterHasFeature(clusterService.state(), AllocationStatsFeatures.INCLUDE_DISK_THRESHOLD_SETTINGS)
- ? diskThresholdSettings
- : null
+ request.metrics().contains(Metric.FS) ? diskThresholdSettings : null
)
);
}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/NodeCapability.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/NodeCapability.java
index 7d70e83f6558..6ff5d347ab05 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/NodeCapability.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/NodeCapability.java
@@ -41,4 +41,9 @@ public class NodeCapability extends BaseNodeResponse {
out.writeBoolean(supported);
}
+
+ @Override
+ public String toString() {
+ return "NodeCapability{supported=" + supported + '}';
+ }
}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/TransportNodesCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/TransportNodesCapabilitiesAction.java
index 9fede2ebb5be..beb0e1f927de 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/TransportNodesCapabilitiesAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/TransportNodesCapabilitiesAction.java
@@ -9,7 +9,6 @@
package org.elasticsearch.action.admin.cluster.node.capabilities;
-import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.support.ActionFilters;
@@ -20,11 +19,9 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.RestApiVersion;
import org.elasticsearch.core.UpdateForV9;
-import org.elasticsearch.features.FeatureService;
import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
-import org.elasticsearch.rest.action.admin.cluster.RestNodesCapabilitiesAction;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportRequest;
@@ -32,7 +29,6 @@ import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.List;
-import java.util.Optional;
import java.util.Set;
public class TransportNodesCapabilitiesAction extends TransportNodesAction<
@@ -45,7 +41,6 @@ public class TransportNodesCapabilitiesAction extends TransportNodesAction<
public static final ActionType TYPE = new ActionType<>("cluster:monitor/nodes/capabilities");
private final RestController restController;
- private final FeatureService featureService;
@Inject
public TransportNodesCapabilitiesAction(
@@ -53,8 +48,7 @@ public class TransportNodesCapabilitiesAction extends TransportNodesAction<
ClusterService clusterService,
TransportService transportService,
ActionFilters actionFilters,
- RestController restController,
- FeatureService featureService
+ RestController restController
) {
super(
TYPE.name(),
@@ -65,23 +59,6 @@ public class TransportNodesCapabilitiesAction extends TransportNodesAction<
threadPool.executor(ThreadPool.Names.MANAGEMENT)
);
this.restController = restController;
- this.featureService = featureService;
- }
-
- @Override
- protected void doExecute(Task task, NodesCapabilitiesRequest request, ActionListener listener) {
- if (featureService.clusterHasFeature(clusterService.state(), RestNodesCapabilitiesAction.CAPABILITIES_ACTION) == false) {
- // not everything in the cluster supports capabilities.
- // Therefore we don't support whatever it is we're being asked for
- listener.onResponse(new NodesCapabilitiesResponse(clusterService.getClusterName(), List.of(), List.of()) {
- @Override
- public Optional isSupported() {
- return Optional.of(false);
- }
- });
- } else {
- super.doExecute(task, request, listener);
- }
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java
index 7e3c38c73509..ca02d19749ae 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java
@@ -13,13 +13,18 @@ import org.elasticsearch.TransportVersions;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ActionType;
-import org.elasticsearch.action.support.master.MasterNodeReadRequest;
+import org.elasticsearch.action.support.local.LocalClusterStateRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.core.UpdateForV10;
+import org.elasticsearch.tasks.CancellableTask;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskId;
import java.io.IOException;
+import java.util.Map;
import java.util.Objects;
public class ClusterGetSettingsAction extends ActionType {
@@ -34,25 +39,29 @@ public class ClusterGetSettingsAction extends ActionType {
+ public static class Request extends LocalClusterStateRequest {
public Request(TimeValue masterNodeTimeout) {
super(masterNodeTimeout);
}
+ /**
+ * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC we must remain able to read these requests until
+ * we no longer need to support calling this action remotely.
+ */
+ @UpdateForV10(owner = UpdateForV10.Owner.CORE_INFRA)
public Request(StreamInput in) throws IOException {
super(in);
assert in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0);
}
@Override
- public void writeTo(StreamOutput out) throws IOException {
- assert out.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0);
- super.writeTo(out);
+ public ActionRequestValidationException validate() {
+ return null;
}
@Override
- public ActionRequestValidationException validate() {
- return null;
+ public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) {
+ return new CancellableTask(id, type, action, "", parentTaskId, headers);
}
}
@@ -79,20 +88,17 @@ public class ClusterGetSettingsAction extends ActionType {
private final SettingsFilter settingsFilter;
+ /**
+ * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC it must be registered with the TransportService until
+ * we no longer need to support calling this action remotely.
+ */
+ @UpdateForV10(owner = UpdateForV10.Owner.CORE_INFRA)
+ @SuppressWarnings("this-escape")
@Inject
public TransportClusterGetSettingsAction(
TransportService transportService,
ClusterService clusterService,
- ThreadPool threadPool,
SettingsFilter settingsFilter,
- ActionFilters actionFilters,
- IndexNameExpressionResolver indexNameExpressionResolver
+ ActionFilters actionFilters
) {
super(
ClusterGetSettingsAction.NAME,
- false,
- transportService,
- clusterService,
- threadPool,
actionFilters,
- ClusterGetSettingsAction.Request::new,
- indexNameExpressionResolver,
- ClusterGetSettingsAction.Response::new,
+ transportService.getTaskManager(),
+ clusterService,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
-
this.settingsFilter = settingsFilter;
+
+ transportService.registerRequestHandler(
+ actionName,
+ executor,
+ false,
+ true,
+ ClusterGetSettingsAction.Request::new,
+ (request, channel, task) -> executeDirect(task, request, new ChannelActionListener<>(channel))
+ );
}
@Override
- protected void masterOperation(
+ protected void localClusterStateOperation(
Task task,
ClusterGetSettingsAction.Request request,
ClusterState state,
ActionListener listener
) throws Exception {
+ ((CancellableTask) task).ensureNotCancelled();
Metadata metadata = state.metadata();
listener.onResponse(
new ClusterGetSettingsAction.Response(
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java
index 0d910cbfdeba..02b581ecbdda 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java
@@ -22,7 +22,7 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.features.NodeFeature;
-import org.elasticsearch.index.mapper.SourceFieldMapper;
+import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.xcontent.ToXContentFragment;
import org.elasticsearch.xcontent.XContentBuilder;
@@ -79,7 +79,7 @@ public final class MappingStats implements ToXContentFragment, Writeable {
}
AnalysisStats.countMapping(mappingCounts, indexMetadata);
- var sourceMode = SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.get(indexMetadata.getSettings());
+ var sourceMode = IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.get(indexMetadata.getSettings());
sourceModeUsageCount.merge(sourceMode.toString().toLowerCase(Locale.ENGLISH), 1, Integer::sum);
}
for (MappingMetadata mappingMetadata : project.getMappingsByHash().values()) {
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java
index be7aaeec8f69..05c44b55cf8b 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java
@@ -15,6 +15,7 @@ import org.elasticsearch.action.support.master.info.ClusterInfoRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.util.ArrayUtils;
+import org.elasticsearch.core.TimeValue;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.Task;
@@ -93,8 +94,8 @@ public class GetIndexRequest extends ClusterInfoRequest {
private boolean humanReadable = false;
private transient boolean includeDefaults = false;
- public GetIndexRequest() {
- super(IndicesOptions.strictExpandOpen());
+ public GetIndexRequest(TimeValue masterTimeout) {
+ super(masterTimeout, IndicesOptions.strictExpandOpen());
}
public GetIndexRequest(StreamInput in) throws IOException {
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestBuilder.java
index 51088124b084..18abb9e5c58e 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequestBuilder.java
@@ -12,11 +12,12 @@ package org.elasticsearch.action.admin.indices.get;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature;
import org.elasticsearch.action.support.master.info.ClusterInfoRequestBuilder;
import org.elasticsearch.client.internal.ElasticsearchClient;
+import org.elasticsearch.core.TimeValue;
public class GetIndexRequestBuilder extends ClusterInfoRequestBuilder {
- public GetIndexRequestBuilder(ElasticsearchClient client, String... indices) {
- super(client, GetIndexAction.INSTANCE, new GetIndexRequest().indices(indices));
+ public GetIndexRequestBuilder(ElasticsearchClient client, TimeValue masterTimeout, String... indices) {
+ super(client, GetIndexAction.INSTANCE, new GetIndexRequest(masterTimeout).indices(indices));
}
public GetIndexRequestBuilder setFeatures(Feature... features) {
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequest.java
index dd4114c94717..84789d8a2acf 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequest.java
@@ -12,6 +12,7 @@ package org.elasticsearch.action.admin.indices.mapping.get;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.info.ClusterInfoRequest;
import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.core.TimeValue;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
@@ -21,7 +22,9 @@ import java.util.Map;
public class GetMappingsRequest extends ClusterInfoRequest {
- public GetMappingsRequest() {}
+ public GetMappingsRequest(TimeValue masterTimeout) {
+ super(masterTimeout);
+ }
public GetMappingsRequest(StreamInput in) throws IOException {
super(in);
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java
index 3f5413858139..a12ba4f60c26 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java
@@ -11,13 +11,14 @@ package org.elasticsearch.action.admin.indices.mapping.get;
import org.elasticsearch.action.support.master.info.ClusterInfoRequestBuilder;
import org.elasticsearch.client.internal.ElasticsearchClient;
+import org.elasticsearch.core.TimeValue;
public class GetMappingsRequestBuilder extends ClusterInfoRequestBuilder<
GetMappingsRequest,
GetMappingsResponse,
GetMappingsRequestBuilder> {
- public GetMappingsRequestBuilder(ElasticsearchClient client, String... indices) {
- super(client, GetMappingsAction.INSTANCE, new GetMappingsRequest().indices(indices));
+ public GetMappingsRequestBuilder(ElasticsearchClient client, TimeValue masterTimeout, String... indices) {
+ super(client, GetMappingsAction.INSTANCE, new GetMappingsRequest(masterTimeout).indices(indices));
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java
index d2634bdc23ae..7917c311f7c3 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java
@@ -37,7 +37,6 @@ import org.elasticsearch.cluster.service.MasterServiceTaskQueue;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Iterators;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.Task;
@@ -59,8 +58,6 @@ public final class LazyRolloverAction extends ActionType {
private static final Logger logger = LogManager.getLogger(LazyRolloverAction.class);
- public static final NodeFeature DATA_STREAM_LAZY_ROLLOVER = new NodeFeature("data_stream.rollover.lazy", true);
-
public static final LazyRolloverAction INSTANCE = new LazyRolloverAction();
public static final String NAME = "indices:admin/data_stream/lazy_rollover";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java
index 67b6df150c45..6106e620521f 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java
@@ -12,7 +12,6 @@ package org.elasticsearch.action.admin.indices.stats;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.core.Nullable;
-import org.elasticsearch.features.NodeFeature;
import java.util.ArrayList;
import java.util.HashMap;
@@ -22,9 +21,6 @@ import java.util.Map;
public class IndexStats implements Iterable {
- // feature was effectively reverted but we still need to keep this constant around
- public static final NodeFeature REVERTED_TIER_CREATION_DATE = new NodeFeature("stats.tier_creation_date", true);
-
private final String index;
private final String uuid;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsFeatures.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsFeatures.java
deleted file mode 100644
index 558343db1023..000000000000
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsFeatures.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.action.admin.indices.stats;
-
-import org.elasticsearch.features.FeatureSpecification;
-import org.elasticsearch.features.NodeFeature;
-
-import java.util.Set;
-
-public class IndicesStatsFeatures implements FeatureSpecification {
-
- @Override
- public Set getFeatures() {
- return Set.of(IndexStats.REVERTED_TIER_CREATION_DATE);
- }
-}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java
index ce29d65ececf..003be58d1955 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java
@@ -12,16 +12,20 @@ package org.elasticsearch.action.admin.indices.template.post;
import org.elasticsearch.TransportVersions;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
-import org.elasticsearch.action.support.master.MasterNodeReadRequest;
+import org.elasticsearch.action.support.local.LocalClusterStateRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.Nullable;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.tasks.CancellableTask;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskId;
import java.io.IOException;
+import java.util.Map;
import java.util.Objects;
-public class SimulateIndexTemplateRequest extends MasterNodeReadRequest {
+public class SimulateIndexTemplateRequest extends LocalClusterStateRequest {
private String indexName;
@@ -30,14 +34,18 @@ public class SimulateIndexTemplateRequest extends MasterNodeReadRequest headers) {
+ return new CancellableTask(id, type, action, "", parentTaskId, headers);
+ }
+
public String getIndexName() {
return indexName;
}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java
index a521dac60e96..1a04b6e4d763 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java
@@ -12,13 +12,11 @@ package org.elasticsearch.action.admin.indices.template.post;
import org.elasticsearch.TransportVersions;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration;
-import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention;
import org.elasticsearch.cluster.metadata.ResettableValue;
import org.elasticsearch.cluster.metadata.Template;
-import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.util.Maps;
import org.elasticsearch.core.Nullable;
+import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.ToXContentObject;
import org.elasticsearch.xcontent.XContentBuilder;
@@ -67,27 +65,11 @@ public class SimulateIndexTemplateResponse extends ActionResponse implements ToX
return rolloverConfiguration;
}
- public SimulateIndexTemplateResponse(StreamInput in) throws IOException {
- super(in);
- resolvedTemplate = in.readOptionalWriteable(Template::new);
- if (in.readBoolean()) {
- int overlappingTemplatesCount = in.readInt();
- overlappingTemplates = Maps.newMapWithExpectedSize(overlappingTemplatesCount);
- for (int i = 0; i < overlappingTemplatesCount; i++) {
- String templateName = in.readString();
- overlappingTemplates.put(templateName, in.readStringCollectionAsList());
- }
- } else {
- this.overlappingTemplates = null;
- }
- rolloverConfiguration = in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)
- ? in.readOptionalWriteable(RolloverConfiguration::new)
- : null;
- if (in.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) {
- in.readOptionalWriteable(DataStreamGlobalRetention::read);
- }
- }
-
+ /**
+ * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC we must remain able to write these responses until
+ * we no longer need to support calling this action remotely.
+ */
+ @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT)
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalWriteable(resolvedTemplate);
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java
index 75cc72416a85..15015b910767 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java
@@ -14,12 +14,16 @@ import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
-import org.elasticsearch.action.support.master.MasterNodeReadRequest;
+import org.elasticsearch.action.support.local.LocalClusterStateRequest;
import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.Nullable;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.tasks.CancellableTask;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskId;
import java.io.IOException;
+import java.util.Map;
import java.util.Objects;
/**
@@ -35,7 +39,7 @@ public class SimulateTemplateAction extends ActionType {
+ public static class Request extends LocalClusterStateRequest {
@Nullable
private String templateName;
@@ -44,26 +48,15 @@ public class SimulateTemplateAction extends ActionType headers) {
+ return new CancellableTask(id, type, action, "", parentTaskId, headers);
+ }
+
@Nullable
public String getTemplateName() {
return templateName;
@@ -112,11 +100,6 @@ public class SimulateTemplateAction extends ActionType {
@@ -78,14 +78,18 @@ public class TransportSimulateIndexTemplateAction extends TransportMasterNodeRea
private final ClusterSettings clusterSettings;
private final boolean isDslOnlyMode;
+ /**
+ * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC it must be registered with the TransportService until
+ * we no longer need to support calling this action remotely.
+ */
+ @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT)
+ @SuppressWarnings("this-escape")
@Inject
public TransportSimulateIndexTemplateAction(
TransportService transportService,
ClusterService clusterService,
- ThreadPool threadPool,
MetadataIndexTemplateService indexTemplateService,
ActionFilters actionFilters,
- IndexNameExpressionResolver indexNameExpressionResolver,
NamedXContentRegistry xContentRegistry,
IndicesService indicesService,
SystemIndices systemIndices,
@@ -93,13 +97,9 @@ public class TransportSimulateIndexTemplateAction extends TransportMasterNodeRea
) {
super(
SimulateIndexTemplateAction.NAME,
- transportService,
- clusterService,
- threadPool,
actionFilters,
- SimulateIndexTemplateRequest::new,
- indexNameExpressionResolver,
- SimulateIndexTemplateResponse::new,
+ transportService.getTaskManager(),
+ clusterService,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
this.indexTemplateService = indexTemplateService;
@@ -109,10 +109,19 @@ public class TransportSimulateIndexTemplateAction extends TransportMasterNodeRea
this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders();
this.clusterSettings = clusterService.getClusterSettings();
this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings());
+
+ transportService.registerRequestHandler(
+ actionName,
+ executor,
+ false,
+ true,
+ SimulateIndexTemplateRequest::new,
+ (request, channel, task) -> executeDirect(task, request, new ChannelActionListener<>(channel))
+ );
}
@Override
- protected void masterOperation(
+ protected void localClusterStateOperation(
Task task,
SimulateIndexTemplateRequest request,
ClusterState state,
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java
index 521316ac874e..555fa72af4a5 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java
@@ -11,26 +11,26 @@ package org.elasticsearch.action.admin.indices.template.post;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
-import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
+import org.elasticsearch.action.support.ChannelActionListener;
+import org.elasticsearch.action.support.local.TransportLocalClusterStateAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
import org.elasticsearch.cluster.metadata.DataStreamLifecycle;
-import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService;
import org.elasticsearch.cluster.metadata.Template;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.index.IndexSettingProvider;
import org.elasticsearch.index.IndexSettingProviders;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.SystemIndices;
import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.tasks.Task;
-import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xcontent.NamedXContentRegistry;
@@ -48,7 +48,7 @@ import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.fi
* Handles simulating an index template either by name (looking it up in the
* cluster state), or by a provided template configuration
*/
-public class TransportSimulateTemplateAction extends TransportMasterNodeReadAction<
+public class TransportSimulateTemplateAction extends TransportLocalClusterStateAction<
SimulateTemplateAction.Request,
SimulateIndexTemplateResponse> {
@@ -60,14 +60,18 @@ public class TransportSimulateTemplateAction extends TransportMasterNodeReadActi
private final ClusterSettings clusterSettings;
private final boolean isDslOnlyMode;
+ /**
+ * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC it must be registered with the TransportService until
+ * we no longer need to support calling this action remotely.
+ */
+ @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT)
+ @SuppressWarnings("this-escape")
@Inject
public TransportSimulateTemplateAction(
TransportService transportService,
ClusterService clusterService,
- ThreadPool threadPool,
MetadataIndexTemplateService indexTemplateService,
ActionFilters actionFilters,
- IndexNameExpressionResolver indexNameExpressionResolver,
NamedXContentRegistry xContentRegistry,
IndicesService indicesService,
SystemIndices systemIndices,
@@ -75,13 +79,9 @@ public class TransportSimulateTemplateAction extends TransportMasterNodeReadActi
) {
super(
SimulateTemplateAction.NAME,
- transportService,
- clusterService,
- threadPool,
actionFilters,
- SimulateTemplateAction.Request::new,
- indexNameExpressionResolver,
- SimulateIndexTemplateResponse::new,
+ transportService.getTaskManager(),
+ clusterService,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
this.indexTemplateService = indexTemplateService;
@@ -91,10 +91,19 @@ public class TransportSimulateTemplateAction extends TransportMasterNodeReadActi
this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders();
this.clusterSettings = clusterService.getClusterSettings();
this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings());
+
+ transportService.registerRequestHandler(
+ actionName,
+ executor,
+ false,
+ true,
+ SimulateTemplateAction.Request::new,
+ (request, channel, task) -> executeDirect(task, request, new ChannelActionListener<>(channel))
+ );
}
@Override
- protected void masterOperation(
+ protected void localClusterStateOperation(
Task task,
SimulateTemplateAction.Request request,
ClusterState state,
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java
index 998a3ada5d15..5851549977ea 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java
@@ -14,24 +14,10 @@ import org.elasticsearch.features.NodeFeature;
import java.util.Set;
-import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_COMPONENT_TEMPLATE_SUBSTITUTIONS;
import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_IGNORED_FIELDS;
-import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_INDEX_TEMPLATE_SUBSTITUTIONS;
-import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_ADDITION;
-import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_VALIDATION;
-import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_VALIDATION_TEMPLATES;
-import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_SUPPORT_NON_TEMPLATE_MAPPING;
public class BulkFeatures implements FeatureSpecification {
public Set getFeatures() {
- return Set.of(
- SIMULATE_MAPPING_VALIDATION,
- SIMULATE_MAPPING_VALIDATION_TEMPLATES,
- SIMULATE_COMPONENT_TEMPLATE_SUBSTITUTIONS,
- SIMULATE_INDEX_TEMPLATE_SUBSTITUTIONS,
- SIMULATE_MAPPING_ADDITION,
- SIMULATE_SUPPORT_NON_TEMPLATE_MAPPING,
- SIMULATE_IGNORED_FIELDS
- );
+ return Set.of(SIMULATE_IGNORED_FIELDS);
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java
index 7fbcd123013a..ab85b0b69791 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java
@@ -45,7 +45,6 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.core.Nullable;
-import org.elasticsearch.features.FeatureService;
import org.elasticsearch.index.IndexingPressure;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.indices.SystemIndices;
@@ -81,7 +80,6 @@ public class TransportBulkAction extends TransportAbstractBulkAction {
private static final Logger logger = LogManager.getLogger(TransportBulkAction.class);
public static final String LAZY_ROLLOVER_ORIGIN = "lazy_rollover";
- private final FeatureService featureService;
private final NodeClient client;
private final IndexNameExpressionResolver indexNameExpressionResolver;
private final OriginSettingClient rolloverClient;
@@ -94,7 +92,6 @@ public class TransportBulkAction extends TransportAbstractBulkAction {
TransportService transportService,
ClusterService clusterService,
IngestService ingestService,
- FeatureService featureService,
NodeClient client,
ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
@@ -109,7 +106,6 @@ public class TransportBulkAction extends TransportAbstractBulkAction {
transportService,
clusterService,
ingestService,
- featureService,
client,
actionFilters,
indexNameExpressionResolver,
@@ -127,7 +123,6 @@ public class TransportBulkAction extends TransportAbstractBulkAction {
TransportService transportService,
ClusterService clusterService,
IngestService ingestService,
- FeatureService featureService,
NodeClient client,
ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
@@ -145,7 +140,6 @@ public class TransportBulkAction extends TransportAbstractBulkAction {
transportService,
clusterService,
ingestService,
- featureService,
client,
actionFilters,
indexNameExpressionResolver,
@@ -165,7 +159,6 @@ public class TransportBulkAction extends TransportAbstractBulkAction {
TransportService transportService,
ClusterService clusterService,
IngestService ingestService,
- FeatureService featureService,
NodeClient client,
ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
@@ -191,7 +184,6 @@ public class TransportBulkAction extends TransportAbstractBulkAction {
);
this.dataStreamFailureStoreSettings = dataStreamFailureStoreSettings;
Objects.requireNonNull(relativeTimeProvider);
- this.featureService = featureService;
this.client = client;
this.indexNameExpressionResolver = indexNameExpressionResolver;
this.rolloverClient = new OriginSettingClient(client, LAZY_ROLLOVER_ORIGIN);
@@ -308,10 +300,6 @@ public class TransportBulkAction extends TransportAbstractBulkAction {
index,
projectState.metadata()
);
- boolean lazyRolloverFeature = featureService.clusterHasFeature(
- projectState.cluster(),
- LazyRolloverAction.DATA_STREAM_LAZY_ROLLOVER
- );
boolean lazyRolloverFailureStoreFeature = DataStream.isFailureStoreFeatureFlagEnabled();
Set indicesThatRequireAlias = new HashSet<>();
@@ -356,16 +344,12 @@ public class TransportBulkAction extends TransportAbstractBulkAction {
}
}
// Determine which data streams and failure stores need to be rolled over.
- if (lazyRolloverFeature) {
- DataStream dataStream = projectState.metadata().dataStreams().get(request.index());
- if (dataStream != null) {
- if (writeToFailureStore == false && dataStream.getBackingIndices().isRolloverOnWrite()) {
- dataStreamsToBeRolledOver.add(request.index());
- } else if (lazyRolloverFailureStoreFeature
- && writeToFailureStore
- && dataStream.getFailureIndices().isRolloverOnWrite()) {
- failureStoresToBeRolledOver.add(request.index());
- }
+ DataStream dataStream = projectState.metadata().dataStreams().get(request.index());
+ if (dataStream != null) {
+ if (writeToFailureStore == false && dataStream.getBackingIndices().isRolloverOnWrite()) {
+ dataStreamsToBeRolledOver.add(request.index());
+ } else if (lazyRolloverFailureStoreFeature && writeToFailureStore && dataStream.getFailureIndices().isRolloverOnWrite()) {
+ failureStoresToBeRolledOver.add(request.index());
}
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java
index 0f0352eb2bfa..1f336a5961ec 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java
@@ -85,15 +85,6 @@ import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.fi
* shards are not actually modified).
*/
public class TransportSimulateBulkAction extends TransportAbstractBulkAction {
- public static final NodeFeature SIMULATE_MAPPING_VALIDATION = new NodeFeature("simulate.mapping.validation", true);
- public static final NodeFeature SIMULATE_MAPPING_VALIDATION_TEMPLATES = new NodeFeature("simulate.mapping.validation.templates", true);
- public static final NodeFeature SIMULATE_COMPONENT_TEMPLATE_SUBSTITUTIONS = new NodeFeature(
- "simulate.component.template.substitutions",
- true
- );
- public static final NodeFeature SIMULATE_INDEX_TEMPLATE_SUBSTITUTIONS = new NodeFeature("simulate.index.template.substitutions", true);
- public static final NodeFeature SIMULATE_MAPPING_ADDITION = new NodeFeature("simulate.mapping.addition", true);
- public static final NodeFeature SIMULATE_SUPPORT_NON_TEMPLATE_MAPPING = new NodeFeature("simulate.support.non.template.mapping", true);
public static final NodeFeature SIMULATE_IGNORED_FIELDS = new NodeFeature("simulate.ignored.fields");
private final IndicesService indicesService;
diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java b/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java
index 650a72a86e7c..beea9ef07c86 100644
--- a/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java
+++ b/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java
@@ -23,8 +23,6 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.features.FeatureService;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.Index;
import java.util.List;
@@ -43,8 +41,6 @@ public class DataStreamAutoShardingService {
private static final Logger logger = LogManager.getLogger(DataStreamAutoShardingService.class);
public static final String DATA_STREAMS_AUTO_SHARDING_ENABLED = "data_streams.auto_sharding.enabled";
- public static final NodeFeature DATA_STREAM_AUTO_SHARDING_FEATURE = new NodeFeature("data_stream.auto_sharding", true);
-
public static final Setting> DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING = Setting.listSetting(
"data_streams.auto_sharding.excludes",
List.of(),
@@ -101,7 +97,6 @@ public class DataStreamAutoShardingService {
);
private final ClusterService clusterService;
private final boolean isAutoShardingEnabled;
- private final FeatureService featureService;
private final LongSupplier nowSupplier;
private volatile TimeValue increaseShardsCooldown;
private volatile TimeValue reduceShardsCooldown;
@@ -109,12 +104,7 @@ public class DataStreamAutoShardingService {
private volatile int maxWriteThreads;
private volatile List dataStreamExcludePatterns;
- public DataStreamAutoShardingService(
- Settings settings,
- ClusterService clusterService,
- FeatureService featureService,
- LongSupplier nowSupplier
- ) {
+ public DataStreamAutoShardingService(Settings settings, ClusterService clusterService, LongSupplier nowSupplier) {
this.clusterService = clusterService;
this.isAutoShardingEnabled = settings.getAsBoolean(DATA_STREAMS_AUTO_SHARDING_ENABLED, false);
this.increaseShardsCooldown = DATA_STREAMS_AUTO_SHARDING_INCREASE_SHARDS_COOLDOWN.get(settings);
@@ -122,7 +112,6 @@ public class DataStreamAutoShardingService {
this.minWriteThreads = CLUSTER_AUTO_SHARDING_MIN_WRITE_THREADS.get(settings);
this.maxWriteThreads = CLUSTER_AUTO_SHARDING_MAX_WRITE_THREADS.get(settings);
this.dataStreamExcludePatterns = DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.get(settings);
- this.featureService = featureService;
this.nowSupplier = nowSupplier;
}
@@ -167,15 +156,6 @@ public class DataStreamAutoShardingService {
return NOT_APPLICABLE_RESULT;
}
- if (featureService.clusterHasFeature(state.cluster(), DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE) == false) {
- logger.debug(
- "Data stream auto sharding service cannot compute the optimal number of shards for data stream [{}] because the cluster "
- + "doesn't have the auto sharding feature",
- dataStream.getName()
- );
- return NOT_APPLICABLE_RESULT;
- }
-
if (dataStreamExcludePatterns.stream().anyMatch(pattern -> Regex.simpleMatch(pattern, dataStream.getName()))) {
logger.debug(
"Data stream [{}] is excluded from auto sharding via the [{}] setting",
diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java
index 80ffd305bad5..73e6a0306247 100644
--- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java
@@ -231,7 +231,7 @@ abstract class AbstractSearchAsyncAction exten
}
@Override
- public final void run() {
+ protected final void run() {
for (final SearchShardIterator iterator : toSkipShardsIts) {
assert iterator.skip();
skipShard(iterator);
@@ -286,7 +286,7 @@ abstract class AbstractSearchAsyncAction exten
return true;
}
- protected void performPhaseOnShard(final int shardIndex, final SearchShardIterator shardIt, final SearchShardTarget shard) {
+ private void performPhaseOnShard(final int shardIndex, final SearchShardIterator shardIt, final SearchShardTarget shard) {
if (throttleConcurrentRequests) {
var pendingExecutions = pendingExecutionsPerNode.computeIfAbsent(
shard.getNodeId(),
@@ -349,7 +349,7 @@ abstract class AbstractSearchAsyncAction exten
* of the next phase. If there are no successful operations in the context when this method is executed the search is aborted and
* a response is returned to the user indicating that all shards have failed.
*/
- protected void executeNextPhase(SearchPhase currentPhase, Supplier nextPhaseSupplier) {
+ protected void executeNextPhase(String currentPhase, Supplier nextPhaseSupplier) {
/* This is the main search phase transition where we move to the next phase. If all shards
* failed or if there was a failure and partial results are not allowed, then we immediately
* fail. Otherwise we continue to the next phase.
@@ -360,7 +360,7 @@ abstract class AbstractSearchAsyncAction exten
Throwable cause = shardSearchFailures.length == 0
? null
: ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0];
- logger.debug(() -> "All shards failed for phase: [" + currentPhase.getName() + "]", cause);
+ logger.debug(() -> "All shards failed for phase: [" + currentPhase + "]", cause);
onPhaseFailure(currentPhase, "all shards failed", cause);
} else {
Boolean allowPartialResults = request.allowPartialSearchResults();
@@ -373,7 +373,7 @@ abstract class AbstractSearchAsyncAction exten
int numShardFailures = shardSearchFailures.length;
shardSearchFailures = ExceptionsHelper.groupBy(shardSearchFailures);
Throwable cause = ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0];
- logger.debug(() -> format("%s shards failed for phase: [%s]", numShardFailures, currentPhase.getName()), cause);
+ logger.debug(() -> format("%s shards failed for phase: [%s]", numShardFailures, currentPhase), cause);
}
onPhaseFailure(currentPhase, "Partial shards failure", null);
} else {
@@ -386,7 +386,7 @@ abstract class AbstractSearchAsyncAction exten
successfulOps.get(),
skippedOps.get(),
getNumShards(),
- currentPhase.getName()
+ currentPhase
);
}
onPhaseFailure(currentPhase, "Partial shards failure (" + discrepancy + " shards unavailable)", null);
@@ -400,7 +400,7 @@ abstract class AbstractSearchAsyncAction exten
.collect(Collectors.joining(","));
logger.trace(
"[{}] Moving to next phase: [{}], based on results from: {} (cluster state version: {})",
- currentPhase.getName(),
+ currentPhase,
nextPhase.getName(),
resultsFrom,
clusterStateVersion
@@ -413,11 +413,11 @@ abstract class AbstractSearchAsyncAction exten
private void executePhase(SearchPhase phase) {
try {
phase.run();
- } catch (Exception e) {
+ } catch (RuntimeException e) {
if (logger.isDebugEnabled()) {
logger.debug(() -> format("Failed to execute [%s] while moving to [%s] phase", request, phase.getName()), e);
}
- onPhaseFailure(phase, "", e);
+ onPhaseFailure(phase.getName(), "", e);
}
}
@@ -693,8 +693,8 @@ abstract class AbstractSearchAsyncAction exten
* @param msg an optional message
* @param cause the cause of the phase failure
*/
- public void onPhaseFailure(SearchPhase phase, String msg, Throwable cause) {
- raisePhaseFailure(new SearchPhaseExecutionException(phase.getName(), msg, cause, buildShardFailures()));
+ public void onPhaseFailure(String phase, String msg, Throwable cause) {
+ raisePhaseFailure(new SearchPhaseExecutionException(phase, msg, cause, buildShardFailures()));
}
/**
@@ -739,7 +739,7 @@ abstract class AbstractSearchAsyncAction exten
* @see #onShardResult(SearchPhaseResult, SearchShardIterator)
*/
private void onPhaseDone() { // as a tribute to @kimchy aka. finishHim()
- executeNextPhase(this, this::getNextPhase);
+ executeNextPhase(getName(), this::getNextPhase);
}
/**
diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java
index cc8c4becea9a..faeb552530e4 100644
--- a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java
+++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java
@@ -39,6 +39,9 @@ import java.util.function.Function;
* @see CountedCollector#onFailure(int, SearchShardTarget, Exception)
*/
final class DfsQueryPhase extends SearchPhase {
+
+ public static final String NAME = "dfs_query";
+
private final SearchPhaseResults queryResult;
private final List searchResults;
private final AggregatedDfs dfs;
@@ -56,7 +59,7 @@ final class DfsQueryPhase extends SearchPhase {
Function, SearchPhase> nextPhaseFactory,
AbstractSearchAsyncAction> context
) {
- super("dfs_query");
+ super(NAME);
this.progressListener = context.getTask().getProgressListener();
this.queryResult = queryResult;
this.searchResults = searchResults;
@@ -68,13 +71,13 @@ final class DfsQueryPhase extends SearchPhase {
}
@Override
- public void run() {
+ protected void run() {
// TODO we can potentially also consume the actual per shard results from the initial phase here in the aggregateDfs
// to free up memory early
final CountedCollector counter = new CountedCollector<>(
queryResult,
searchResults.size(),
- () -> context.executeNextPhase(this, () -> nextPhaseFactory.apply(queryResult)),
+ () -> context.executeNextPhase(NAME, () -> nextPhaseFactory.apply(queryResult)),
context
);
@@ -106,7 +109,7 @@ final class DfsQueryPhase extends SearchPhase {
response.setSearchProfileDfsPhaseResult(dfsResult.searchProfileDfsPhaseResult());
counter.onResult(response);
} catch (Exception e) {
- context.onPhaseFailure(DfsQueryPhase.this, "", e);
+ context.onPhaseFailure(NAME, "", e);
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java
index e8d94c32bdcc..b0b3f1526592 100644
--- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java
+++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java
@@ -31,12 +31,15 @@ import java.util.function.Supplier;
* forwards to the next phase immediately.
*/
final class ExpandSearchPhase extends SearchPhase {
+
+ static final String NAME = "expand";
+
private final AbstractSearchAsyncAction> context;
private final SearchHits searchHits;
private final Supplier nextPhase;
ExpandSearchPhase(AbstractSearchAsyncAction> context, SearchHits searchHits, Supplier nextPhase) {
- super("expand");
+ super(NAME);
this.context = context;
this.searchHits = searchHits;
this.nextPhase = nextPhase;
@@ -51,7 +54,7 @@ final class ExpandSearchPhase extends SearchPhase {
}
@Override
- public void run() {
+ protected void run() {
if (isCollapseRequest() == false || searchHits.getHits().length == 0) {
onPhaseDone();
} else {
@@ -123,7 +126,7 @@ final class ExpandSearchPhase extends SearchPhase {
}
private void phaseFailure(Exception ex) {
- context.onPhaseFailure(this, "failed to expand hits", ex);
+ context.onPhaseFailure(NAME, "failed to expand hits", ex);
}
private static SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilder options, CollapseBuilder innerCollapseBuilder) {
@@ -168,6 +171,6 @@ final class ExpandSearchPhase extends SearchPhase {
}
private void onPhaseDone() {
- context.executeNextPhase(this, nextPhase);
+ context.executeNextPhase(NAME, nextPhase);
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java
index d8671bcadf86..2e98d5019649 100644
--- a/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java
+++ b/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java
@@ -33,6 +33,9 @@ import java.util.stream.Collectors;
* @see org.elasticsearch.index.mapper.LookupRuntimeFieldType
*/
final class FetchLookupFieldsPhase extends SearchPhase {
+
+ static final String NAME = "fetch_lookup_fields";
+
private final AbstractSearchAsyncAction> context;
private final SearchResponseSections searchResponse;
private final AtomicArray queryResults;
@@ -42,7 +45,7 @@ final class FetchLookupFieldsPhase extends SearchPhase {
SearchResponseSections searchResponse,
AtomicArray queryResults
) {
- super("fetch_lookup_fields");
+ super(NAME);
this.context = context;
this.searchResponse = searchResponse;
this.queryResults = queryResults;
@@ -74,7 +77,7 @@ final class FetchLookupFieldsPhase extends SearchPhase {
}
@Override
- public void run() {
+ protected void run() {
final List clusters = groupLookupFieldsByClusterAlias(searchResponse.hits);
if (clusters.isEmpty()) {
context.sendSearchResponse(searchResponse, queryResults);
@@ -129,7 +132,7 @@ final class FetchLookupFieldsPhase extends SearchPhase {
}
}
if (failure != null) {
- context.onPhaseFailure(FetchLookupFieldsPhase.this, "failed to fetch lookup fields", failure);
+ context.onPhaseFailure(NAME, "failed to fetch lookup fields", failure);
} else {
context.sendSearchResponse(searchResponse, queryResults);
}
@@ -137,7 +140,7 @@ final class FetchLookupFieldsPhase extends SearchPhase {
@Override
public void onFailure(Exception e) {
- context.onPhaseFailure(FetchLookupFieldsPhase.this, "failed to fetch lookup fields", e);
+ context.onPhaseFailure(NAME, "failed to fetch lookup fields", e);
}
});
}
diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java
index 8568b6091676..119cfcab7610 100644
--- a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java
+++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java
@@ -34,6 +34,9 @@ import java.util.function.BiFunction;
* Then it reaches out to all relevant shards to fetch the topN hits.
*/
final class FetchSearchPhase extends SearchPhase {
+
+ static final String NAME = "fetch";
+
private final AtomicArray searchPhaseShardResults;
private final BiFunction, SearchPhase> nextPhaseFactory;
private final AbstractSearchAsyncAction> context;
@@ -70,7 +73,7 @@ final class FetchSearchPhase extends SearchPhase {
@Nullable SearchPhaseController.ReducedQueryPhase reducedQueryPhase,
BiFunction, SearchPhase> nextPhaseFactory
) {
- super("fetch");
+ super(NAME);
if (context.getNumShards() != resultConsumer.getNumShards()) {
throw new IllegalStateException(
"number of shards must match the length of the query results but doesn't:"
@@ -90,7 +93,7 @@ final class FetchSearchPhase extends SearchPhase {
}
@Override
- public void run() {
+ protected void run() {
context.execute(new AbstractRunnable() {
@Override
@@ -100,7 +103,7 @@ final class FetchSearchPhase extends SearchPhase {
@Override
public void onFailure(Exception e) {
- context.onPhaseFailure(FetchSearchPhase.this, "", e);
+ context.onPhaseFailure(NAME, "", e);
}
});
}
@@ -222,7 +225,7 @@ final class FetchSearchPhase extends SearchPhase {
progressListener.notifyFetchResult(shardIndex);
counter.onResult(result);
} catch (Exception e) {
- context.onPhaseFailure(FetchSearchPhase.this, "", e);
+ context.onPhaseFailure(NAME, "", e);
}
}
@@ -269,7 +272,7 @@ final class FetchSearchPhase extends SearchPhase {
AtomicArray extends SearchPhaseResult> fetchResultsArr,
SearchPhaseController.ReducedQueryPhase reducedQueryPhase
) {
- context.executeNextPhase(this, () -> {
+ context.executeNextPhase(NAME, () -> {
var resp = SearchPhaseController.merge(context.getRequest().scroll() != null, reducedQueryPhase, fetchResultsArr);
context.addReleasable(resp);
return nextPhaseFactory.apply(resp, searchPhaseShardResults);
diff --git a/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java b/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java
index 199228c9f992..e9302883457e 100644
--- a/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java
+++ b/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java
@@ -37,6 +37,8 @@ import java.util.List;
*/
public class RankFeaturePhase extends SearchPhase {
+ static final String NAME = "rank-feature";
+
private static final Logger logger = LogManager.getLogger(RankFeaturePhase.class);
private final AbstractSearchAsyncAction> context;
final SearchPhaseResults queryPhaseResults;
@@ -51,7 +53,7 @@ public class RankFeaturePhase extends SearchPhase {
AbstractSearchAsyncAction> context,
RankFeaturePhaseRankCoordinatorContext rankFeaturePhaseRankCoordinatorContext
) {
- super("rank-feature");
+ super(NAME);
assert rankFeaturePhaseRankCoordinatorContext != null;
this.rankFeaturePhaseRankCoordinatorContext = rankFeaturePhaseRankCoordinatorContext;
if (context.getNumShards() != queryPhaseResults.getNumShards()) {
@@ -71,7 +73,7 @@ public class RankFeaturePhase extends SearchPhase {
}
@Override
- public void run() {
+ protected void run() {
context.execute(new AbstractRunnable() {
@Override
protected void doRun() throws Exception {
@@ -84,7 +86,7 @@ public class RankFeaturePhase extends SearchPhase {
@Override
public void onFailure(Exception e) {
- context.onPhaseFailure(RankFeaturePhase.this, "", e);
+ context.onPhaseFailure(NAME, "", e);
}
});
}
@@ -139,7 +141,7 @@ public class RankFeaturePhase extends SearchPhase {
progressListener.notifyRankFeatureResult(shardIndex);
rankRequestCounter.onResult(response);
} catch (Exception e) {
- context.onPhaseFailure(RankFeaturePhase.this, "", e);
+ context.onPhaseFailure(NAME, "", e);
}
}
@@ -194,7 +196,7 @@ public class RankFeaturePhase extends SearchPhase {
@Override
public void onFailure(Exception e) {
- context.onPhaseFailure(RankFeaturePhase.this, "Computing updated ranks for results failed", e);
+ context.onPhaseFailure(NAME, "Computing updated ranks for results failed", e);
}
}
);
@@ -239,6 +241,6 @@ public class RankFeaturePhase extends SearchPhase {
}
void moveToNextPhase(SearchPhaseResults phaseResults, SearchPhaseController.ReducedQueryPhase reducedQueryPhase) {
- context.executeNextPhase(this, () -> new FetchSearchPhase(phaseResults, aggregatedDfs, context, reducedQueryPhase));
+ context.executeNextPhase(NAME, () -> new FetchSearchPhase(phaseResults, aggregatedDfs, context, reducedQueryPhase));
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java
index 25d59a06664d..5c5c47b5fcc4 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java
@@ -10,6 +10,13 @@
package org.elasticsearch.action.search;
import org.apache.logging.log4j.Logger;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.CollectionStatistics;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermStatistics;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TotalHits;
+import org.apache.lucene.util.SetOnce;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.cluster.ClusterState;
@@ -17,12 +24,16 @@ import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsKnnResults;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.transport.Transport;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executor;
@@ -93,12 +104,11 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction
@Override
protected SearchPhase getNextPhase() {
final List dfsSearchResults = results.getAtomicArray().asList();
- final AggregatedDfs aggregatedDfs = SearchPhaseController.aggregateDfs(dfsSearchResults);
- final List mergedKnnResults = SearchPhaseController.mergeKnnResults(getRequest(), dfsSearchResults);
+ final AggregatedDfs aggregatedDfs = aggregateDfs(dfsSearchResults);
return new DfsQueryPhase(
dfsSearchResults,
aggregatedDfs,
- mergedKnnResults,
+ mergeKnnResults(getRequest(), dfsSearchResults),
queryPhaseResultConsumer,
(queryResults) -> SearchQueryThenFetchAsyncAction.nextPhase(client, this, queryResults, aggregatedDfs),
this
@@ -109,4 +119,95 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction
protected void onShardGroupFailure(int shardIndex, SearchShardTarget shardTarget, Exception exc) {
progressListener.notifyQueryFailure(shardIndex, shardTarget, exc);
}
+
+ private static List mergeKnnResults(SearchRequest request, List dfsSearchResults) {
+ if (request.hasKnnSearch() == false) {
+ return null;
+ }
+ SearchSourceBuilder source = request.source();
+ List> topDocsLists = new ArrayList<>(source.knnSearch().size());
+ List> nestedPath = new ArrayList<>(source.knnSearch().size());
+ for (int i = 0; i < source.knnSearch().size(); i++) {
+ topDocsLists.add(new ArrayList<>());
+ nestedPath.add(new SetOnce<>());
+ }
+
+ for (DfsSearchResult dfsSearchResult : dfsSearchResults) {
+ if (dfsSearchResult.knnResults() != null) {
+ for (int i = 0; i < dfsSearchResult.knnResults().size(); i++) {
+ DfsKnnResults knnResults = dfsSearchResult.knnResults().get(i);
+ ScoreDoc[] scoreDocs = knnResults.scoreDocs();
+ TotalHits totalHits = new TotalHits(scoreDocs.length, TotalHits.Relation.EQUAL_TO);
+ TopDocs shardTopDocs = new TopDocs(totalHits, scoreDocs);
+ SearchPhaseController.setShardIndex(shardTopDocs, dfsSearchResult.getShardIndex());
+ topDocsLists.get(i).add(shardTopDocs);
+ nestedPath.get(i).trySet(knnResults.getNestedPath());
+ }
+ }
+ }
+
+ List mergedResults = new ArrayList<>(source.knnSearch().size());
+ for (int i = 0; i < source.knnSearch().size(); i++) {
+ TopDocs mergedTopDocs = TopDocs.merge(source.knnSearch().get(i).k(), topDocsLists.get(i).toArray(new TopDocs[0]));
+ mergedResults.add(new DfsKnnResults(nestedPath.get(i).get(), mergedTopDocs.scoreDocs));
+ }
+ return mergedResults;
+ }
+
+ private static AggregatedDfs aggregateDfs(Collection results) {
+ Map termStatistics = new HashMap<>();
+ Map fieldStatistics = new HashMap<>();
+ long aggMaxDoc = 0;
+ for (DfsSearchResult lEntry : results) {
+ final Term[] terms = lEntry.terms();
+ final TermStatistics[] stats = lEntry.termStatistics();
+ assert terms.length == stats.length;
+ for (int i = 0; i < terms.length; i++) {
+ assert terms[i] != null;
+ if (stats[i] == null) {
+ continue;
+ }
+ TermStatistics existing = termStatistics.get(terms[i]);
+ if (existing != null) {
+ assert terms[i].bytes().equals(existing.term());
+ termStatistics.put(
+ terms[i],
+ new TermStatistics(
+ existing.term(),
+ existing.docFreq() + stats[i].docFreq(),
+ existing.totalTermFreq() + stats[i].totalTermFreq()
+ )
+ );
+ } else {
+ termStatistics.put(terms[i], stats[i]);
+ }
+
+ }
+
+ assert lEntry.fieldStatistics().containsKey(null) == false;
+ for (var entry : lEntry.fieldStatistics().entrySet()) {
+ String key = entry.getKey();
+ CollectionStatistics value = entry.getValue();
+ if (value == null) {
+ continue;
+ }
+ assert key != null;
+ CollectionStatistics existing = fieldStatistics.get(key);
+ if (existing != null) {
+ CollectionStatistics merged = new CollectionStatistics(
+ key,
+ existing.maxDoc() + value.maxDoc(),
+ existing.docCount() + value.docCount(),
+ existing.sumTotalTermFreq() + value.sumTotalTermFreq(),
+ existing.sumDocFreq() + value.sumDocFreq()
+ );
+ fieldStatistics.put(key, merged);
+ } else {
+ fieldStatistics.put(key, value);
+ }
+ }
+ aggMaxDoc += lEntry.maxDoc();
+ }
+ return new AggregatedDfs(termStatistics, fieldStatistics, aggMaxDoc);
+ }
}
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java
index 7d849a72abf9..702369dc3839 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java
@@ -9,25 +9,25 @@
package org.elasticsearch.action.search;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
-import org.elasticsearch.core.CheckedRunnable;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.transport.Transport;
-import java.io.IOException;
import java.util.Objects;
import java.util.function.Function;
/**
* Base class for all individual search phases like collecting distributed frequencies, fetching documents, querying shards.
*/
-abstract class SearchPhase implements CheckedRunnable {
+abstract class SearchPhase {
private final String name;
protected SearchPhase(String name) {
this.name = Objects.requireNonNull(name, "name must not be null");
}
+ protected abstract void run();
+
/**
* Returns the phases name.
*/
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java
index 69e7fba4dd0d..f8736ab79690 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java
@@ -9,20 +9,16 @@
package org.elasticsearch.action.search;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.CollectionStatistics;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.SortedNumericSortField;
import org.apache.lucene.search.SortedSetSortField;
-import org.apache.lucene.search.TermStatistics;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopFieldDocs;
import org.apache.lucene.search.TotalHits;
import org.apache.lucene.search.TotalHits.Relation;
-import org.apache.lucene.util.SetOnce;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.io.stream.DelayableWriteable;
import org.elasticsearch.common.lucene.Lucene;
@@ -42,9 +38,6 @@ import org.elasticsearch.search.aggregations.AggregationReduceContext;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.builder.SearchSourceBuilder;
-import org.elasticsearch.search.dfs.AggregatedDfs;
-import org.elasticsearch.search.dfs.DfsKnnResults;
-import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.profile.SearchProfileQueryPhaseResult;
@@ -84,97 +77,6 @@ public final class SearchPhaseController {
this.requestToAggReduceContextBuilder = requestToAggReduceContextBuilder;
}
- public static AggregatedDfs aggregateDfs(Collection results) {
- Map termStatistics = new HashMap<>();
- Map fieldStatistics = new HashMap<>();
- long aggMaxDoc = 0;
- for (DfsSearchResult lEntry : results) {
- final Term[] terms = lEntry.terms();
- final TermStatistics[] stats = lEntry.termStatistics();
- assert terms.length == stats.length;
- for (int i = 0; i < terms.length; i++) {
- assert terms[i] != null;
- if (stats[i] == null) {
- continue;
- }
- TermStatistics existing = termStatistics.get(terms[i]);
- if (existing != null) {
- assert terms[i].bytes().equals(existing.term());
- termStatistics.put(
- terms[i],
- new TermStatistics(
- existing.term(),
- existing.docFreq() + stats[i].docFreq(),
- existing.totalTermFreq() + stats[i].totalTermFreq()
- )
- );
- } else {
- termStatistics.put(terms[i], stats[i]);
- }
-
- }
-
- assert lEntry.fieldStatistics().containsKey(null) == false;
- for (var entry : lEntry.fieldStatistics().entrySet()) {
- String key = entry.getKey();
- CollectionStatistics value = entry.getValue();
- if (value == null) {
- continue;
- }
- assert key != null;
- CollectionStatistics existing = fieldStatistics.get(key);
- if (existing != null) {
- CollectionStatistics merged = new CollectionStatistics(
- key,
- existing.maxDoc() + value.maxDoc(),
- existing.docCount() + value.docCount(),
- existing.sumTotalTermFreq() + value.sumTotalTermFreq(),
- existing.sumDocFreq() + value.sumDocFreq()
- );
- fieldStatistics.put(key, merged);
- } else {
- fieldStatistics.put(key, value);
- }
- }
- aggMaxDoc += lEntry.maxDoc();
- }
- return new AggregatedDfs(termStatistics, fieldStatistics, aggMaxDoc);
- }
-
- public static List mergeKnnResults(SearchRequest request, List dfsSearchResults) {
- if (request.hasKnnSearch() == false) {
- return null;
- }
- SearchSourceBuilder source = request.source();
- List> topDocsLists = new ArrayList<>(source.knnSearch().size());
- List> nestedPath = new ArrayList<>(source.knnSearch().size());
- for (int i = 0; i < source.knnSearch().size(); i++) {
- topDocsLists.add(new ArrayList<>());
- nestedPath.add(new SetOnce<>());
- }
-
- for (DfsSearchResult dfsSearchResult : dfsSearchResults) {
- if (dfsSearchResult.knnResults() != null) {
- for (int i = 0; i < dfsSearchResult.knnResults().size(); i++) {
- DfsKnnResults knnResults = dfsSearchResult.knnResults().get(i);
- ScoreDoc[] scoreDocs = knnResults.scoreDocs();
- TotalHits totalHits = new TotalHits(scoreDocs.length, Relation.EQUAL_TO);
- TopDocs shardTopDocs = new TopDocs(totalHits, scoreDocs);
- setShardIndex(shardTopDocs, dfsSearchResult.getShardIndex());
- topDocsLists.get(i).add(shardTopDocs);
- nestedPath.get(i).trySet(knnResults.getNestedPath());
- }
- }
- }
-
- List mergedResults = new ArrayList<>(source.knnSearch().size());
- for (int i = 0; i < source.knnSearch().size(); i++) {
- TopDocs mergedTopDocs = TopDocs.merge(source.knnSearch().get(i).k(), topDocsLists.get(i).toArray(new TopDocs[0]));
- mergedResults.add(new DfsKnnResults(nestedPath.get(i).get(), mergedTopDocs.scoreDocs));
- }
- return mergedResults;
- }
-
/**
* Returns a score doc array of top N search docs across all shards, followed by top suggest docs for each
* named completion suggestion across all shards. If more than one named completion suggestion is specified in the
@@ -496,38 +398,6 @@ public final class SearchPhaseController {
);
}
- /**
- * Reduces the given query results and consumes all aggregations and profile results.
- * @param queryResults a list of non-null query shard results
- */
- static ReducedQueryPhase reducedScrollQueryPhase(Collection extends SearchPhaseResult> queryResults) {
- AggregationReduceContext.Builder aggReduceContextBuilder = new AggregationReduceContext.Builder() {
- @Override
- public AggregationReduceContext forPartialReduction() {
- throw new UnsupportedOperationException("Scroll requests don't have aggs");
- }
-
- @Override
- public AggregationReduceContext forFinalReduction() {
- throw new UnsupportedOperationException("Scroll requests don't have aggs");
- }
- };
- final TopDocsStats topDocsStats = new TopDocsStats(SearchContext.TRACK_TOTAL_HITS_ACCURATE);
- final List topDocs = new ArrayList<>();
- for (SearchPhaseResult sortedResult : queryResults) {
- QuerySearchResult queryResult = sortedResult.queryResult();
- final TopDocsAndMaxScore td = queryResult.consumeTopDocs();
- assert td != null;
- topDocsStats.add(td, queryResult.searchTimedOut(), queryResult.terminatedEarly());
- // make sure we set the shard index before we add it - the consumer didn't do that yet
- if (td.topDocs.scoreDocs.length > 0) {
- setShardIndex(td.topDocs, queryResult.getShardIndex());
- topDocs.add(td.topDocs);
- }
- }
- return reducedQueryPhase(queryResults, null, topDocs, topDocsStats, 0, true, aggReduceContextBuilder, null, true);
- }
-
/**
* Reduces the given query results and consumes all aggregations and profile results.
* @param queryResults a list of non-null query shard results
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java
index 2231f791384f..53da76d96e40 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java
@@ -10,21 +10,27 @@
package org.elasticsearch.action.search;
import org.apache.logging.log4j.Logger;
+import org.apache.lucene.search.TopDocs;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.aggregations.AggregationReduceContext;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
+import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchContextId;
+import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.transport.RemoteClusterService;
import org.elasticsearch.transport.Transport;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
@@ -40,7 +46,7 @@ import static org.elasticsearch.core.Strings.format;
* fan out to nodes and execute the query part of the scroll request. Subclasses can for instance
* run separate fetch phases etc.
*/
-abstract class SearchScrollAsyncAction implements Runnable {
+abstract class SearchScrollAsyncAction {
protected final Logger logger;
protected final ActionListener listener;
protected final ParsedScrollId scrollId;
@@ -229,7 +235,7 @@ abstract class SearchScrollAsyncAction implements R
) {
return new SearchPhase("fetch") {
@Override
- public void run() {
+ protected void run() {
sendResponse(queryPhase, fetchResults);
}
};
@@ -301,4 +307,48 @@ abstract class SearchScrollAsyncAction implements R
protected Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) {
return searchTransportService.getConnection(clusterAlias, node);
}
+
+ /**
+ * Reduces the given query results and consumes all aggregations and profile results.
+ * @param queryResults a list of non-null query shard results
+ */
+ protected static SearchPhaseController.ReducedQueryPhase reducedScrollQueryPhase(Collection extends SearchPhaseResult> queryResults) {
+ AggregationReduceContext.Builder aggReduceContextBuilder = new AggregationReduceContext.Builder() {
+ @Override
+ public AggregationReduceContext forPartialReduction() {
+ throw new UnsupportedOperationException("Scroll requests don't have aggs");
+ }
+
+ @Override
+ public AggregationReduceContext forFinalReduction() {
+ throw new UnsupportedOperationException("Scroll requests don't have aggs");
+ }
+ };
+ final SearchPhaseController.TopDocsStats topDocsStats = new SearchPhaseController.TopDocsStats(
+ SearchContext.TRACK_TOTAL_HITS_ACCURATE
+ );
+ final List topDocs = new ArrayList<>();
+ for (SearchPhaseResult sortedResult : queryResults) {
+ QuerySearchResult queryResult = sortedResult.queryResult();
+ final TopDocsAndMaxScore td = queryResult.consumeTopDocs();
+ assert td != null;
+ topDocsStats.add(td, queryResult.searchTimedOut(), queryResult.terminatedEarly());
+ // make sure we set the shard index before we add it - the consumer didn't do that yet
+ if (td.topDocs.scoreDocs.length > 0) {
+ SearchPhaseController.setShardIndex(td.topDocs, queryResult.getShardIndex());
+ topDocs.add(td.topDocs);
+ }
+ }
+ return SearchPhaseController.reducedQueryPhase(
+ queryResults,
+ null,
+ topDocs,
+ topDocsStats,
+ 0,
+ true,
+ aggReduceContextBuilder,
+ null,
+ true
+ );
+ }
}
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java
index 7b5ba21c8022..ba14b5bcd2cb 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java
@@ -51,7 +51,7 @@ final class SearchScrollQueryAndFetchAsyncAction extends SearchScrollAsyncAction
@Override
protected SearchPhase moveToNextPhase(BiFunction clusterNodeLookup) {
- return sendResponsePhase(SearchPhaseController.reducedScrollQueryPhase(queryFetchResults.asList()), queryFetchResults);
+ return sendResponsePhase(reducedScrollQueryPhase(queryFetchResults.asList()), queryFetchResults);
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java
index 8c33e3ca7da4..29822e596356 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java
@@ -65,10 +65,8 @@ final class SearchScrollQueryThenFetchAsyncAction extends SearchScrollAsyncActio
protected SearchPhase moveToNextPhase(BiFunction clusterNodeLookup) {
return new SearchPhase("fetch") {
@Override
- public void run() {
- final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = SearchPhaseController.reducedScrollQueryPhase(
- queryResults.asList()
- );
+ protected void run() {
+ final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = reducedScrollQueryPhase(queryResults.asList());
ScoreDoc[] scoreDocs = reducedQueryPhase.sortedTopDocs().scoreDocs();
if (scoreDocs.length == 0) {
sendResponse(reducedQueryPhase, fetchResults);
diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java
index 36ca0fba9437..6c95a3c8fd43 100644
--- a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java
@@ -270,7 +270,7 @@ public class TransportOpenPointInTimeAction extends HandledTransportAction new SearchScrollQueryThenFetchAsyncAction(
logger,
clusterService,
diff --git a/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java
index 943b03588d4b..634a103e9754 100644
--- a/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java
@@ -16,6 +16,7 @@ import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.core.TimeValue;
import java.io.IOException;
@@ -27,11 +28,22 @@ public abstract class ClusterInfoRequest listener) {
@@ -349,8 +349,8 @@ public class IndicesAdminClient implements ElasticsearchClient {
execute(GetIndexAction.INSTANCE, request, listener);
}
- public GetIndexRequestBuilder prepareGetIndex() {
- return new GetIndexRequestBuilder(this);
+ public GetIndexRequestBuilder prepareGetIndex(TimeValue masterTimeout) {
+ return new GetIndexRequestBuilder(this, masterTimeout);
}
public ActionFuture clearCache(final ClearIndicesCacheRequest request) {
diff --git a/server/src/main/java/org/elasticsearch/client/internal/RemoteClusterClient.java b/server/src/main/java/org/elasticsearch/client/internal/RemoteClusterClient.java
index 9e3497601fb5..f632ba33f772 100644
--- a/server/src/main/java/org/elasticsearch/client/internal/RemoteClusterClient.java
+++ b/server/src/main/java/org/elasticsearch/client/internal/RemoteClusterClient.java
@@ -47,7 +47,8 @@ public interface RemoteClusterClient {
/**
* Obtain a connection to the remote cluster for use with the {@link #execute} override that allows to specify the connection. Useful
- * for cases where you need to inspect {@link Transport.Connection#getVersion} before deciding the exact remote action to invoke.
+ * for cases where you need to inspect {@link Transport.Connection#getTransportVersion} before deciding the exact remote action to
+ * invoke.
*/
void getConnection(@Nullable Request request, ActionListener listener);
}
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java
index 673960c71339..17267525d4bd 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java
@@ -14,7 +14,6 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.features.NodeFeature;
import java.io.IOException;
@@ -23,7 +22,6 @@ import java.io.IOException;
*/
public record DataStreamGlobalRetention(@Nullable TimeValue defaultRetention, @Nullable TimeValue maxRetention) implements Writeable {
- public static final NodeFeature GLOBAL_RETENTION = new NodeFeature("data_stream.lifecycle.global_retention", true);
public static final TimeValue MIN_RETENTION_VALUE = TimeValue.timeValueSeconds(10);
/**
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java
index 353f17fe0e00..8366083b1907 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java
@@ -23,7 +23,6 @@ import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.Processors;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.UpdateForV9;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.xcontent.ConstructingObjectParser;
import org.elasticsearch.xcontent.ObjectParser;
import org.elasticsearch.xcontent.ParseField;
@@ -46,9 +45,6 @@ import static org.elasticsearch.node.NodeRoleSettings.NODE_ROLES_SETTING;
public final class DesiredNode implements Writeable, ToXContentObject, Comparable {
- public static final NodeFeature RANGE_FLOAT_PROCESSORS_SUPPORTED = new NodeFeature("desired_node.range_float_processors");
- public static final NodeFeature DESIRED_NODE_VERSION_DEPRECATED = new NodeFeature("desired_node.version_deprecated", true);
-
public static final TransportVersion RANGE_FLOAT_PROCESSORS_SUPPORT_TRANSPORT_VERSION = TransportVersions.V_8_3_0;
private static final ParseField SETTINGS_FIELD = new ParseField("settings");
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java
index 1e76d7965e36..4fa3ed1bb0d5 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java
@@ -72,7 +72,6 @@ import org.elasticsearch.index.IndexVersions;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.MapperService.MergeReason;
-import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.index.query.SearchExecutionContext;
import org.elasticsearch.indices.IndexCreationException;
import org.elasticsearch.indices.IndicesService;
@@ -245,6 +244,9 @@ public class MetadataCreateIndexService {
* Validate the name for an index or alias against some static rules.
*/
public static void validateIndexOrAliasName(String index, BiFunction exceptionCtor) {
+ if (index == null || index.isEmpty()) {
+ throw exceptionCtor.apply(index, "must not be empty");
+ }
if (Strings.validFileName(index) == false) {
throw exceptionCtor.apply(index, "must not contain the following characters " + Strings.INVALID_FILENAME_CHARS);
}
@@ -1657,7 +1659,7 @@ public class MetadataCreateIndexService {
private static final Set UNMODIFIABLE_SETTINGS_DURING_RESIZE = Set.of(
IndexSettings.MODE.getKey(),
- SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(),
+ IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(),
IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(),
IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(),
IndexSortConfig.INDEX_SORT_ORDER_SETTING.getKey(),
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataFeatures.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataFeatures.java
deleted file mode 100644
index 49bd38330e3a..000000000000
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataFeatures.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.cluster.metadata;
-
-import org.elasticsearch.features.FeatureSpecification;
-import org.elasticsearch.features.NodeFeature;
-
-import java.util.Set;
-
-public class MetadataFeatures implements FeatureSpecification {
- @Override
- public Set getFeatures() {
- return Set.of(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED);
- }
-}
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java
index e38cd677991f..24b14a46c878 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java
@@ -54,8 +54,6 @@ import static org.elasticsearch.common.xcontent.XContentParserUtils.expectValueT
*/
public abstract class IndexRouting {
- static final NodeFeature BOOLEAN_ROUTING_PATH = new NodeFeature("routing.boolean_routing_path", true);
- static final NodeFeature MULTI_VALUE_ROUTING_PATH = new NodeFeature("routing.multi_value_routing_path", true);
static final NodeFeature LOGSB_ROUTE_ON_SORT_FIELDS = new NodeFeature("routing.logsb_route_on_sort_fields");
/**
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingFeatures.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingFeatures.java
index 1545fdf90d11..461ac50e1efc 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingFeatures.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingFeatures.java
@@ -18,7 +18,7 @@ public class RoutingFeatures implements FeatureSpecification {
@Override
public Set getFeatures() {
- return Set.of(IndexRouting.BOOLEAN_ROUTING_PATH, IndexRouting.MULTI_VALUE_ROUTING_PATH);
+ return Set.of();
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
index 7b2514868530..5d01775b43a5 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
@@ -277,8 +277,7 @@ public class AllocationService {
}
/**
- * unassigned an shards that are associated with nodes that are no longer part of the cluster, potentially promoting replicas
- * if needed.
+ * Unassign any shards that are associated with nodes that are no longer part of the cluster, potentially promoting replicas if needed.
*/
public ClusterState disassociateDeadNodes(ClusterState clusterState, boolean reroute, String reason) {
RoutingAllocation allocation = createRoutingAllocation(clusterState, currentNanoTime());
@@ -290,7 +289,7 @@ public class AllocationService {
clusterState = buildResultAndLogHealthChange(clusterState, allocation, reason);
}
if (reroute) {
- return reroute(clusterState, reason, rerouteCompletionIsNotRequired());// this is not triggered by a user request
+ return reroute(clusterState, reason, rerouteCompletionIsNotRequired() /* this is not triggered by a user request */);
} else {
return clusterState;
}
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalance.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalance.java
index 0745c45b2183..16cbf41ee1bf 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalance.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalance.java
@@ -19,6 +19,10 @@ import java.util.Objects;
/**
* The desired balance of the cluster, indicating which nodes should hold a copy of each shard.
*
+ * @param lastConvergedIndex Identifies what input data the balancer computation round used to produce this {@link DesiredBalance}. See
+ * {@link DesiredBalanceInput#index()} for details. Each reroute request in the same master term is assigned a
+ * strictly increasing sequence number. A new master term restarts the index values from zero. The balancer,
+ * which runs async to reroute, uses the latest request's data to compute the desired balance.
* @param assignments a set of the (persistent) node IDs to which each {@link ShardId} should be allocated
* @param weightsPerNode The node weights calculated based on
* {@link org.elasticsearch.cluster.routing.allocation.allocator.WeightFunction#calculateNodeWeight}
@@ -31,8 +35,11 @@ public record DesiredBalance(
) {
enum ComputationFinishReason {
+ /** Computation ran to completion */
CONVERGED,
+ /** Computation exited and published early because a new cluster event occurred that affects computation */
YIELD_TO_NEW_INPUT,
+ /** Computation stopped and published early to avoid delaying new shard assignment */
STOP_EARLY
}
@@ -44,6 +51,7 @@ public record DesiredBalance(
* The placeholder value for {@link DesiredBalance} when the node stands down as master.
*/
public static final DesiredBalance NOT_MASTER = new DesiredBalance(-2, Map.of());
+
/**
* The starting value for {@link DesiredBalance} when the node becomes the master.
*/
@@ -57,6 +65,10 @@ public record DesiredBalance(
return Objects.equals(a.assignments, b.assignments) == false;
}
+ /**
+ * Returns the sum of shard movements needed to reach the new desired balance. Doesn't count new shard copies as a move, nor removal or
+ * unassignment of a shard copy.
+ */
public static int shardMovements(DesiredBalance old, DesiredBalance updated) {
var intersection = Sets.intersection(old.assignments().keySet(), updated.assignments().keySet());
int movements = 0;
@@ -70,8 +82,15 @@ public record DesiredBalance(
return movements;
}
+ /**
+ * Returns the number of shard movements needed to reach the new shard assignment. Doesn't count new shard copies as a move, nor removal
+ * or unassignment of a shard copy.
+ */
private static int shardMovements(ShardAssignment old, ShardAssignment updated) {
- var movements = Math.min(0, old.assigned() - updated.assigned());// compensate newly started shards
+ // A shard move should retain the same number of assigned nodes, just swap out one node for another. We will compensate for newly
+ // started shards -- adding a shard copy is not a move -- by initializing the count with a negative value so that incrementing later
+ // for a new node zeros out.
+ var movements = Math.min(0, old.assigned() - updated.assigned());
for (String nodeId : updated.nodeIds()) {
if (old.nodeIds().contains(nodeId) == false) {
movements++;
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java
index 3b22221ea7db..03630c284fa3 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java
@@ -415,11 +415,14 @@ public class DesiredBalanceComputer {
}
private static Map collectShardAssignments(RoutingNodes routingNodes) {
- final var entries = routingNodes.getAssignedShards().entrySet();
- assert entries.stream().flatMap(t -> t.getValue().stream()).allMatch(ShardRouting::started) : routingNodes;
- final Map res = Maps.newHashMapWithExpectedSize(entries.size());
- for (var shardAndAssignments : entries) {
- res.put(shardAndAssignments.getKey(), ShardAssignment.ofAssignedShards(shardAndAssignments.getValue()));
+ final var allAssignedShards = routingNodes.getAssignedShards().entrySet();
+ assert allAssignedShards.stream().flatMap(t -> t.getValue().stream()).allMatch(ShardRouting::started) : routingNodes;
+ final Map res = Maps.newHashMapWithExpectedSize(allAssignedShards.size());
+ for (var shardIdAndShardRoutings : allAssignedShards) {
+ res.put(
+ shardIdAndShardRoutings.getKey(),
+ ShardAssignment.createFromAssignedShardRoutingsList(shardIdAndShardRoutings.getValue())
+ );
}
return res;
}
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java
index bf4900b8d04b..85beb1498d11 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java
@@ -55,6 +55,10 @@ public class DesiredBalanceReconciler {
private static final Logger logger = LogManager.getLogger(DesiredBalanceReconciler.class);
+ /**
+ * The minimum interval at which log messages will be written if the number of undesired shard allocations reaches the percentage of total
+ * shards set by {@link #UNDESIRED_ALLOCATIONS_LOG_THRESHOLD_SETTING}.
+ */
public static final Setting UNDESIRED_ALLOCATIONS_LOG_INTERVAL_SETTING = Setting.timeSetting(
"cluster.routing.allocation.desired_balance.undesired_allocations.log_interval",
TimeValue.timeValueHours(1),
@@ -63,6 +67,10 @@ public class DesiredBalanceReconciler {
Setting.Property.NodeScope
);
+ /**
+ * Warning log messages may be periodically written if the number of shards that are on undesired nodes reaches this percentage setting.
+ * Works together with {@link #UNDESIRED_ALLOCATIONS_LOG_INTERVAL_SETTING} to log on a periodic basis.
+ */
public static final Setting UNDESIRED_ALLOCATIONS_LOG_THRESHOLD_SETTING = Setting.doubleSetting(
"cluster.routing.allocation.desired_balance.undesired_allocations.threshold",
0.1,
@@ -97,6 +105,13 @@ public class DesiredBalanceReconciler {
this.nodeAllocationStatsAndWeightsCalculator = nodeAllocationStatsAndWeightsCalculator;
}
+ /**
+ * Applies a desired shard allocation to the routing table by initializing and relocating shards in the cluster state.
+ *
+ * @param desiredBalance The new desired cluster shard allocation
+ * @param allocation Cluster state information with which to make decisions, contains routing table metadata that will be modified to
+ * reach the given desired balance.
+ */
public void reconcile(DesiredBalance desiredBalance, RoutingAllocation allocation) {
var nodeIds = allocation.routingNodes().getAllNodeIds();
allocationOrdering.retainNodes(nodeIds);
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java
index dec2590915fb..c37a2e6aa7be 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java
@@ -17,6 +17,7 @@ import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata;
import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.AllocationService.RerouteStrategy;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsAndWeightsCalculator;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
@@ -56,11 +57,30 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator {
private final ShardsAllocator delegateAllocator;
private final ThreadPool threadPool;
+ /**
+ * This is a callback to run {@link AllocationService#executeWithRoutingAllocation(ClusterState, String, RerouteStrategy)}, which
+ * produces a new ClusterState with the changes made by {@link DesiredBalanceReconciler#reconcile}. The {@link RerouteStrategy} provided
+ * to the callback calls into {@link #desiredBalanceReconciler} for the changes. The {@link #masterServiceTaskQueue} will publish the
+ * new cluster state after the cluster state is constructed by the {@link ReconcileDesiredBalanceExecutor}.
+ */
private final DesiredBalanceReconcilerAction reconciler;
private final DesiredBalanceComputer desiredBalanceComputer;
+ /**
+ * Reconciliation ({@link DesiredBalanceReconciler#reconcile(DesiredBalance, RoutingAllocation)}) takes the {@link DesiredBalance}
+ * output of {@link DesiredBalanceComputer#compute} and identifies how shards need to be added, moved or removed to go from the current
+ * cluster shard allocation to the new desired allocation.
+ */
private final DesiredBalanceReconciler desiredBalanceReconciler;
private final ContinuousComputation desiredBalanceComputation;
- private final PendingListenersQueue queue;
+ /**
+ * Saves and runs listeners after DesiredBalance computations complete.
+ */
+ private final PendingListenersQueue pendingListenersQueue;
+ /**
+ * Each reroute request gets assigned a monotonically increasing sequence number. Many reroute requests may arrive before the balancer
+ * asynchronously runs a computation. The balancer will use the latest request and save this sequence number to track back to the
+ * request.
+ */
private final AtomicLong indexGenerator = new AtomicLong(-1);
private final ConcurrentLinkedQueue> pendingDesiredBalanceMoves = new ConcurrentLinkedQueue<>();
private final MasterServiceTaskQueue masterServiceTaskQueue;
@@ -199,7 +219,7 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator {
return "DesiredBalanceShardsAllocator#allocate";
}
};
- this.queue = new PendingListenersQueue();
+ this.pendingListenersQueue = new PendingListenersQueue();
this.masterServiceTaskQueue = clusterService.createTaskQueue(
"reconcile-desired-balance",
Priority.URGENT,
@@ -235,7 +255,7 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator {
var index = indexGenerator.incrementAndGet();
logger.debug("Executing allocate for [{}]", index);
- queue.add(index, listener);
+ pendingListenersQueue.add(index, listener);
// This can only run on master, so unset not-master if exists
if (currentDesiredBalanceRef.compareAndSet(DesiredBalance.NOT_MASTER, DesiredBalance.BECOME_MASTER_INITIAL)) {
logger.debug("initialized desired balance for becoming master");
@@ -378,7 +398,7 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator {
private void onNoLongerMaster() {
if (indexGenerator.getAndSet(-1) != -1) {
currentDesiredBalanceRef.set(DesiredBalance.NOT_MASTER);
- queue.completeAllAsNotMaster();
+ pendingListenersQueue.completeAllAsNotMaster();
pendingDesiredBalanceMoves.clear();
desiredBalanceReconciler.clear();
desiredBalanceMetrics.zeroAllMetrics();
@@ -428,7 +448,7 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator {
batchExecutionContext.initialState(),
createReconcileAllocationAction(latest.getTask().desiredBalance)
);
- latest.success(() -> queue.complete(latest.getTask().desiredBalance.lastConvergedIndex()));
+ latest.success(() -> pendingListenersQueue.complete(latest.getTask().desiredBalance.lastConvergedIndex()));
return newState;
}
}
@@ -447,7 +467,7 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator {
// only for tests - in production, this happens after reconciliation
protected final void completeToLastConvergedIndex() {
- queue.complete(currentDesiredBalanceRef.get().lastConvergedIndex());
+ pendingListenersQueue.complete(currentDesiredBalanceRef.get().lastConvergedIndex());
}
private void recordTime(CounterMetric metric, Runnable action) {
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/PendingListenersQueue.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/PendingListenersQueue.java
index e1b58cf79ac0..5b14277f2c65 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/PendingListenersQueue.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/PendingListenersQueue.java
@@ -24,6 +24,10 @@ import java.util.Queue;
import static org.elasticsearch.cluster.service.ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME;
import static org.elasticsearch.cluster.service.MasterService.MASTER_UPDATE_THREAD_NAME;
+/**
+ * Registers listeners with an `index` number ({@link #add(long, ActionListener)}) and then completes them whenever the latest index number
+ * is greater than or equal to a listener's index value ({@link #complete(long)}).
+ */
public class PendingListenersQueue {
private static final Logger logger = LogManager.getLogger(PendingListenersQueue.class);
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardAssignment.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardAssignment.java
index 4fb9137cb454..2bd1b9bb2bb6 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardAssignment.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardAssignment.java
@@ -17,6 +17,14 @@ import java.util.Set;
import static java.util.Collections.unmodifiableSet;
+/**
+ * Simple shard assignment summary of shard copies for a particular index shard.
+ *
+ * @param nodeIds The node IDs of nodes holding a shard copy.
+ * @param total The total number of shard copies.
+ * @param unassigned The number of unassigned shard copies.
+ * @param ignored The number of ignored shard copies.
+ */
public record ShardAssignment(Set nodeIds, int total, int unassigned, int ignored) {
public ShardAssignment {
@@ -28,9 +36,13 @@ public record ShardAssignment(Set nodeIds, int total, int unassigned, in
return nodeIds.size();
}
- public static ShardAssignment ofAssignedShards(List routings) {
+ /**
+ * Helper method to instantiate a new ShardAssignment from a given list of ShardRouting instances. Assumes all shards are assigned.
+ */
+ public static ShardAssignment createFromAssignedShardRoutingsList(List routings) {
var nodeIds = new LinkedHashSet();
for (ShardRouting routing : routings) {
+ assert routing.unassignedInfo() == null : "Expected assigned shard copies only, unassigned info: " + routing.unassignedInfo();
nodeIds.add(routing.currentNodeId());
}
return new ShardAssignment(unmodifiableSet(nodeIds), routings.size(), 0, 0);
diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java
index 5327380d5cba..d2369139ebe1 100644
--- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java
@@ -159,6 +159,31 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
}
}
+ private record TimedListener(ActionListener listener, Recorder recorder) implements ActionListener {
+
+ @Override
+ public void onResponse(Void response) {
+ try (Releasable ignored = recorder.record("listener.onResponse")) {
+ listener.onResponse(null);
+ } catch (Exception e) {
+ assert false : e;
+ logger.error("exception thrown by listener.onResponse", e);
+ }
+ }
+
+ @Override
+ public void onFailure(Exception e) {
+ assert e != null;
+ try (Releasable ignored = recorder.record("listener.onFailure")) {
+ listener.onFailure(e);
+ } catch (Exception inner) {
+ e.addSuppressed(inner);
+ assert false : e;
+ logger.error(() -> "exception thrown by listener.onFailure", e);
+ }
+ }
+ }
+
@Override
protected synchronized void doStop() {
for (Map.Entry onGoingTimeout : timeoutClusterStateListeners.entrySet()) {
@@ -395,12 +420,14 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
final long startTimeMillis = threadPool.relativeTimeInMillis();
final Recorder stopWatch = new Recorder(threadPool, slowTaskThreadDumpTimeout);
+ final TimedListener timedListener = new TimedListener(clusterApplyListener, stopWatch);
final ClusterState newClusterState;
try {
try (Releasable ignored = stopWatch.record("running task [" + source + ']')) {
newClusterState = updateFunction.apply(previousClusterState);
}
} catch (Exception e) {
+ timedListener.onFailure(e);
TimeValue executionTime = getTimeSince(startTimeMillis);
logger.trace(
() -> format(
@@ -413,15 +440,14 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
e
);
warnAboutSlowTaskIfNeeded(executionTime, source, stopWatch);
- clusterApplyListener.onFailure(e);
return;
}
if (previousClusterState == newClusterState) {
+ timedListener.onResponse(null);
TimeValue executionTime = getTimeSince(startTimeMillis);
logger.debug("processing [{}]: took [{}] no change in cluster state", source, executionTime);
warnAboutSlowTaskIfNeeded(executionTime, source, stopWatch);
- clusterApplyListener.onResponse(null);
} else {
if (logger.isTraceEnabled()) {
logger.debug("cluster state updated, version [{}], source [{}]\n{}", newClusterState.version(), source, newClusterState);
@@ -431,6 +457,7 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
try {
setIsApplyingClusterState();
applyChanges(previousClusterState, newClusterState, source, stopWatch);
+ timedListener.onResponse(null);
TimeValue executionTime = getTimeSince(startTimeMillis);
logger.debug(
"processing [{}]: took [{}] done applying updated cluster state (version: {}, uuid: {})",
@@ -440,8 +467,11 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
newClusterState.stateUUID()
);
warnAboutSlowTaskIfNeeded(executionTime, source, stopWatch);
- clusterApplyListener.onResponse(null);
} catch (Exception e) {
+ // failing to apply a cluster state with an exception indicates a bug in validation or in one of the appliers; if we
+ // continue we will retry with the same cluster state but that might not help.
+ assert applicationMayFail();
+ timedListener.onFailure(e);
TimeValue executionTime = getTimeSince(startTimeMillis);
if (logger.isTraceEnabled()) {
logger.warn(() -> format("""
@@ -461,10 +491,6 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
e
);
}
- // failing to apply a cluster state with an exception indicates a bug in validation or in one of the appliers; if we
- // continue we will retry with the same cluster state but that might not help.
- assert applicationMayFail();
- clusterApplyListener.onFailure(e);
} finally {
clearIsApplyingClusterState();
}
diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
index afee2491672e..3c1f53ca4a2c 100644
--- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
+++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
@@ -36,7 +36,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper;
import org.elasticsearch.index.mapper.InferenceMetadataFieldsMapper;
import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.FsDirectoryFactory;
import org.elasticsearch.index.store.Store;
@@ -191,7 +190,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
FieldMapper.SYNTHETIC_SOURCE_KEEP_INDEX_SETTING,
IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_WRITE_SETTING,
IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_READ_SETTING,
- SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING,
+ IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING,
IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING,
InferenceMetadataFieldsMapper.USE_LEGACY_SEMANTIC_TEXT_FORMAT,
diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java
index aec9c108d898..16c6844f4640 100644
--- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java
+++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java
@@ -1727,7 +1727,7 @@ public class Setting implements ToXContentObject {
*
* @param key the key for the setting
* @param defaultValue the default value for this setting
- * @param properties properties properties for this setting like scope, filtering...
+ * @param properties properties for this setting like scope, filtering...
* @return the setting object
*/
public static Setting memorySizeSetting(String key, ByteSizeValue defaultValue, Property... properties) {
diff --git a/server/src/main/java/org/elasticsearch/health/HealthFeatures.java b/server/src/main/java/org/elasticsearch/health/HealthFeatures.java
deleted file mode 100644
index 72fc955320b9..000000000000
--- a/server/src/main/java/org/elasticsearch/health/HealthFeatures.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.health;
-
-import org.elasticsearch.features.FeatureSpecification;
-import org.elasticsearch.features.NodeFeature;
-
-import java.util.Set;
-
-public class HealthFeatures implements FeatureSpecification {
-
- public static final NodeFeature SUPPORTS_EXTENDED_REPOSITORY_INDICATOR = new NodeFeature("health.extended_repository_indicator", true);
-
- @Override
- public Set getFeatures() {
- return Set.of(SUPPORTS_EXTENDED_REPOSITORY_INDICATOR);
- }
-}
diff --git a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java
index aab9e972cba7..113e789727f0 100644
--- a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java
+++ b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java
@@ -24,7 +24,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.util.concurrent.RunOnce;
import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.features.FeatureService;
import org.elasticsearch.health.metadata.HealthMetadata;
import org.elasticsearch.health.node.action.HealthNodeNotDiscoveredException;
import org.elasticsearch.health.node.selection.HealthNode;
@@ -62,7 +61,6 @@ public class LocalHealthMonitor implements ClusterStateListener {
private final ClusterService clusterService;
private final ThreadPool threadPool;
private final Client client;
- private final FeatureService featureService;
private volatile TimeValue monitorInterval;
private volatile boolean enabled;
@@ -88,7 +86,6 @@ public class LocalHealthMonitor implements ClusterStateListener {
ClusterService clusterService,
ThreadPool threadPool,
Client client,
- FeatureService featureService,
List> healthTrackers
) {
this.threadPool = threadPool;
@@ -96,7 +93,6 @@ public class LocalHealthMonitor implements ClusterStateListener {
this.enabled = HealthNodeTaskExecutor.ENABLED_SETTING.get(settings);
this.clusterService = clusterService;
this.client = client;
- this.featureService = featureService;
this.healthTrackers = healthTrackers;
}
@@ -105,17 +101,9 @@ public class LocalHealthMonitor implements ClusterStateListener {
ClusterService clusterService,
ThreadPool threadPool,
Client client,
- FeatureService featureService,
List> healthTrackers
) {
- LocalHealthMonitor localHealthMonitor = new LocalHealthMonitor(
- settings,
- clusterService,
- threadPool,
- client,
- featureService,
- healthTrackers
- );
+ LocalHealthMonitor localHealthMonitor = new LocalHealthMonitor(settings, clusterService, threadPool, client, healthTrackers);
localHealthMonitor.registerListeners();
return localHealthMonitor;
}
diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java
index bf8f590b9f9a..d9a62f713050 100644
--- a/server/src/main/java/org/elasticsearch/index/IndexMode.java
+++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java
@@ -623,10 +623,7 @@ public enum IndexMode {
}
}
if (indexMode == LOOKUP) {
- return Settings.builder()
- .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
- .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-all")
- .build();
+ return Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build();
} else {
return Settings.EMPTY;
}
diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java
index 68f334b10ea5..284140460a43 100644
--- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java
+++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java
@@ -25,7 +25,6 @@ import org.elasticsearch.common.time.DateUtils;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.SourceFieldMapper;
@@ -52,7 +51,6 @@ import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_IGNORE_
import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING;
import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING;
import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING;
-import static org.elasticsearch.index.mapper.SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING;
/**
* This class encapsulates all index level settings and handles settings updates.
@@ -655,48 +653,6 @@ public final class IndexSettings {
Property.Final
);
- public static final Setting RECOVERY_USE_SYNTHETIC_SOURCE_SETTING = Setting.boolSetting(
- "index.recovery.use_synthetic_source",
- false,
- new Setting.Validator<>() {
- @Override
- public void validate(Boolean value) {}
-
- @Override
- public void validate(Boolean enabled, Map, Object> settings) {
- if (enabled == false) {
- return;
- }
-
- // Verify if synthetic source is enabled on the index; fail if it is not
- var indexMode = (IndexMode) settings.get(MODE);
- if (indexMode.defaultSourceMode() != SourceFieldMapper.Mode.SYNTHETIC) {
- var sourceMode = (SourceFieldMapper.Mode) settings.get(INDEX_MAPPER_SOURCE_MODE_SETTING);
- if (sourceMode != SourceFieldMapper.Mode.SYNTHETIC) {
- throw new IllegalArgumentException(
- String.format(
- Locale.ROOT,
- "The setting [%s] is only permitted when [%s] is set to [%s]. Current mode: [%s].",
- RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(),
- INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(),
- SourceFieldMapper.Mode.SYNTHETIC.name(),
- sourceMode.name()
- )
- );
- }
- }
- }
-
- @Override
- public Iterator> settings() {
- List> res = List.of(INDEX_MAPPER_SOURCE_MODE_SETTING, MODE);
- return res.iterator();
- }
- },
- Property.IndexScope,
- Property.Final
- );
-
/**
* Returns true if TSDB encoding is enabled. The default is true
*/
@@ -753,6 +709,60 @@ public final class IndexSettings {
Property.ServerlessPublic
);
+ public static final Setting INDEX_MAPPER_SOURCE_MODE_SETTING = Setting.enumSetting(
+ SourceFieldMapper.Mode.class,
+ settings -> {
+ final IndexMode indexMode = IndexSettings.MODE.get(settings);
+ return indexMode.defaultSourceMode().name();
+ },
+ "index.mapping.source.mode",
+ value -> {},
+ Setting.Property.Final,
+ Setting.Property.IndexScope
+ );
+
+ public static final Setting RECOVERY_USE_SYNTHETIC_SOURCE_SETTING = Setting.boolSetting(
+ "index.recovery.use_synthetic_source",
+ false,
+ new Setting.Validator<>() {
+ @Override
+ public void validate(Boolean value) {}
+
+ @Override
+ public void validate(Boolean enabled, Map, Object> settings) {
+ if (enabled == false) {
+ return;
+ }
+
+ // Verify if synthetic source is enabled on the index; fail if it is not
+ var indexMode = (IndexMode) settings.get(MODE);
+ if (indexMode.defaultSourceMode() != SourceFieldMapper.Mode.SYNTHETIC) {
+ var sourceMode = (SourceFieldMapper.Mode) settings.get(INDEX_MAPPER_SOURCE_MODE_SETTING);
+ if (sourceMode != SourceFieldMapper.Mode.SYNTHETIC) {
+ throw new IllegalArgumentException(
+ String.format(
+ Locale.ROOT,
+ "The setting [%s] is only permitted when [%s] is set to [%s]. Current mode: [%s].",
+ RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(),
+ INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(),
+ SourceFieldMapper.Mode.SYNTHETIC.name(),
+ sourceMode.name()
+ )
+ );
+ }
+ }
+ }
+
+ @Override
+ public Iterator> settings() {
+ List> res = List.of(INDEX_MAPPER_SOURCE_MODE_SETTING, MODE);
+ return res.iterator();
+ }
+ },
+ Property.IndexScope,
+ Property.Final
+ );
+
/**
* Legacy index setting, kept for 7.x BWC compatibility. This setting has no effect in 8.x. Do not use.
* TODO: Remove in 9.0
@@ -806,8 +816,6 @@ public final class IndexSettings {
}
}
- public static final NodeFeature IGNORE_ABOVE_INDEX_LEVEL_SETTING = new NodeFeature("mapper.ignore_above_index_level_setting", true);
-
private final Index index;
private final IndexVersion version;
private final Logger logger;
diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
index 8d3d1bde316e..40839d8e1878 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
@@ -819,7 +819,7 @@ public class InternalEngine extends Engine {
) throws IOException {
assert get.isReadFromTranslog();
translogGetCount.incrementAndGet();
- final TranslogDirectoryReader inMemoryReader = new TranslogDirectoryReader(
+ final DirectoryReader inMemoryReader = TranslogDirectoryReader.create(
shardId,
index,
mappingLookup,
@@ -3161,7 +3161,7 @@ public class InternalEngine extends Engine {
final Translog.Snapshot snapshot;
if (engineConfig.getIndexSettings().isRecoverySourceSyntheticEnabled()) {
snapshot = new LuceneSyntheticSourceChangesSnapshot(
- engineConfig.getMapperService().mappingLookup(),
+ engineConfig.getMapperService(),
searcher,
SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE,
maxChunkSize,
@@ -3173,6 +3173,7 @@ public class InternalEngine extends Engine {
);
} else {
snapshot = new LuceneChangesSnapshot(
+ engineConfig.getMapperService(),
searcher,
SearchBasedChangesSnapshot.DEFAULT_BATCH_SIZE,
fromSeqNo,
diff --git a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java
index d4466cbc17c5..30c6c639b9cf 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java
@@ -19,6 +19,7 @@ import org.elasticsearch.common.lucene.index.SequentialStoredFieldsLeafReader;
import org.elasticsearch.core.Assertions;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.fieldvisitor.FieldsVisitor;
+import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.transport.Transports;
@@ -46,6 +47,7 @@ public final class LuceneChangesSnapshot extends SearchBasedChangesSnapshot {
/**
* Creates a new "translog" snapshot from Lucene for reading operations whose seq# in the specified range.
*
+ * @param mapperService the mapper service for this index
* @param engineSearcher the internal engine searcher which will be taken over if the snapshot is opened successfully
* @param searchBatchSize the number of documents should be returned by each search
* @param fromSeqNo the min requesting seq# - inclusive
@@ -56,6 +58,7 @@ public final class LuceneChangesSnapshot extends SearchBasedChangesSnapshot {
* @param indexVersionCreated the version on which this index was created
*/
public LuceneChangesSnapshot(
+ MapperService mapperService,
Engine.Searcher engineSearcher,
int searchBatchSize,
long fromSeqNo,
@@ -65,7 +68,7 @@ public final class LuceneChangesSnapshot extends SearchBasedChangesSnapshot {
boolean accessStats,
IndexVersion indexVersionCreated
) throws IOException {
- super(engineSearcher, searchBatchSize, fromSeqNo, toSeqNo, requiredFullRange, accessStats, indexVersionCreated);
+ super(mapperService, engineSearcher, searchBatchSize, fromSeqNo, toSeqNo, requiredFullRange, accessStats, indexVersionCreated);
this.creationThread = Assertions.ENABLED ? Thread.currentThread() : null;
this.singleConsumer = singleConsumer;
this.parallelArray = new ParallelArray(this.searchBatchSize);
@@ -214,20 +217,24 @@ public final class LuceneChangesSnapshot extends SearchBasedChangesSnapshot {
if (leaf.reader() instanceof SequentialStoredFieldsLeafReader) {
storedFieldsReader = ((SequentialStoredFieldsLeafReader) leaf.reader()).getSequentialStoredFieldsReader();
storedFieldsReaderOrd = leaf.ord;
+ setNextSourceMetadataReader(leaf);
} else {
storedFieldsReader = null;
storedFieldsReaderOrd = -1;
}
}
}
+
if (storedFieldsReader != null) {
assert singleConsumer : "Sequential access optimization must not be enabled for multiple consumers";
assert parallelArray.useSequentialStoredFieldsReader;
assert storedFieldsReaderOrd == leaf.ord : storedFieldsReaderOrd + " != " + leaf.ord;
storedFieldsReader.document(segmentDocID, fields);
} else {
+ setNextSourceMetadataReader(leaf);
leaf.reader().storedFields().document(segmentDocID, fields);
}
+ final BytesReference source = fields.source() != null ? addSourceMetadata(fields.source(), segmentDocID) : null;
final Translog.Operation op;
final boolean isTombstone = parallelArray.isTombStone[docIndex];
@@ -241,7 +248,6 @@ public final class LuceneChangesSnapshot extends SearchBasedChangesSnapshot {
op = new Translog.Delete(id, seqNo, primaryTerm, version);
assert assertDocSoftDeleted(leaf.reader(), segmentDocID) : "Delete op but soft_deletes field is not set [" + op + "]";
} else {
- final BytesReference source = fields.source();
if (source == null) {
// TODO: Callers should ask for the range that source should be retained. Thus we should always
// check for the existence source once we make peer-recovery to send ops after the local checkpoint.
diff --git a/server/src/main/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshot.java
index 08508103181e..20154c20b363 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshot.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshot.java
@@ -13,12 +13,11 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.util.ArrayUtil;
-import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.fieldvisitor.LeafStoredFieldLoader;
import org.elasticsearch.index.fieldvisitor.StoredFieldLoader;
-import org.elasticsearch.index.mapper.MappingLookup;
+import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.SourceFieldMetrics;
import org.elasticsearch.index.mapper.SourceLoader;
import org.elasticsearch.index.translog.Translog;
@@ -66,7 +65,7 @@ public class LuceneSyntheticSourceChangesSnapshot extends SearchBasedChangesSnap
private final Deque<Translog.Operation> operationQueue = new LinkedList<>();
public LuceneSyntheticSourceChangesSnapshot(
- MappingLookup mappingLookup,
+ MapperService mapperService,
Engine.Searcher engineSearcher,
int searchBatchSize,
long maxMemorySizeInBytes,
@@ -76,13 +75,13 @@ public class LuceneSyntheticSourceChangesSnapshot extends SearchBasedChangesSnap
boolean accessStats,
IndexVersion indexVersionCreated
) throws IOException {
- super(engineSearcher, searchBatchSize, fromSeqNo, toSeqNo, requiredFullRange, accessStats, indexVersionCreated);
+ super(mapperService, engineSearcher, searchBatchSize, fromSeqNo, toSeqNo, requiredFullRange, accessStats, indexVersionCreated);
// a MapperService#updateMapping(...) of empty index may not have been invoked and then mappingLookup is empty
- assert engineSearcher.getDirectoryReader().maxDoc() == 0 || mappingLookup.isSourceSynthetic()
+ assert engineSearcher.getDirectoryReader().maxDoc() == 0 || mapperService.mappingLookup().isSourceSynthetic()
: "either an empty index or synthetic source must be enabled for proper functionality.";
// ensure we can buffer at least one document
this.maxMemorySizeInBytes = maxMemorySizeInBytes > 0 ? maxMemorySizeInBytes : 1;
- this.sourceLoader = mappingLookup.newSourceLoader(null, SourceFieldMetrics.NOOP);
+ this.sourceLoader = mapperService.mappingLookup().newSourceLoader(null, SourceFieldMetrics.NOOP);
Set<String> storedFields = sourceLoader.requiredStoredFields();
this.storedFieldLoader = StoredFieldLoader.create(false, storedFields);
this.lastSeenSeqNo = fromSeqNo - 1;
@@ -194,6 +193,7 @@ public class LuceneSyntheticSourceChangesSnapshot extends SearchBasedChangesSnap
leafFieldLoader = storedFieldLoader.getLoader(leafReaderContext, null);
leafSourceLoader = sourceLoader.leaf(leafReaderContext.reader(), null);
+ setNextSourceMetadataReader(leafReaderContext);
}
int segmentDocID = docRecord.docID() - docBase;
leafFieldLoader.advanceTo(segmentDocID);
@@ -229,17 +229,16 @@ public class LuceneSyntheticSourceChangesSnapshot extends SearchBasedChangesSnap
return null;
}
}
- BytesReference source = sourceLoader.source(fieldLoader, segmentDocID).internalSourceRef();
+ var sourceBytes = addSourceMetadata(sourceLoader.source(fieldLoader, segmentDocID).internalSourceRef(), segmentDocID);
return new Translog.Index(
fieldLoader.id(),
docRecord.seqNo(),
docRecord.primaryTerm(),
docRecord.version(),
- source,
+ sourceBytes,
fieldLoader.routing(),
-1 // autogenerated timestamp
);
}
}
-
}
diff --git a/server/src/main/java/org/elasticsearch/index/engine/SearchBasedChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/SearchBasedChangesSnapshot.java
index 191125c59705..8a96d4a2a252 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/SearchBasedChangesSnapshot.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/SearchBasedChangesSnapshot.java
@@ -22,12 +22,17 @@ import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopFieldCollectorManager;
+import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.core.IOUtils;
import org.elasticsearch.index.IndexVersion;
+import org.elasticsearch.index.mapper.InferenceMetadataFieldsMapper;
+import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
+import org.elasticsearch.index.mapper.ValueFetcher;
import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.search.lookup.Source;
import java.io.Closeable;
import java.io.IOException;
@@ -44,6 +49,7 @@ public abstract class SearchBasedChangesSnapshot implements Translog.Snapshot, C
private final IndexVersion indexVersionCreated;
private final IndexSearcher indexSearcher;
+ private final ValueFetcher sourceMetadataFetcher;
private final Closeable onClose;
protected final long fromSeqNo, toSeqNo;
@@ -67,6 +73,7 @@ public abstract class SearchBasedChangesSnapshot implements Translog.Snapshot, C
* @param indexVersionCreated Version of the index when it was created.
*/
protected SearchBasedChangesSnapshot(
+ MapperService mapperService,
Engine.Searcher engineSearcher,
int searchBatchSize,
long fromSeqNo,
@@ -103,6 +110,19 @@ public abstract class SearchBasedChangesSnapshot implements Translog.Snapshot, C
this.accessStats = accessStats;
this.totalHits = accessStats ? indexSearcher.count(rangeQuery(fromSeqNo, toSeqNo, indexVersionCreated)) : -1;
+ this.sourceMetadataFetcher = createSourceMetadataValueFetcher(mapperService, indexSearcher);
+ }
+
+ private ValueFetcher createSourceMetadataValueFetcher(MapperService mapperService, IndexSearcher searcher) {
+ if (mapperService.mappingLookup().inferenceFields().isEmpty()) {
+ return null;
+ }
+ var mapper = (InferenceMetadataFieldsMapper) mapperService.mappingLookup()
+ .getMapping()
+ .getMetadataMapperByName(InferenceMetadataFieldsMapper.NAME);
+ return mapper != null
+ ? mapper.fieldType().valueFetcher(mapperService.mappingLookup(), mapperService.getBitSetProducer(), searcher)
+ : null;
}
/**
@@ -184,6 +204,45 @@ public abstract class SearchBasedChangesSnapshot implements Translog.Snapshot, C
return results;
}
+ /**
+ * Sets the reader context to enable reading metadata that was removed from the {@code _source}.
+ * This method sets up the {@code sourceMetadataFetcher} with the provided {@link LeafReaderContext},
+ * ensuring it is ready to fetch metadata for subsequent operations.
+ *
+ *
+ * <p>Note: This method should be called before {@link #addSourceMetadata(BytesReference, int)} at the start of every leaf
+ * to ensure the metadata fetcher is properly initialized.
+ */
+ protected void setNextSourceMetadataReader(LeafReaderContext context) {
+ if (sourceMetadataFetcher != null) {
+ sourceMetadataFetcher.setNextReader(context);
+ }
+ }
+
+ /**
+ * Creates a new {@link Source} object by combining the provided {@code originalSource}
+ * with additional metadata fields. If the {@code sourceMetadataFetcher} is null or no metadata
+ * fields are fetched, the original source is returned unchanged.
+ *
+ * @param originalSourceBytes the original source bytes
+ * @param segmentDocID the document ID used to fetch metadata fields
+ * @return a new {@link Source} instance containing the original data and additional metadata,
+ * or the original source if no metadata is added
+ * @throws IOException if an error occurs while fetching metadata values
+ */
+ protected BytesReference addSourceMetadata(BytesReference originalSourceBytes, int segmentDocID) throws IOException {
+ if (sourceMetadataFetcher == null) {
+ return originalSourceBytes;
+ }
+ var originalSource = Source.fromBytes(originalSourceBytes);
+ List