Merge revision 34059c9dbd into multi-project

Tim Vernum 2025-01-17 16:32:15 +11:00
commit 552cec7ff0
306 changed files with 5663 additions and 2507 deletions

View file

@@ -29,6 +29,11 @@ if [[ "$WORKFLOW" == "snapshot" ]]; then
   VERSION_SUFFIX="-SNAPSHOT"
 fi
+if [[ -n "$VERSION_QUALIFER" ]]; then
+  ES_VERSION="${ES_VERSION}-${VERSION_QUALIFER}"
+  echo "Version qualifier specified. ES_VERSION=${ES_VERSION}."
+fi
 BEATS_BUILD_ID="$(./.ci/scripts/resolve-dra-manifest.sh beats "$RM_BRANCH" "$ES_VERSION" "$WORKFLOW")"
 echo "BEATS_BUILD_ID=$BEATS_BUILD_ID"
@@ -37,6 +42,7 @@ echo "ML_CPP_BUILD_ID=$ML_CPP_BUILD_ID"
 LICENSE_KEY_ARG=""
 BUILD_SNAPSHOT_ARG=""
+VERSION_QUALIFIER_ARG=""
 if [[ "$WORKFLOW" == "staging" ]]; then
   LICENSE_KEY=$(mktemp -d)/license.key
@@ -47,6 +53,10 @@ if [[ "$WORKFLOW" == "staging" ]]; then
   BUILD_SNAPSHOT_ARG="-Dbuild.snapshot=false"
 fi
+if [[ -n "$VERSION_QUALIFER" ]]; then
+  VERSION_QUALIFIER_ARG="-Dbuild.version_qualifier=$VERSION_QUALIFER"
+fi
 echo --- Building release artifacts
 .ci/scripts/run-gradle.sh -Ddra.artifacts=true \
@@ -56,12 +66,17 @@ echo --- Building release artifacts
   -Dcsv="$WORKSPACE/build/distributions/dependencies-${ES_VERSION}${VERSION_SUFFIX}.csv" \
   $LICENSE_KEY_ARG \
   $BUILD_SNAPSHOT_ARG \
+  $VERSION_QUALIFIER_ARG \
   buildReleaseArtifacts \
   exportCompressedDockerImages \
   :distribution:generateDependenciesReport
 PATH="$PATH:${JAVA_HOME}/bin" # Required by the following script
+if [[ -z "$VERSION_QUALIFER" ]]; then
   x-pack/plugin/sql/connectors/tableau/package.sh asm qualifier="$VERSION_SUFFIX"
+else
+  x-pack/plugin/sql/connectors/tableau/package.sh asm qualifier="$VERSION_QUALIFER"
+fi
 # we regenerate this file as part of the release manager invocation
 rm "build/distributions/elasticsearch-jdbc-${ES_VERSION}${VERSION_SUFFIX}.taco.sha512"
@@ -88,6 +103,7 @@ docker run --rm \
   --branch "$RM_BRANCH" \
   --commit "$BUILDKITE_COMMIT" \
   --workflow "$WORKFLOW" \
+  --qualifier "${VERSION_QUALIFER:-}" \
   --version "$ES_VERSION" \
   --artifact-set main \
   --dependency "beats:https://artifacts-${WORKFLOW}.elastic.co/beats/${BEATS_BUILD_ID}/manifest-${ES_VERSION}${VERSION_SUFFIX}.json" \

View file

@@ -8,7 +8,7 @@ source .buildkite/scripts/branches.sh
 for BRANCH in "${BRANCHES[@]}"; do
   if [[ "$BRANCH" == "main" ]]; then
-    continue
+    export VERSION_QUALIFIER="alpha1"
   fi
   INTAKE_PIPELINE_SLUG="elasticsearch-intake"
@@ -24,5 +24,6 @@ for BRANCH in "${BRANCHES[@]}"; do
       commit: "$LAST_GOOD_COMMIT"
       env:
         DRA_WORKFLOW: staging
+        VERSION_QUALIFIER: ${VERSION_QUALIFIER:-}
 EOF
 done
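
Taken together, the two scripts above thread the qualifier through the pipeline. A worked example with hypothetical values: the trigger script exports VERSION_QUALIFIER=alpha1 for the main branch and passes it into the triggered build's env, so the DRA workflow rewrites ES_VERSION from, say, 9.0.0 to 9.0.0-alpha1, adds -Dbuild.version_qualifier=alpha1 to the Gradle invocation, packages the Tableau connector with the qualifier instead of the snapshot suffix, and forwards --qualifier alpha1 to the release-manager container.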

View file

@@ -47,7 +47,7 @@ public class BytesArrayReadLongBenchmark {
     @Setup
     public void initResults() throws IOException {
         final BytesStreamOutput tmp = new BytesStreamOutput();
-        final long bytes = new ByteSizeValue(dataMb, ByteSizeUnit.MB).getBytes();
+        final long bytes = ByteSizeValue.of(dataMb, ByteSizeUnit.MB).getBytes();
         for (int i = 0; i < bytes / 8; i++) {
             tmp.writeLong(i);
         }

View file

@@ -47,7 +47,7 @@ public class PagedBytesReferenceReadLongBenchmark {
     @Setup
     public void initResults() throws IOException {
        final BytesStreamOutput tmp = new BytesStreamOutput();
-        final long bytes = new ByteSizeValue(dataMb, ByteSizeUnit.MB).getBytes();
+        final long bytes = ByteSizeValue.of(dataMb, ByteSizeUnit.MB).getBytes();
         for (int i = 0; i < bytes / 8; i++) {
             tmp.writeLong(i);
         }
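
Both benchmark hunks make the same substitution: the ByteSizeValue constructor is replaced by the static factory. A minimal sketch of the equivalence, with an illustrative value (1 MB here equals 1,048,576 bytes):

    // Before: new ByteSizeValue(1, ByteSizeUnit.MB).getBytes()
    // After: the static factory used in the hunks above
    long bytes = ByteSizeValue.of(1, ByteSizeUnit.MB).getBytes(); // 1048576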

View file

@@ -122,7 +122,7 @@ public class TestFixturesPlugin implements Plugin<Project> {
         composeExtension.getRemoveContainers().set(true);
         composeExtension.getCaptureContainersOutput()
             .set(EnumSet.of(LogLevel.INFO, LogLevel.DEBUG).contains(project.getGradle().getStartParameter().getLogLevel()));
-        composeExtension.getUseDockerComposeV2().set(true);
+        composeExtension.getUseDockerComposeV2().set(false);
         composeExtension.getExecutable().set(this.providerFactory.provider(() -> {
             String composePath = dockerSupport.get().getDockerAvailability().dockerComposePath();
             LOGGER.debug("Docker Compose path: {}", composePath);

View file

@@ -26,7 +26,7 @@ public interface TestClustersAware extends Task {
     Collection<ElasticsearchCluster> getClusters();

     @ServiceReference(REGISTRY_SERVICE_NAME)
-    Property<TestClustersRegistry> getRegistery();
+    Property<TestClustersRegistry> getRegistry();

     @ServiceReference(TEST_CLUSTER_TASKS_SERVICE)
     Property<TestClustersPlugin.TaskEventsService> getTasksService();
@@ -47,6 +47,14 @@ public interface TestClustersAware extends Task {
         getClusters().add(cluster);
     }

+    default Provider<TestClusterInfo> getClusterInfo(String clusterName) {
+        return getProject().getProviders().of(TestClusterValueSource.class, source -> {
+            source.getParameters().getService().set(getRegistry());
+            source.getParameters().getClusterName().set(clusterName);
+            source.getParameters().getPath().set(getProject().getIsolated().getPath());
+        });
+    }
+
     default void useCluster(Provider<ElasticsearchCluster> cluster) {
         useCluster(cluster.get());
     }

View file

@@ -249,7 +249,7 @@ public class TestClustersPlugin implements Plugin<Project> {
             .forEach(awareTask -> {
                 awareTask.doFirst(task -> {
                     awareTask.beforeStart();
-                    awareTask.getClusters().forEach(awareTask.getRegistery().get()::maybeStartCluster);
+                    awareTask.getClusters().forEach(awareTask.getRegistry().get()::maybeStartCluster);
                 });
             });
         });

View file

@@ -109,6 +109,23 @@ public abstract class TestClustersRegistry implements BuildService<BuildServiceP
         cluster.restart();
     }

+    public void nextNodeToNextVersion(Provider<ElasticsearchCluster> cluster) {
+        nextNodeToNextVersion(cluster.get());
+    }
+
+    public void nextNodeToNextVersion(ElasticsearchCluster cluster) {
+        nextNodeToNextVersion(cluster.getPath(), cluster.getName());
+    }
+
+    public void nextNodeToNextVersion(String path, String clusterName) {
+        ElasticsearchCluster cluster = runningClusters.stream()
+            .filter(c -> c.getPath().equals(path))
+            .filter(c -> c.getName().equals(clusterName))
+            .findFirst()
+            .orElseThrow();
+        cluster.nextNodeToNextVersion();
+    }
+
     public void storeProcess(String id, Process esProcess) {
         nodeProcesses.put(id, esProcess);
     }

View file

@@ -187,12 +187,20 @@ class APMJvmOptions {
     static void extractSecureSettings(SecureSettings secrets, Map<String, String> propertiesMap) {
         final Set<String> settingNames = secrets.getSettingNames();
         for (String key : List.of("api_key", "secret_token")) {
-            String prefix = "telemetry.";
-            if (settingNames.contains(prefix + key)) {
-                try (SecureString token = secrets.getString(prefix + key)) {
-                    propertiesMap.put(key, token.toString());
+            for (String prefix : List.of("telemetry.", "tracing.apm.")) {
+                if (settingNames.contains(prefix + key)) {
+                    if (propertiesMap.containsKey(key)) {
+                        throw new IllegalStateException(
+                            Strings.format("Duplicate telemetry setting: [telemetry.%s] and [tracing.apm.%s]", key, key)
+                        );
+                    }
+                    try (SecureString token = secrets.getString(prefix + key)) {
+                        propertiesMap.put(key, token.toString());
+                    }
                 }
             }
         }
     }
@@ -219,12 +227,44 @@ class APMJvmOptions {
     static Map<String, String> extractApmSettings(Settings settings) throws UserException {
         final Map<String, String> propertiesMap = new HashMap<>();

+        // tracing.apm.agent. is deprecated by telemetry.agent.
         final String telemetryAgentPrefix = "telemetry.agent.";
+        final String deprecatedTelemetryAgentPrefix = "tracing.apm.agent.";

         final Settings telemetryAgentSettings = settings.getByPrefix(telemetryAgentPrefix);
         telemetryAgentSettings.keySet().forEach(key -> propertiesMap.put(key, String.valueOf(telemetryAgentSettings.get(key))));

+        final Settings apmAgentSettings = settings.getByPrefix(deprecatedTelemetryAgentPrefix);
+        for (String key : apmAgentSettings.keySet()) {
+            if (propertiesMap.containsKey(key)) {
+                throw new IllegalStateException(
+                    Strings.format(
+                        "Duplicate telemetry setting: [%s%s] and [%s%s]",
+                        telemetryAgentPrefix,
+                        key,
+                        deprecatedTelemetryAgentPrefix,
+                        key
+                    )
+                );
+            }
+            propertiesMap.put(key, String.valueOf(apmAgentSettings.get(key)));
+        }
+
         StringJoiner globalLabels = extractGlobalLabels(telemetryAgentPrefix, propertiesMap, settings);
+        if (globalLabels.length() == 0) {
+            globalLabels = extractGlobalLabels(deprecatedTelemetryAgentPrefix, propertiesMap, settings);
+        } else {
+            StringJoiner tracingGlobalLabels = extractGlobalLabels(deprecatedTelemetryAgentPrefix, propertiesMap, settings);
+            if (tracingGlobalLabels.length() != 0) {
+                throw new IllegalArgumentException(
+                    "Cannot have global labels with tracing.agent prefix ["
+                        + globalLabels
+                        + "] and telemetry.apm.agent prefix ["
+                        + tracingGlobalLabels
+                        + "]"
+                );
+            }
+        }
         if (globalLabels.length() > 0) {
             propertiesMap.put("global_labels", globalLabels.toString());
         }
@@ -234,7 +274,7 @@ class APMJvmOptions {
             if (propertiesMap.containsKey(key)) {
                 throw new UserException(
                     ExitCodes.CONFIG,
-                    "Do not set a value for [telemetry.agent." + key + "], as this is configured automatically by Elasticsearch"
+                    "Do not set a value for [tracing.apm.agent." + key + "], as this is configured automatically by Elasticsearch"
                 );
             }
         }
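
Both hunks above implement the same policy at different granularities: accept either a telemetry.* key or its legacy tracing.apm.* twin, but refuse a configuration that defines both. A minimal standalone sketch of that pattern (simplified names, not the actual Elasticsearch classes):

    import java.util.Map;

    class FallbackResolver {
        // Resolve a key that may appear under a modern or a legacy prefix,
        // rejecting configurations that define both at once.
        static String resolve(Map<String, String> settings, String key) {
            String modern = settings.get("telemetry." + key);
            String legacy = settings.get("tracing.apm." + key);
            if (modern != null && legacy != null) {
                throw new IllegalStateException(
                    "Duplicate telemetry setting: [telemetry." + key + "] and [tracing.apm." + key + "]"
                );
            }
            return modern != null ? modern : legacy;
        }
    }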

View file

@@ -25,15 +25,18 @@ import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.function.Function;

 import static org.elasticsearch.test.MapMatcher.matchesMap;
 import static org.hamcrest.Matchers.allOf;
 import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.endsWith;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasEntry;
 import static org.hamcrest.Matchers.hasKey;
 import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.not;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
@@ -79,63 +82,109 @@ public class APMJvmOptionsTests extends ESTestCase {
     }

     public void testExtractSecureSettings() {
-        MockSecureSettings secureSettings = new MockSecureSettings();
-        secureSettings.setString("telemetry.secret_token", "token");
-        secureSettings.setString("telemetry.api_key", "key");
+        MockSecureSettings duplicateSecureSettings = new MockSecureSettings();

-        Map<String, String> propertiesMap = new HashMap<>();
-        APMJvmOptions.extractSecureSettings(secureSettings, propertiesMap);
+        for (String prefix : List.of("telemetry.", "tracing.apm.")) {
+            MockSecureSettings secureSettings = new MockSecureSettings();
+            secureSettings.setString(prefix + "secret_token", "token");
+            secureSettings.setString(prefix + "api_key", "key");
+            duplicateSecureSettings.setString(prefix + "api_key", "secret");
+
+            Map<String, String> propertiesMap = new HashMap<>();
+            APMJvmOptions.extractSecureSettings(secureSettings, propertiesMap);
+
+            assertThat(propertiesMap, matchesMap(Map.of("secret_token", "token", "api_key", "key")));
+        }

-        assertThat(propertiesMap, matchesMap(Map.of("secret_token", "token", "api_key", "key")));
+        Exception exception = expectThrows(
+            IllegalStateException.class,
+            () -> APMJvmOptions.extractSecureSettings(duplicateSecureSettings, new HashMap<>())
+        );
+        assertThat(exception.getMessage(), containsString("Duplicate telemetry setting"));
+        assertThat(exception.getMessage(), containsString("telemetry.api_key"));
+        assertThat(exception.getMessage(), containsString("tracing.apm.api_key"));
     }

     public void testExtractSettings() throws UserException {
-        Settings defaults = Settings.builder()
-            .put("telemetry.agent.server_url", "https://myurl:443")
-            .put("telemetry.agent.service_node_name", "instance-0000000001")
-            .build();
+        Function<String, Settings.Builder> buildSettings = (prefix) -> Settings.builder()
+            .put(prefix + "server_url", "https://myurl:443")
+            .put(prefix + "service_node_name", "instance-0000000001");

-        var name = "APM Tracing";
-        var deploy = "123";
-        var org = "456";
-        var extracted = APMJvmOptions.extractApmSettings(
-            Settings.builder()
-                .put(defaults)
-                .put("telemetry.agent.global_labels.deployment_name", name)
-                .put("telemetry.agent.global_labels.deployment_id", deploy)
-                .put("telemetry.agent.global_labels.organization_id", org)
-                .build()
-        );
+        for (String prefix : List.of("tracing.apm.agent.", "telemetry.agent.")) {
+            var name = "APM Tracing";
+            var deploy = "123";
+            var org = "456";
+            var extracted = APMJvmOptions.extractApmSettings(
+                buildSettings.apply(prefix)
+                    .put(prefix + "global_labels.deployment_name", name)
+                    .put(prefix + "global_labels.deployment_id", deploy)
+                    .put(prefix + "global_labels.organization_id", org)
+                    .build()
+            );

-        assertThat(
-            extracted,
-            allOf(
-                hasEntry("server_url", "https://myurl:443"),
-                hasEntry("service_node_name", "instance-0000000001"),
-                hasEntry(equalTo("global_labels"), not(endsWith(","))), // test that we have collapsed all global labels into one
-                not(hasKey("global_labels.organization_id")) // tests that we strip out the top level label keys
-            )
-        );
+            assertThat(
+                extracted,
+                allOf(
+                    hasEntry("server_url", "https://myurl:443"),
+                    hasEntry("service_node_name", "instance-0000000001"),
+                    hasEntry(equalTo("global_labels"), not(endsWith(","))), // test that we have collapsed all global labels into one
+                    not(hasKey("global_labels.organization_id")) // tests that we strip out the top level label keys
+                )
+            );

-        List<String> labels = Arrays.stream(extracted.get("global_labels").split(",")).toList();
-        assertThat(labels, hasSize(3));
-        assertThat(labels, containsInAnyOrder("deployment_name=APM Tracing", "organization_id=" + org, "deployment_id=" + deploy));
+            List<String> labels = Arrays.stream(extracted.get("global_labels").split(",")).toList();
+            assertThat(labels, hasSize(3));
+            assertThat(labels, containsInAnyOrder("deployment_name=APM Tracing", "organization_id=" + org, "deployment_id=" + deploy));

-        // test replacing with underscores and skipping empty
-        name = "APM=Tracing";
-        deploy = "";
-        org = ",456";
-        extracted = APMJvmOptions.extractApmSettings(
-            Settings.builder()
-                .put(defaults)
-                .put("telemetry.agent.global_labels.deployment_name", name)
-                .put("telemetry.agent.global_labels.deployment_id", deploy)
-                .put("telemetry.agent.global_labels.organization_id", org)
-                .build()
-        );
-        labels = Arrays.stream(extracted.get("global_labels").split(",")).toList();
-        assertThat(labels, hasSize(2));
-        assertThat(labels, containsInAnyOrder("deployment_name=APM_Tracing", "organization_id=_456"));
+            // test replacing with underscores and skipping empty
+            name = "APM=Tracing";
+            deploy = "";
+            org = ",456";
+            extracted = APMJvmOptions.extractApmSettings(
+                buildSettings.apply(prefix)
+                    .put(prefix + "global_labels.deployment_name", name)
+                    .put(prefix + "global_labels.deployment_id", deploy)
+                    .put(prefix + "global_labels.organization_id", org)
+                    .build()
+            );
+            labels = Arrays.stream(extracted.get("global_labels").split(",")).toList();
+            assertThat(labels, hasSize(2));
+            assertThat(labels, containsInAnyOrder("deployment_name=APM_Tracing", "organization_id=_456"));
+        }
+
+        IllegalStateException err = expectThrows(
+            IllegalStateException.class,
+            () -> APMJvmOptions.extractApmSettings(
+                Settings.builder()
+                    .put("tracing.apm.agent.server_url", "https://myurl:443")
+                    .put("telemetry.agent.server_url", "https://myurl-2:443")
+                    .build()
+            )
+        );
+        assertThat(err.getMessage(), is("Duplicate telemetry setting: [telemetry.agent.server_url] and [tracing.apm.agent.server_url]"));
+    }
+
+    public void testNoMixedLabels() {
+        String telemetryAgent = "telemetry.agent.";
+        String tracingAgent = "tracing.apm.agent.";
+        Settings settings = Settings.builder()
+            .put("tracing.apm.enabled", true)
+            .put(telemetryAgent + "server_url", "https://myurl:443")
+            .put(telemetryAgent + "service_node_name", "instance-0000000001")
+            .put(tracingAgent + "global_labels.deployment_id", "123")
+            .put(telemetryAgent + "global_labels.organization_id", "456")
+            .build();
+
+        IllegalArgumentException err = assertThrows(IllegalArgumentException.class, () -> APMJvmOptions.extractApmSettings(settings));
+        assertThat(
+            err.getMessage(),
+            is(
+                "Cannot have global labels with tracing.agent prefix [organization_id=456] and"
+                    + " telemetry.apm.agent prefix [deployment_id=123]"
+            )
+        );
     }

     private Path makeFakeAgentJar() throws IOException {

View file

@@ -0,0 +1,5 @@
+pr: 119001
+summary: Add support for specifying reindexing script for system index migration
+area: Infra/Core
+type: enhancement
+issues: []

View file

@@ -0,0 +1,5 @@
+pr: 119889
+summary: Optimize ST_EXTENT_AGG for `geo_shape` and `cartesian_shape`
+area: "ES|QL"
+type: enhancement
+issues: []

View file

@@ -1,11 +0,0 @@
-pr: 119926
-summary: "Deprecated tracing.apm.* settings got removed."
-area: Infra/Metrics
-type: breaking
-issues: []
-breaking:
-  title: "Deprecated tracing.apm.* settings got removed."
-  area: Cluster and node setting
-  details: Deprecated `tracing.apm.*` settings got removed, use respective `telemetry.*` / `telemetry.tracing.*` settings instead.
-  impact: 9.x nodes will refuse to start if any such setting (including secret settings) is still present.
-  notable: false

View file

@@ -0,0 +1,13 @@
+pr: 120142
+summary: Limit `ByteSizeUnit` to 2 decimals
+area: Infra/Core
+type: breaking
+issues: []
+breaking:
+  title: Limit `ByteSizeUnit` to 2 decimals
+  area: Cluster and node setting
+  details: In the past, byte values like `1.25 mb` were allowed but deprecated. Now, values with up to two decimal places are allowed,
+    unless the unit is bytes, in which case no decimals are allowed. Values with too many decimal places result in an error.
+  impact: Values with more than two decimal places, like `0.123 mb` will be rejected as an error,
+    where in the past, they'd be accepted with a deprecation warning.
+  notable: false
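
An illustrative consequence of the entry above, using the standard parsing entry point and a hypothetical setting name:

    ByteSizeValue ok = ByteSizeValue.parseBytesSizeValue("1.25mb", "my.setting");   // two decimals: accepted
    // ByteSizeValue.parseBytesSizeValue("0.123mb", "my.setting");                  // three decimals: now an error
    // ByteSizeValue.parseBytesSizeValue("1.5b", "my.setting");                     // decimals on plain bytes: rejected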

View file

@@ -0,0 +1,5 @@
+pr: 120207
+summary: Make `requests_per_second` configurable to throttle reindexing
+area: Data streams
+type: enhancement
+issues: []

View file

@@ -0,0 +1,5 @@
+pr: 120231
+summary: Add sanity check to `ReindexDatastreamIndexAction`
+area: Data streams
+type: enhancement
+issues: []

View file

@@ -1132,91 +1132,4 @@ Reindex from remote supports configurable SSL settings. These must be
 specified in the `elasticsearch.yml` file, with the exception of the
 secure settings, which you add in the Elasticsearch keystore.
 It is not possible to configure SSL in the body of the `_reindex` request.
+Refer to <<reindex-settings>>.
-The following settings are supported:
-
-`reindex.ssl.certificate_authorities`::
-List of paths to PEM encoded certificate files that should be trusted.
-You cannot specify both `reindex.ssl.certificate_authorities` and
-`reindex.ssl.truststore.path`.
-
-`reindex.ssl.truststore.path`::
-The path to the Java Keystore file that contains the certificates to trust.
-This keystore can be in "JKS" or "PKCS#12" format.
-You cannot specify both `reindex.ssl.certificate_authorities` and
-`reindex.ssl.truststore.path`.
-
-`reindex.ssl.truststore.password`::
-The password to the truststore (`reindex.ssl.truststore.path`).
-deprecated:[7.17.0] Prefer `reindex.ssl.truststore.secure_password` instead.
-This setting cannot be used with `reindex.ssl.truststore.secure_password`.
-
-`reindex.ssl.truststore.secure_password` (<<secure-settings,Secure>>)::
-The password to the truststore (`reindex.ssl.truststore.path`).
-This setting cannot be used with `reindex.ssl.truststore.password`.
-
-`reindex.ssl.truststore.type`::
-The type of the truststore (`reindex.ssl.truststore.path`).
-Must be either `jks` or `PKCS12`. If the truststore path ends in ".p12", ".pfx"
-or "pkcs12", this setting defaults to `PKCS12`. Otherwise, it defaults to `jks`.
-
-`reindex.ssl.verification_mode`::
-Indicates the type of verification to protect against man in the middle attacks
-and certificate forgery.
-One of `full` (verify the hostname and the certificate path), `certificate`
-(verify the certificate path, but not the hostname) or `none` (perform no
-verification - this is strongly discouraged in production environments).
-Defaults to `full`.
-
-`reindex.ssl.certificate`::
-Specifies the path to the PEM encoded certificate (or certificate chain) to be
-used for HTTP client authentication (if required by the remote cluster)
-This setting requires that `reindex.ssl.key` also be set.
-You cannot specify both `reindex.ssl.certificate` and `reindex.ssl.keystore.path`.
-
-`reindex.ssl.key`::
-Specifies the path to the PEM encoded private key associated with the
-certificate used for client authentication (`reindex.ssl.certificate`).
-You cannot specify both `reindex.ssl.key` and `reindex.ssl.keystore.path`.
-
-`reindex.ssl.key_passphrase`::
-Specifies the passphrase to decrypt the PEM encoded private key
-(`reindex.ssl.key`) if it is encrypted.
-deprecated:[7.17.0] Prefer `reindex.ssl.secure_key_passphrase` instead.
-Cannot be used with `reindex.ssl.secure_key_passphrase`.
-
-`reindex.ssl.secure_key_passphrase` (<<secure-settings,Secure>>)::
-Specifies the passphrase to decrypt the PEM encoded private key
-(`reindex.ssl.key`) if it is encrypted.
-Cannot be used with `reindex.ssl.key_passphrase`.
-
-`reindex.ssl.keystore.path`::
-Specifies the path to the keystore that contains a private key and certificate
-to be used for HTTP client authentication (if required by the remote cluster).
-This keystore can be in "JKS" or "PKCS#12" format.
-You cannot specify both `reindex.ssl.key` and `reindex.ssl.keystore.path`.
-
-`reindex.ssl.keystore.type`::
-The type of the keystore (`reindex.ssl.keystore.path`). Must be either `jks` or `PKCS12`.
-If the keystore path ends in ".p12", ".pfx" or "pkcs12", this setting defaults
-to `PKCS12`. Otherwise, it defaults to `jks`.
-
-`reindex.ssl.keystore.password`::
-The password to the keystore (`reindex.ssl.keystore.path`).
-deprecated:[7.17.0] Prefer `reindex.ssl.keystore.secure_password` instead.
-This setting cannot be used with `reindex.ssl.keystore.secure_password`.
-
-`reindex.ssl.keystore.secure_password` (<<secure-settings,Secure>>)::
-The password to the keystore (`reindex.ssl.keystore.path`).
-This setting cannot be used with `reindex.ssl.keystore.password`.
-
-`reindex.ssl.keystore.key_password`::
-The password for the key in the keystore (`reindex.ssl.keystore.path`).
-Defaults to the keystore password.
-deprecated:[7.17.0] Prefer `reindex.ssl.keystore.secure_key_password` instead.
-This setting cannot be used with `reindex.ssl.keystore.secure_key_password`.
-
-`reindex.ssl.keystore.secure_key_password` (<<secure-settings,Secure>>)::
-The password for the key in the keystore (`reindex.ssl.keystore.path`).
-Defaults to the keystore password. This setting cannot be used with
-`reindex.ssl.keystore.key_password`.

View file

@@ -27,13 +27,6 @@ cannot close open indices. Defaults to `true`.
 +
 NOTE: Closed indices still consume a significant amount of disk space.

-[[reindex-remote-whitelist]]
-// tag::reindex-remote-whitelist[]
-`reindex.remote.whitelist` {ess-icon}::
-(<<static-cluster-setting,Static>>)
-Specifies the hosts that can be <<reindex-from-remote,reindexed from remotely>>. Expects a YAML array of `host:port` strings. Consists of a comma-delimited list of `host:port` entries. Defaults to `["\*.io:*", "\*.com:*"]`.
-// end::reindex-remote-whitelist[]
-
 [[stack-templates-enabled]]
 `stack.templates.enabled`::
 +
@@ -52,3 +45,101 @@ This setting also affects the following built-in component templates:
 include::{es-ref-dir}/indices/put-component-template.asciidoc[tag=built-in-component-templates]
 --

+[discrete]
+[[reindex-settings]]
+==== Reindex settings
+
+[[reindex-remote-whitelist]]
+// tag::reindex-remote-whitelist[]
+`reindex.remote.whitelist` {ess-icon}::
+(<<static-cluster-setting,Static>>)
+Specifies the hosts that can be <<reindex-from-remote,reindexed from remotely>>. Expects a YAML array of `host:port` strings. Consists of a comma-delimited list of `host:port` entries. Defaults to `["\*.io:*", "\*.com:*"]`.
+// end::reindex-remote-whitelist[]
+
+`reindex.ssl.certificate`::
+Specifies the path to the PEM encoded certificate (or certificate chain) to be
+used for HTTP client authentication (if required by the remote cluster)
+This setting requires that `reindex.ssl.key` also be set.
+You cannot specify both `reindex.ssl.certificate` and `reindex.ssl.keystore.path`.
+
+`reindex.ssl.certificate_authorities`::
+List of paths to PEM encoded certificate files that should be trusted.
+You cannot specify both `reindex.ssl.certificate_authorities` and
+`reindex.ssl.truststore.path`.
+
+`reindex.ssl.key`::
+Specifies the path to the PEM encoded private key associated with the
+certificate used for client authentication (`reindex.ssl.certificate`).
+You cannot specify both `reindex.ssl.key` and `reindex.ssl.keystore.path`.
+
+`reindex.ssl.key_passphrase`::
+Specifies the passphrase to decrypt the PEM encoded private key
+(`reindex.ssl.key`) if it is encrypted.
+deprecated:[7.17.0] Prefer `reindex.ssl.secure_key_passphrase` instead.
+Cannot be used with `reindex.ssl.secure_key_passphrase`.
+
+`reindex.ssl.keystore.key_password`::
+The password for the key in the keystore (`reindex.ssl.keystore.path`).
+Defaults to the keystore password.
+deprecated:[7.17.0] Prefer `reindex.ssl.keystore.secure_key_password` instead.
+This setting cannot be used with `reindex.ssl.keystore.secure_key_password`.
+
+`reindex.ssl.keystore.password`::
+The password to the keystore (`reindex.ssl.keystore.path`).
+deprecated:[7.17.0] Prefer `reindex.ssl.keystore.secure_password` instead.
+This setting cannot be used with `reindex.ssl.keystore.secure_password`.
+
+`reindex.ssl.keystore.path`::
+Specifies the path to the keystore that contains a private key and certificate
+to be used for HTTP client authentication (if required by the remote cluster).
+This keystore can be in "JKS" or "PKCS#12" format.
+You cannot specify both `reindex.ssl.key` and `reindex.ssl.keystore.path`.
+
+`reindex.ssl.keystore.type`::
+The type of the keystore (`reindex.ssl.keystore.path`). Must be either `jks` or `PKCS12`.
+If the keystore path ends in ".p12", ".pfx" or "pkcs12", this setting defaults
+to `PKCS12`. Otherwise, it defaults to `jks`.
+
+`reindex.ssl.secure_key_passphrase` (<<secure-settings,Secure>>)::
+Specifies the passphrase to decrypt the PEM encoded private key
+(`reindex.ssl.key`) if it is encrypted.
+Cannot be used with `reindex.ssl.key_passphrase`.
+
+`reindex.ssl.keystore.secure_key_password` (<<secure-settings,Secure>>)::
+The password for the key in the keystore (`reindex.ssl.keystore.path`).
+Defaults to the keystore password. This setting cannot be used with
+`reindex.ssl.keystore.key_password`.
+
+`reindex.ssl.keystore.secure_password` (<<secure-settings,Secure>>)::
+The password to the keystore (`reindex.ssl.keystore.path`).
+This setting cannot be used with `reindex.ssl.keystore.password`.
+
+`reindex.ssl.truststore.password`::
+The password to the truststore (`reindex.ssl.truststore.path`).
+deprecated:[7.17.0] Prefer `reindex.ssl.truststore.secure_password` instead.
+This setting cannot be used with `reindex.ssl.truststore.secure_password`.
+
+`reindex.ssl.truststore.path`::
+The path to the Java Keystore file that contains the certificates to trust.
+This keystore can be in "JKS" or "PKCS#12" format.
+You cannot specify both `reindex.ssl.certificate_authorities` and
+`reindex.ssl.truststore.path`.
+
+`reindex.ssl.truststore.secure_password` (<<secure-settings,Secure>>)::
+The password to the truststore (`reindex.ssl.truststore.path`).
+This setting cannot be used with `reindex.ssl.truststore.password`.
+
+`reindex.ssl.truststore.type`::
+The type of the truststore (`reindex.ssl.truststore.path`).
+Must be either `jks` or `PKCS12`. If the truststore path ends in ".p12", ".pfx"
+or "pkcs12", this setting defaults to `PKCS12`. Otherwise, it defaults to `jks`.
+
+`reindex.ssl.verification_mode`::
+Indicates the type of verification to protect against man in the middle attacks
+and certificate forgery.
+One of `full` (verify the hostname and the certificate path), `certificate`
+(verify the certificate path, but not the hostname) or `none` (perform no
+verification - this is strongly discouraged in production environments).
+Defaults to `full`.
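
For orientation, a minimal remote-reindex TLS setup assembled from the settings above (hypothetical host and paths) would put `reindex.remote.whitelist: ["otherhost:9200"]`, `reindex.ssl.certificate_authorities: ["/path/to/ca.pem"]`, and `reindex.ssl.verification_mode: full` in `elasticsearch.yml`, keeping any truststore or key passwords in the keystore via the secure variants.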

View file

@ -7,9 +7,16 @@
* License v3.0 only", or the "Server Side Public License, v 1". * License v3.0 only", or the "Server Side Public License, v 1".
*/ */
import org.elasticsearch.gradle.internal.precommit.CheckForbiddenApisTask
apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.build'
apply plugin: 'elasticsearch.mrjar'
dependencies { dependencies {
implementation project(':server') implementation project(':server')
implementation project(':libs:logging') implementation project(':libs:logging')
} }
tasks.withType(CheckForbiddenApisTask).configureEach {
replaceSignatureFiles 'jdk-signatures'
}

View file

@@ -49,14 +49,13 @@ import java.net.URL;
 import java.net.URLClassLoader;
 import java.net.URLConnection;
 import java.net.URLStreamHandler;
-import java.net.spi.InetAddressResolver;
-import java.net.spi.InetAddressResolverProvider;
 import java.net.spi.URLStreamHandlerProvider;
 import java.security.NoSuchAlgorithmException;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.stream.Collectors;
+import java.util.stream.Stream;

 import javax.net.ssl.HttpsURLConnection;
 import javax.net.ssl.SSLContext;
@@ -73,25 +72,25 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
     public static final Thread NO_OP_SHUTDOWN_HOOK = new Thread(() -> {}, "Shutdown hook for testing");
     private final String prefix;

-    record CheckAction(CheckedRunnable<Exception> action, boolean isAlwaysDeniedToPlugins) {
+    record CheckAction(CheckedRunnable<Exception> action, boolean isAlwaysDeniedToPlugins, Integer fromJavaVersion) {
         /**
          * These cannot be granted to plugins, so our test plugins cannot test the "allowed" case.
-         * Used both for always-denied entitlements as well as those granted only to the server itself.
+         * Used both for always-denied entitlements and those granted only to the server itself.
         */
         static CheckAction deniedToPlugins(CheckedRunnable<Exception> action) {
-            return new CheckAction(action, true);
+            return new CheckAction(action, true, null);
         }

         static CheckAction forPlugins(CheckedRunnable<Exception> action) {
-            return new CheckAction(action, false);
+            return new CheckAction(action, false, null);
         }

         static CheckAction alwaysDenied(CheckedRunnable<Exception> action) {
-            return new CheckAction(action, true);
+            return new CheckAction(action, true, null);
         }
     }

-    private static final Map<String, CheckAction> checkActions = Map.ofEntries(
+    private static final Map<String, CheckAction> checkActions = Stream.of(
         entry("runtime_exit", deniedToPlugins(RestEntitlementsCheckAction::runtimeExit)),
         entry("runtime_halt", deniedToPlugins(RestEntitlementsCheckAction::runtimeHalt)),
         entry("system_exit", deniedToPlugins(RestEntitlementsCheckAction::systemExit)),
@@ -140,7 +139,10 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
         entry("proxySelector_setDefault", alwaysDenied(RestEntitlementsCheckAction::setDefaultProxySelector)),
         entry("responseCache_setDefault", alwaysDenied(RestEntitlementsCheckAction::setDefaultResponseCache)),
-        entry("createInetAddressResolverProvider", alwaysDenied(RestEntitlementsCheckAction::createInetAddressResolverProvider)),
+        entry(
+            "createInetAddressResolverProvider",
+            new CheckAction(VersionSpecificNetworkChecks::createInetAddressResolverProvider, true, 18)
+        ),
         entry("createURLStreamHandlerProvider", alwaysDenied(RestEntitlementsCheckAction::createURLStreamHandlerProvider)),
         entry("createURLWithURLStreamHandler", alwaysDenied(RestEntitlementsCheckAction::createURLWithURLStreamHandler)),
         entry("createURLWithURLStreamHandler2", alwaysDenied(RestEntitlementsCheckAction::createURLWithURLStreamHandler2)),
@@ -156,7 +158,9 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
         entry("socket_connect", forPlugins(NetworkAccessCheckActions::socketConnect)),
         entry("server_socket_bind", forPlugins(NetworkAccessCheckActions::serverSocketBind)),
         entry("server_socket_accept", forPlugins(NetworkAccessCheckActions::serverSocketAccept))
-    );
+    )
+        .filter(entry -> entry.getValue().fromJavaVersion() == null || Runtime.version().feature() >= entry.getValue().fromJavaVersion())
+        .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue));

     private static void createURLStreamHandlerProvider() {
         var x = new URLStreamHandlerProvider() {
@@ -187,20 +191,6 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
         });
     }

-    private static void createInetAddressResolverProvider() {
-        var x = new InetAddressResolverProvider() {
-            @Override
-            public InetAddressResolver get(Configuration configuration) {
-                return null;
-            }
-
-            @Override
-            public String name() {
-                return "TEST";
-            }
-        };
-    }
-
     private static void setDefaultResponseCache() {
         ResponseCache.setDefault(null);
     }
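
The filter added above gates each check on the running JVM. A small sketch of the same idiom in isolation; the threshold 18 mirrors the InetAddressResolver SPI, which arrived in Java 18:

    // Only advertise a check when the current runtime meets its minimum version.
    Integer fromJavaVersion = 18; // null would mean "no minimum"
    boolean exposed = fromJavaVersion == null || Runtime.version().feature() >= fromJavaVersion;
    // On a JDK 17 runtime 'exposed' is false, so the "createInetAddressResolverProvider"
    // entry is dropped from checkActions and never reachable over REST.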

View file

@@ -0,0 +1,14 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.entitlement.qa.common;
+
+class VersionSpecificNetworkChecks {
+    static void createInetAddressResolverProvider() {}
+}

View file

@@ -0,0 +1,29 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.entitlement.qa.common;
+
+import java.net.spi.InetAddressResolver;
+import java.net.spi.InetAddressResolverProvider;
+
+class VersionSpecificNetworkChecks {
+    static void createInetAddressResolverProvider() {
+        var x = new InetAddressResolverProvider() {
+            @Override
+            public InetAddressResolver get(Configuration configuration) {
+                return null;
+            }
+
+            @Override
+            public String name() {
+                return "TEST";
+            }
+        };
+    }
+}
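
Read together with the build file above, the two copies of VersionSpecificNetworkChecks follow the multi-release pattern: the no-op variant compiles against the base Java version, while the variant using the InetAddressResolver SPI presumably lives in a versioned source set wired up by the elasticsearch.mrjar plugin, so runtimes on Java 18 or later pick up the real implementation. The source-set layout is not visible in this excerpt; that mapping is an inference.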

View file

@@ -116,6 +116,9 @@ public class SpatialEnvelopeVisitor implements GeometryVisitor<Boolean, RuntimeE
         boolean isValid();

         Rectangle getResult();

+        /** To allow for memory optimizations through object reuse, the visitor can be reset to its initial state. */
+        void reset();
     }

     /**
@@ -124,18 +127,14 @@ public class SpatialEnvelopeVisitor implements GeometryVisitor<Boolean, RuntimeE
     */
    public static class CartesianPointVisitor implements PointVisitor {
        private double minX = Double.POSITIVE_INFINITY;
-       private double minY = Double.POSITIVE_INFINITY;
        private double maxX = Double.NEGATIVE_INFINITY;
        private double maxY = Double.NEGATIVE_INFINITY;
+       private double minY = Double.POSITIVE_INFINITY;

        public double getMinX() {
            return minX;
        }

-       public double getMinY() {
-           return minY;
-       }
-
        public double getMaxX() {
            return maxX;
        }
@@ -144,12 +143,16 @@ public class SpatialEnvelopeVisitor implements GeometryVisitor<Boolean, RuntimeE
            return maxY;
        }

+       public double getMinY() {
+           return minY;
+       }
+
        @Override
        public void visitPoint(double x, double y) {
            minX = Math.min(minX, x);
-           minY = Math.min(minY, y);
            maxX = Math.max(maxX, x);
            maxY = Math.max(maxY, y);
+           minY = Math.min(minY, y);
        }

        @Override
@@ -160,9 +163,9 @@ public class SpatialEnvelopeVisitor implements GeometryVisitor<Boolean, RuntimeE
            );
        }
        this.minX = Math.min(this.minX, minX);
-       this.minY = Math.min(this.minY, minY);
        this.maxX = Math.max(this.maxX, maxX);
        this.maxY = Math.max(this.maxY, maxY);
+       this.minY = Math.min(this.minY, minY);
    }

    @Override
@@ -174,6 +177,14 @@ public class SpatialEnvelopeVisitor implements GeometryVisitor<Boolean, RuntimeE
        public Rectangle getResult() {
            return new Rectangle(minX, maxX, maxY, minY);
        }
+
+       @Override
+       public void reset() {
+           minX = Double.POSITIVE_INFINITY;
+           maxX = Double.NEGATIVE_INFINITY;
+           maxY = Double.NEGATIVE_INFINITY;
+           minY = Double.POSITIVE_INFINITY;
+       }
    }

    /**
@@ -186,12 +197,12 @@ public class SpatialEnvelopeVisitor implements GeometryVisitor<Boolean, RuntimeE
     * </ul>
     */
    public static class GeoPointVisitor implements PointVisitor {
-       protected double minY = Double.POSITIVE_INFINITY;
-       protected double maxY = Double.NEGATIVE_INFINITY;
-       protected double minNegX = Double.POSITIVE_INFINITY;
-       protected double maxNegX = Double.NEGATIVE_INFINITY;
-       protected double minPosX = Double.POSITIVE_INFINITY;
-       protected double maxPosX = Double.NEGATIVE_INFINITY;
+       protected double top = Double.NEGATIVE_INFINITY;
+       protected double bottom = Double.POSITIVE_INFINITY;
+       protected double negLeft = Double.POSITIVE_INFINITY;
+       protected double negRight = Double.NEGATIVE_INFINITY;
+       protected double posLeft = Double.POSITIVE_INFINITY;
+       protected double posRight = Double.NEGATIVE_INFINITY;

        private final WrapLongitude wrapLongitude;
@@ -199,69 +210,104 @@ public class SpatialEnvelopeVisitor implements GeometryVisitor<Boolean, RuntimeE
            this.wrapLongitude = wrapLongitude;
        }

+       public double getTop() {
+           return top;
+       }
+
+       public double getBottom() {
+           return bottom;
+       }
+
+       public double getNegLeft() {
+           return negLeft;
+       }
+
+       public double getNegRight() {
+           return negRight;
+       }
+
+       public double getPosLeft() {
+           return posLeft;
+       }
+
+       public double getPosRight() {
+           return posRight;
+       }
+
        @Override
        public void visitPoint(double x, double y) {
-           minY = Math.min(minY, y);
-           maxY = Math.max(maxY, y);
+           bottom = Math.min(bottom, y);
+           top = Math.max(top, y);
            visitLongitude(x);
        }

        @Override
        public void visitRectangle(double minX, double maxX, double maxY, double minY) {
-           this.minY = Math.min(this.minY, minY);
-           this.maxY = Math.max(this.maxY, maxY);
+           // TODO: Fix bug with rectangle crossing the dateline (see Extent.addRectangle for correct behaviour)
+           this.bottom = Math.min(this.bottom, minY);
+           this.top = Math.max(this.top, maxY);
            visitLongitude(minX);
            visitLongitude(maxX);
        }

        private void visitLongitude(double x) {
            if (x >= 0) {
-               minPosX = Math.min(minPosX, x);
-               maxPosX = Math.max(maxPosX, x);
+               posLeft = Math.min(posLeft, x);
+               posRight = Math.max(posRight, x);
            } else {
-               minNegX = Math.min(minNegX, x);
-               maxNegX = Math.max(maxNegX, x);
+               negLeft = Math.min(negLeft, x);
+               negRight = Math.max(negRight, x);
            }
        }

        @Override
        public boolean isValid() {
-           return minY != Double.POSITIVE_INFINITY;
+           return bottom != Double.POSITIVE_INFINITY;
        }

        @Override
        public Rectangle getResult() {
-           return getResult(minNegX, minPosX, maxNegX, maxPosX, maxY, minY, wrapLongitude);
+           return getResult(top, bottom, negLeft, negRight, posLeft, posRight, wrapLongitude);
        }

-       protected static Rectangle getResult(
-           double minNegX,
-           double minPosX,
-           double maxNegX,
-           double maxPosX,
-           double maxY,
-           double minY,
+       @Override
+       public void reset() {
+           bottom = Double.POSITIVE_INFINITY;
+           top = Double.NEGATIVE_INFINITY;
+           negLeft = Double.POSITIVE_INFINITY;
+           negRight = Double.NEGATIVE_INFINITY;
+           posLeft = Double.POSITIVE_INFINITY;
+           posRight = Double.NEGATIVE_INFINITY;
+       }
+
+       public static Rectangle getResult(
+           double top,
+           double bottom,
+           double negLeft,
+           double negRight,
+           double posLeft,
+           double posRight,
            WrapLongitude wrapLongitude
        ) {
-           assert Double.isFinite(maxY);
-           if (Double.isInfinite(minPosX)) {
-               return new Rectangle(minNegX, maxNegX, maxY, minY);
-           } else if (Double.isInfinite(minNegX)) {
-               return new Rectangle(minPosX, maxPosX, maxY, minY);
+           assert Double.isFinite(top);
+           if (posRight == Double.NEGATIVE_INFINITY) {
+               return new Rectangle(negLeft, negRight, top, bottom);
+           } else if (negLeft == Double.POSITIVE_INFINITY) {
+               return new Rectangle(posLeft, posRight, top, bottom);
            } else {
                return switch (wrapLongitude) {
-                   case NO_WRAP -> new Rectangle(minNegX, maxPosX, maxY, minY);
-                   case WRAP -> maybeWrap(minNegX, minPosX, maxNegX, maxPosX, maxY, minY);
+                   case NO_WRAP -> new Rectangle(negLeft, posRight, top, bottom);
+                   case WRAP -> maybeWrap(top, bottom, negLeft, negRight, posLeft, posRight);
                };
            }
        }

-       private static Rectangle maybeWrap(double minNegX, double minPosX, double maxNegX, double maxPosX, double maxY, double minY) {
-           double unwrappedWidth = maxPosX - minNegX;
-           double wrappedWidth = 360 + maxNegX - minPosX;
+       private static Rectangle maybeWrap(double top, double bottom, double negLeft, double negRight, double posLeft, double posRight) {
+           double unwrappedWidth = posRight - negLeft;
+           double wrappedWidth = 360 + negRight - posLeft;
            return unwrappedWidth <= wrappedWidth
-               ? new Rectangle(minNegX, maxPosX, maxY, minY)
-               : new Rectangle(minPosX, maxNegX, maxY, minY);
+               ? new Rectangle(negLeft, posRight, top, bottom)
+               : new Rectangle(posLeft, negRight, top, bottom);
        }
    }
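
The renamed maybeWrap logic is easiest to see with numbers. Suppose the visitor has only seen points at longitudes 170 and -170: then negLeft = negRight = -170 and posLeft = posRight = 170, so unwrappedWidth = posRight - negLeft = 340 while wrappedWidth = 360 + negRight - posLeft = 20. Since 340 <= 20 is false, the dateline-crossing rectangle (left = 170, right = -170) is returned, which is the right answer for two points only 20 degrees apart rather than a box spanning nearly the whole globe.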

View file

@@ -92,7 +92,14 @@ public class APM extends Plugin implements NetworkPlugin, TelemetryPlugin {
             APMAgentSettings.TELEMETRY_TRACING_ENABLED_SETTING,
             APMAgentSettings.TELEMETRY_TRACING_NAMES_INCLUDE_SETTING,
             APMAgentSettings.TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING,
-            APMAgentSettings.TELEMETRY_TRACING_SANITIZE_FIELD_NAMES
+            APMAgentSettings.TELEMETRY_TRACING_SANITIZE_FIELD_NAMES,
+            // The settings below are deprecated and are currently kept as fallback.
+            APMAgentSettings.TRACING_APM_SECRET_TOKEN_SETTING,
+            APMAgentSettings.TRACING_APM_API_KEY_SETTING,
+            APMAgentSettings.TRACING_APM_ENABLED_SETTING,
+            APMAgentSettings.TRACING_APM_NAMES_INCLUDE_SETTING,
+            APMAgentSettings.TRACING_APM_NAMES_EXCLUDE_SETTING,
+            APMAgentSettings.TRACING_APM_SANITIZE_FIELD_NAMES
         );
     }
 }

View file

@@ -25,7 +25,9 @@ import java.security.PrivilegedAction;
 import java.util.List;
 import java.util.Objects;
 import java.util.Set;
+import java.util.function.Function;

+import static org.elasticsearch.common.settings.Setting.Property.Deprecated;
 import static org.elasticsearch.common.settings.Setting.Property.NodeScope;
 import static org.elasticsearch.common.settings.Setting.Property.OperatorDynamic;
@@ -99,6 +101,9 @@ public class APMAgentSettings {
     private static final String TELEMETRY_SETTING_PREFIX = "telemetry.";

+    // The old legacy prefix
+    private static final String LEGACY_TRACING_APM_SETTING_PREFIX = "tracing.apm.";
+
     /**
      * Allow-list of APM agent config keys users are permitted to configure.
      * @see <a href="https://www.elastic.co/guide/en/apm/agent/java/current/configuration.html">APM Java Agent Configuration</a>
@@ -243,24 +248,56 @@ public class APMAgentSettings {
     public static final Setting.AffixSetting<String> APM_AGENT_SETTINGS = Setting.prefixKeySetting(
         TELEMETRY_SETTING_PREFIX + "agent.",
-        null, // no fallback
-        (namespace, qualifiedKey) -> concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic)
+        LEGACY_TRACING_APM_SETTING_PREFIX + "agent.",
+        (namespace, qualifiedKey) -> qualifiedKey.startsWith(LEGACY_TRACING_APM_SETTING_PREFIX)
+            ? concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic, Deprecated)
+            : concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic)
     );

-    public static final Setting<List<String>> TELEMETRY_TRACING_NAMES_INCLUDE_SETTING = Setting.stringListSetting(
+    /**
+     * @deprecated in favor of TELEMETRY_TRACING_NAMES_INCLUDE_SETTING.
+     */
+    @Deprecated
+    public static final Setting<List<String>> TRACING_APM_NAMES_INCLUDE_SETTING = Setting.stringListSetting(
+        LEGACY_TRACING_APM_SETTING_PREFIX + "names.include",
+        OperatorDynamic,
+        NodeScope,
+        Deprecated
+    );
+
+    public static final Setting<List<String>> TELEMETRY_TRACING_NAMES_INCLUDE_SETTING = Setting.listSetting(
         TELEMETRY_SETTING_PREFIX + "tracing.names.include",
+        TRACING_APM_NAMES_INCLUDE_SETTING,
+        Function.identity(),
         OperatorDynamic,
         NodeScope
     );

-    public static final Setting<List<String>> TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING = Setting.stringListSetting(
+    /**
+     * @deprecated in favor of TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING.
+     */
+    @Deprecated
+    public static final Setting<List<String>> TRACING_APM_NAMES_EXCLUDE_SETTING = Setting.stringListSetting(
+        LEGACY_TRACING_APM_SETTING_PREFIX + "names.exclude",
+        OperatorDynamic,
+        NodeScope,
+        Deprecated
+    );
+
+    public static final Setting<List<String>> TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING = Setting.listSetting(
         TELEMETRY_SETTING_PREFIX + "tracing.names.exclude",
+        TRACING_APM_NAMES_EXCLUDE_SETTING,
+        Function.identity(),
         OperatorDynamic,
         NodeScope
     );

-    public static final Setting<List<String>> TELEMETRY_TRACING_SANITIZE_FIELD_NAMES = Setting.stringListSetting(
-        TELEMETRY_SETTING_PREFIX + "tracing.sanitize_field_names",
+    /**
+     * @deprecated in favor of TELEMETRY_TRACING_SANITIZE_FIELD_NAMES.
+     */
+    @Deprecated
+    public static final Setting<List<String>> TRACING_APM_SANITIZE_FIELD_NAMES = Setting.stringListSetting(
+        LEGACY_TRACING_APM_SETTING_PREFIX + "sanitize_field_names",
         List.of(
             "password",
             "passwd",
@@ -276,12 +313,33 @@ public class APMAgentSettings {
             "set-cookie"
         ),
         OperatorDynamic,
+        NodeScope,
+        Deprecated
+    );
+
+    public static final Setting<List<String>> TELEMETRY_TRACING_SANITIZE_FIELD_NAMES = Setting.listSetting(
+        TELEMETRY_SETTING_PREFIX + "tracing.sanitize_field_names",
+        TRACING_APM_SANITIZE_FIELD_NAMES,
+        Function.identity(),
+        OperatorDynamic,
         NodeScope
     );

+    /**
+     * @deprecated in favor of TELEMETRY_TRACING_ENABLED_SETTING.
+     */
+    @Deprecated
+    public static final Setting<Boolean> TRACING_APM_ENABLED_SETTING = Setting.boolSetting(
+        LEGACY_TRACING_APM_SETTING_PREFIX + "enabled",
+        false,
+        OperatorDynamic,
+        NodeScope,
+        Deprecated
+    );
+
     public static final Setting<Boolean> TELEMETRY_TRACING_ENABLED_SETTING = Setting.boolSetting(
         TELEMETRY_SETTING_PREFIX + "tracing.enabled",
-        false,
+        TRACING_APM_ENABLED_SETTING,
         OperatorDynamic,
         NodeScope
     );
@@ -293,13 +351,33 @@ public class APMAgentSettings {
         NodeScope
     );

+    /**
+     * @deprecated in favor of TELEMETRY_SECRET_TOKEN_SETTING.
+     */
+    @Deprecated
+    public static final Setting<SecureString> TRACING_APM_SECRET_TOKEN_SETTING = SecureSetting.secureString(
+        LEGACY_TRACING_APM_SETTING_PREFIX + "secret_token",
+        null,
+        Deprecated
+    );
+
     public static final Setting<SecureString> TELEMETRY_SECRET_TOKEN_SETTING = SecureSetting.secureString(
         TELEMETRY_SETTING_PREFIX + "secret_token",
-        null
+        TRACING_APM_SECRET_TOKEN_SETTING
     );

+    /**
+     * @deprecated in favor of TELEMETRY_API_KEY_SETTING.
+     */
+    @Deprecated
+    public static final Setting<SecureString> TRACING_APM_API_KEY_SETTING = SecureSetting.secureString(
+        LEGACY_TRACING_APM_SETTING_PREFIX + "api_key",
+        null,
+        Deprecated
+    );
+
     public static final Setting<SecureString> TELEMETRY_API_KEY_SETTING = SecureSetting.secureString(
         TELEMETRY_SETTING_PREFIX + "api_key",
-        null
+        TRACING_APM_API_KEY_SETTING
     );
 }
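
A minimal sketch of how these fallbacks behave at resolution time, grounded in the Setting fallback mechanism and the legacy-setting tests below:

    // Only the legacy key is present in the node settings.
    Settings settings = Settings.builder().put("tracing.apm.enabled", true).build();
    // The new setting resolves through its fallback; using the deprecated key
    // also logs a deprecation warning (see testEnableTracingUsingLegacySetting).
    boolean enabled = APMAgentSettings.TELEMETRY_TRACING_ENABLED_SETTING.get(settings); // true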

View file

@ -11,6 +11,8 @@ package org.elasticsearch.telemetry.apm.internal;
import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.MockSecureSettings;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase;
import org.mockito.Mockito; import org.mockito.Mockito;
@@ -19,13 +21,21 @@ import java.util.List;
 import java.util.Set;
 import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.APM_AGENT_SETTINGS;
+import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_API_KEY_SETTING;
 import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING;
+import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_SECRET_TOKEN_SETTING;
 import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_ENABLED_SETTING;
 import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING;
 import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_NAMES_INCLUDE_SETTING;
 import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_SANITIZE_FIELD_NAMES;
+import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_API_KEY_SETTING;
+import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_ENABLED_SETTING;
+import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_NAMES_EXCLUDE_SETTING;
+import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_NAMES_INCLUDE_SETTING;
+import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_SANITIZE_FIELD_NAMES;
+import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_SECRET_TOKEN_SETTING;
+import static org.hamcrest.Matchers.containsInAnyOrder;
 import static org.hamcrest.Matchers.containsString;
-import static org.hamcrest.Matchers.hasEntry;
 import static org.hamcrest.Matchers.hasItem;
 import static org.mockito.Mockito.clearInvocations;
 import static org.mockito.Mockito.mock;
@@ -60,6 +70,14 @@ public class APMAgentSettingsTests extends ESTestCase {
         }
     }
+    public void testEnableTracingUsingLegacySetting() {
+        Settings settings = Settings.builder().put(TRACING_APM_ENABLED_SETTING.getKey(), true).build();
+        apmAgentSettings.initAgentSystemProperties(settings);
+        verify(apmAgentSettings).setAgentSetting("recording", "true");
+        assertWarnings("[tracing.apm.enabled] setting was deprecated in Elasticsearch and will be removed in a future release.");
+    }
     public void testEnableMetrics() {
         for (boolean tracingEnabled : List.of(true, false)) {
             clearInvocations(apmAgentSettings, apmTelemetryProvider.getMeterService());
@@ -103,6 +121,14 @@
         }
     }
+    public void testDisableTracingUsingLegacySetting() {
+        Settings settings = Settings.builder().put(TRACING_APM_ENABLED_SETTING.getKey(), false).build();
+        apmAgentSettings.initAgentSystemProperties(settings);
+        verify(apmAgentSettings).setAgentSetting("recording", "false");
+        assertWarnings("[tracing.apm.enabled] setting was deprecated in Elasticsearch and will be removed in a future release.");
+    }
     public void testDisableMetrics() {
         for (boolean tracingEnabled : List.of(true, false)) {
             clearInvocations(apmAgentSettings, apmTelemetryProvider.getMeterService());
@@ -155,18 +181,70 @@
         verify(apmAgentSettings).setAgentSetting("span_compression_enabled", "true");
     }
+    public void testSetAgentsSettingsWithLegacyPrefix() {
+        Settings settings = Settings.builder()
+            .put(TELEMETRY_TRACING_ENABLED_SETTING.getKey(), true)
+            .put("tracing.apm.agent.span_compression_enabled", "true")
+            .build();
+        apmAgentSettings.initAgentSystemProperties(settings);
+        verify(apmAgentSettings).setAgentSetting("recording", "true");
+        verify(apmAgentSettings).setAgentSetting("span_compression_enabled", "true");
+        assertWarnings(
+            "[tracing.apm.agent.span_compression_enabled] setting was deprecated in Elasticsearch and will be removed in a future release."
+        );
+    }
     /**
      * Check that invalid or forbidden APM agent settings are rejected.
      */
     public void testRejectForbiddenOrUnknownAgentSettings() {
-        String prefix = APM_AGENT_SETTINGS.getKey();
-        Settings settings = Settings.builder().put(prefix + "unknown", "true").build();
-        Exception exception = expectThrows(IllegalArgumentException.class, () -> APM_AGENT_SETTINGS.getAsMap(settings));
-        assertThat(exception.getMessage(), containsString("[" + prefix + "unknown]"));
+        List<String> prefixes = List.of(APM_AGENT_SETTINGS.getKey(), "tracing.apm.agent.");
+        for (String prefix : prefixes) {
+            Settings settings = Settings.builder().put(prefix + "unknown", "true").build();
+            Exception exception = expectThrows(IllegalArgumentException.class, () -> APM_AGENT_SETTINGS.getAsMap(settings));
+            assertThat(exception.getMessage(), containsString("[" + prefix + "unknown]"));
+        }
         // though, accept / ignore nested global_labels
-        var map = APMAgentSettings.APM_AGENT_SETTINGS.getAsMap(Settings.builder().put(prefix + "global_labels.abc", "123").build());
-        assertThat(map, hasEntry("global_labels.abc", "123"));
+        for (String prefix : prefixes) {
+            Settings settings = Settings.builder().put(prefix + "global_labels.abc", "123").build();
+            APMAgentSettings.APM_AGENT_SETTINGS.getAsMap(settings);
+            if (prefix.startsWith("tracing.apm.agent.")) {
+                assertWarnings(
+                    "[tracing.apm.agent.global_labels.abc] setting was deprecated in Elasticsearch and will be removed in a future release."
+                );
+            }
+        }
     }
+    public void testTelemetryTracingNamesIncludeFallback() {
+        Settings settings = Settings.builder().put(TRACING_APM_NAMES_INCLUDE_SETTING.getKey(), "abc,xyz").build();
+        List<String> included = TELEMETRY_TRACING_NAMES_INCLUDE_SETTING.get(settings);
+        assertThat(included, containsInAnyOrder("abc", "xyz"));
+        assertWarnings("[tracing.apm.names.include] setting was deprecated in Elasticsearch and will be removed in a future release.");
+    }
+    public void testTelemetryTracingNamesExcludeFallback() {
+        Settings settings = Settings.builder().put(TRACING_APM_NAMES_EXCLUDE_SETTING.getKey(), "abc,xyz").build();
+        List<String> included = TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING.get(settings);
+        assertThat(included, containsInAnyOrder("abc", "xyz"));
+        assertWarnings("[tracing.apm.names.exclude] setting was deprecated in Elasticsearch and will be removed in a future release.");
+    }
+    public void testTelemetryTracingSanitizeFieldNamesFallback() {
+        Settings settings = Settings.builder().put(TRACING_APM_SANITIZE_FIELD_NAMES.getKey(), "abc,xyz").build();
+        List<String> included = TELEMETRY_TRACING_SANITIZE_FIELD_NAMES.get(settings);
+        assertThat(included, containsInAnyOrder("abc", "xyz"));
+        assertWarnings(
+            "[tracing.apm.sanitize_field_names] setting was deprecated in Elasticsearch and will be removed in a future release."
+        );
+    }
     public void testTelemetryTracingSanitizeFieldNamesFallbackDefault() {
@@ -174,6 +252,28 @@
         assertThat(included, hasItem("password")); // and more defaults
     }
+    public void testTelemetrySecretTokenFallback() {
+        MockSecureSettings secureSettings = new MockSecureSettings();
+        secureSettings.setString(TRACING_APM_SECRET_TOKEN_SETTING.getKey(), "verysecret");
+        Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
+        try (SecureString secureString = TELEMETRY_SECRET_TOKEN_SETTING.get(settings)) {
+            assertEquals("verysecret", secureString.toString());
+        }
+        assertWarnings("[tracing.apm.secret_token] setting was deprecated in Elasticsearch and will be removed in a future release.");
+    }
+    public void testTelemetryApiKeyFallback() {
+        MockSecureSettings secureSettings = new MockSecureSettings();
+        secureSettings.setString(TRACING_APM_API_KEY_SETTING.getKey(), "abc");
+        Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
+        try (SecureString secureString = TELEMETRY_API_KEY_SETTING.get(settings)) {
+            assertEquals("abc", secureString.toString());
+        }
+        assertWarnings("[tracing.apm.api_key] setting was deprecated in Elasticsearch and will be removed in a future release.");
+    }
     /**
      * Check that invalid or forbidden APM agent settings are rejected if their last part resembles an allowed setting.
      */
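The legacy-prefix test above depends on tracing.apm.agent.* keys feeding the same agent configuration as telemetry.agent.*. A rough sketch of such a prefix-normalization step (hypothetical names, not the code under test):

import java.util.Map;
import java.util.stream.Collectors;

final class PrefixNormalizeDemo {
    // Rewrite any legacy-prefixed key to the telemetry prefix before handing it to the agent.
    static Map<String, String> normalize(Map<String, String> raw) {
        return raw.entrySet().stream().collect(Collectors.toMap(
            e -> e.getKey().startsWith("tracing.apm.agent.")
                ? e.getKey().replaceFirst("^tracing\\.apm\\.agent\\.", "telemetry.agent.")
                : e.getKey(),
            Map.Entry::getValue
        ));
    }

    public static void main(String[] args) {
        System.out.println(normalize(Map.of("tracing.apm.agent.span_compression_enabled", "true")));
        // {telemetry.agent.span_compression_enabled=true}
    }
}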

View file

@@ -77,9 +77,6 @@ public class BytesProcessorTests extends AbstractStringProcessorTestCase<Long> {
         String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, "1.1kb");
         Processor processor = newProcessor(fieldName, randomBoolean(), fieldName);
         processor.execute(ingestDocument);
-        assertThat(ingestDocument.getFieldValue(fieldName, expectedResultType()), equalTo(1126L));
-        assertWarnings(
-            "Fractional bytes values are deprecated. Use non-fractional bytes values instead: [1.1kb] found for setting " + "[Ingest Field]"
-        );
+        assertThat(ingestDocument.getFieldValue(fieldName, expectedResultType()), equalTo(1127L));
     }
 }
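For context on the expected-value change: "1.1kb" is 1.1 × 1024 = 1126.4 bytes. The old assertion matched truncation (1126) plus a deprecation warning; the new value of 1127 is consistent with the parser now rounding fractional sizes up and accepting them without a warning. That reading is an inference from this hunk alone:

public class FractionalBytesMath {
    public static void main(String[] args) {
        double bytes = 1.1 * 1024;                   // 1126.4 bytes in "1.1kb"
        System.out.println((long) bytes);            // 1126 — truncation, the old expectation
        System.out.println((long) Math.ceil(bytes)); // 1127 — rounding up, the new expectation
    }
}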

View file

@@ -33,6 +33,7 @@ import org.elasticsearch.index.IndexVersion;
 import org.elasticsearch.index.IndexVersions;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.index.mapper.AbstractShapeGeometryFieldMapper;
+import org.elasticsearch.index.mapper.BlockLoader;
 import org.elasticsearch.index.mapper.DocumentParserContext;
 import org.elasticsearch.index.mapper.DocumentParsingException;
 import org.elasticsearch.index.mapper.FieldMapper;
@@ -46,7 +47,6 @@ import org.elasticsearch.legacygeo.XShapeCollection;
 import org.elasticsearch.legacygeo.builders.ShapeBuilder;
 import org.elasticsearch.legacygeo.parsers.ShapeParser;
 import org.elasticsearch.legacygeo.query.LegacyGeoShapeQueryProcessor;
-import org.elasticsearch.lucene.spatial.CoordinateEncoder;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentParser;
 import org.locationtech.spatial4j.shape.Point;
@@ -84,6 +84,7 @@ import java.util.stream.Collectors;
  * "field" : "POLYGON ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0))
  *
  * @deprecated use the field mapper in the spatial module
+ * TODO: Remove this class once we no longer need to support reading 7.x indices that might have this field type
  */
 @Deprecated
 public class LegacyGeoShapeFieldMapper extends AbstractShapeGeometryFieldMapper<ShapeBuilder<?, ?, ?>> {
@@ -533,14 +534,9 @@ public class LegacyGeoShapeFieldMapper extends AbstractShapeGeometryFieldMapper<
     }
     @Override
-    protected boolean isBoundsExtractionSupported() {
-        // Extracting bounds for geo shapes is not implemented yet.
-        return false;
-    }
-    @Override
-    protected CoordinateEncoder coordinateEncoder() {
-        return CoordinateEncoder.GEO;
+    public BlockLoader blockLoader(BlockLoaderContext blContext) {
+        // Legacy geo-shapes do not support doc-values, we can only load from source in ES|QL
+        return blockLoaderFromSource(blContext);
     }
 }
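The new override swaps a doc-values-based strategy for one that re-reads the original _source, since legacy geo-shape fields never index doc values. A toy illustration of that dispatch (the interface below is invented for the example; it is not the BlockLoader API):

public final class LoaderChoiceDemo {
    // Invented single-method interface standing in for the block-loader concept.
    interface ValuesLoader {
        String describe();
    }

    static ValuesLoader chooseLoader(boolean hasDocValues) {
        if (hasDocValues) {
            return () -> "read columnar values from doc-values";
        }
        // Legacy geo-shape fields never index doc-values, so values must be re-parsed from _source.
        return () -> "re-parse values from the stored _source (slower, but always available)";
    }

    public static void main(String[] args) {
        System.out.println(chooseLoader(false).describe());
    }
}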

View file

@@ -438,7 +438,7 @@ public class RemoteScrollableHitSourceTests extends ESTestCase {
             public Future<HttpResponse> answer(InvocationOnMock invocationOnMock) throws Throwable {
                 HeapBufferedAsyncResponseConsumer consumer = (HeapBufferedAsyncResponseConsumer) invocationOnMock.getArguments()[1];
                 FutureCallback callback = (FutureCallback) invocationOnMock.getArguments()[3];
-                assertEquals(new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt(), consumer.getBufferLimit());
+                assertEquals(ByteSizeValue.of(100, ByteSizeUnit.MB).bytesAsInt(), consumer.getBufferLimit());
                 callback.failed(tooLong);
                 return null;
             }
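This file starts a mechanical refactor repeated across the repository and transport modules below: call sites move from the `new ByteSizeValue(value, unit)` constructor to a `ByteSizeValue.of(value, unit)` static factory. A simplified sketch of why a factory is preferred (illustrative; not ByteSizeValue's actual internals):

public final class Size {
    private final long value;
    private final long unitBytes;

    private Size(long value, long unitBytes) {
        this.value = value;
        this.unitBytes = unitBytes;
    }

    // Call sites change from `new Size(100, 1024)` to `Size.of(100, 1024)`;
    // the factory is a single choke point for validation and could later
    // return cached instances, which a public constructor cannot do.
    public static Size of(long value, long unitBytes) {
        if (value < 0) {
            throw new IllegalArgumentException("negative size: " + value);
        }
        return new Size(value, unitBytes);
    }

    public long getBytes() {
        return value * unitBytes;
    }

    public static void main(String[] args) {
        System.out.println(Size.of(100, 1024).getBytes()); // 102400
    }
}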

View file

@@ -90,7 +90,7 @@ public class AzureBlobStoreRepositoryTests extends ESMockAPIBasedRepositoryInteg
     protected Settings repositorySettings(String repoName) {
         Settings.Builder settingsBuilder = Settings.builder()
             .put(super.repositorySettings(repoName))
-            .put(AzureRepository.Repository.MAX_SINGLE_PART_UPLOAD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.MB))
+            .put(AzureRepository.Repository.MAX_SINGLE_PART_UPLOAD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.MB))
             .put(AzureRepository.Repository.CONTAINER_SETTING.getKey(), "container")
             .put(AzureStorageSettings.ACCOUNT_SETTING.getKey(), "test")
             .put(AzureRepository.Repository.DELETION_BATCH_SIZE_SETTING.getKey(), randomIntBetween(5, 256))

View file

@@ -115,7 +115,7 @@ public class AzureStorageCleanupThirdPartyTests extends AbstractThirdPartyReposi
                 Settings.builder()
                     .put("container", System.getProperty("test.azure.container"))
                     .put("base_path", System.getProperty("test.azure.base") + randomAlphaOfLength(8))
-                    .put("max_single_part_upload_size", new ByteSizeValue(1, ByteSizeUnit.MB))
+                    .put("max_single_part_upload_size", ByteSizeValue.of(1, ByteSizeUnit.MB))
             )
             .get();
         assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

View file

@@ -107,8 +107,8 @@ public class AzureBlobStore implements BlobStore {
     private static final Logger logger = LogManager.getLogger(AzureBlobStore.class);
     // See https://learn.microsoft.com/en-us/rest/api/storageservices/blob-batch#request-body
     public static final int MAX_ELEMENTS_PER_BATCH = 256;
-    private static final long DEFAULT_READ_CHUNK_SIZE = new ByteSizeValue(32, ByteSizeUnit.MB).getBytes();
-    private static final int DEFAULT_UPLOAD_BUFFERS_SIZE = (int) new ByteSizeValue(64, ByteSizeUnit.KB).getBytes();
+    private static final long DEFAULT_READ_CHUNK_SIZE = ByteSizeValue.of(32, ByteSizeUnit.MB).getBytes();
+    private static final int DEFAULT_UPLOAD_BUFFERS_SIZE = (int) ByteSizeValue.of(64, ByteSizeUnit.KB).getBytes();
     private final AzureStorageService service;
     private final BigArrays bigArrays;

View file

@@ -81,7 +81,7 @@ public class AzureRepository extends MeteredBlobStoreRepository {
     );
     public static final Setting<Boolean> READONLY_SETTING = Setting.boolSetting(READONLY_SETTING_KEY, false, Property.NodeScope);
     // see ModelHelper.BLOB_DEFAULT_MAX_SINGLE_UPLOAD_SIZE
-    private static final ByteSizeValue DEFAULT_MAX_SINGLE_UPLOAD_SIZE = new ByteSizeValue(256, ByteSizeUnit.MB);
+    private static final ByteSizeValue DEFAULT_MAX_SINGLE_UPLOAD_SIZE = ByteSizeValue.of(256, ByteSizeUnit.MB);
     public static final Setting<ByteSizeValue> MAX_SINGLE_PART_UPLOAD_SIZE_SETTING = Setting.byteSizeSetting(
         "max_single_part_upload_size",
         DEFAULT_MAX_SINGLE_UPLOAD_SIZE,

View file

@@ -37,7 +37,7 @@ public class AzureStorageService {
      * The maximum size of a BlockBlob block.
      * See https://docs.microsoft.com/en-us/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs
      */
-    public static final ByteSizeValue MAX_BLOCK_SIZE = new ByteSizeValue(100, ByteSizeUnit.MB);
+    public static final ByteSizeValue MAX_BLOCK_SIZE = ByteSizeValue.of(100, ByteSizeUnit.MB);
     /**
      * The maximum number of blocks.

View file

@@ -165,7 +165,7 @@ public abstract class AbstractAzureServerTestCase extends ESTestCase {
                 .put(CONTAINER_SETTING.getKey(), CONTAINER)
                 .put(ACCOUNT_SETTING.getKey(), clientName)
                 .put(LOCATION_MODE_SETTING.getKey(), locationMode)
-                .put(MAX_SINGLE_PART_UPLOAD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.MB))
+                .put(MAX_SINGLE_PART_UPLOAD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.MB))
                 .build()
         );

View file

@@ -133,7 +133,7 @@ public class AzureRepositorySettingsTests extends ESTestCase {
         // chunk size in settings
         int size = randomIntBetween(1, 256);
         azureRepository = azureRepository(Settings.builder().put("chunk_size", size + "mb").build());
-        assertEquals(new ByteSizeValue(size, ByteSizeUnit.MB), azureRepository.chunkSize());
+        assertEquals(ByteSizeValue.of(size, ByteSizeUnit.MB), azureRepository.chunkSize());
         // zero bytes is not allowed
         IllegalArgumentException e = expectThrows(

View file

@@ -143,7 +143,7 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESMockAPIBasedRe
             Settings.builder().put("chunk_size", size + "mb").build()
         );
         chunkSize = GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repositoryMetadata);
-        assertEquals(new ByteSizeValue(size, ByteSizeUnit.MB), chunkSize);
+        assertEquals(ByteSizeValue.of(size, ByteSizeUnit.MB), chunkSize);
         // zero bytes is not allowed
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {

View file

@@ -83,7 +83,7 @@ class GoogleCloudStorageBlobStore implements BlobStore {
         final String key = "es.repository_gcs.large_blob_threshold_byte_size";
         final String largeBlobThresholdByteSizeProperty = System.getProperty(key);
         if (largeBlobThresholdByteSizeProperty == null) {
-            LARGE_BLOB_THRESHOLD_BYTE_SIZE = Math.toIntExact(new ByteSizeValue(5, ByteSizeUnit.MB).getBytes());
+            LARGE_BLOB_THRESHOLD_BYTE_SIZE = Math.toIntExact(ByteSizeValue.of(5, ByteSizeUnit.MB).getBytes());
         } else {
             final int largeBlobThresholdByteSize;
             try {

View file

@@ -40,7 +40,7 @@ class GoogleCloudStorageRepository extends MeteredBlobStoreRepository {
      * Maximum allowed object size in GCS.
      * @see <a href="https://cloud.google.com/storage/quotas#objects">GCS documentation</a> for details.
      */
-    static final ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(5, ByteSizeUnit.TB);
+    static final ByteSizeValue MAX_CHUNK_SIZE = ByteSizeValue.of(5, ByteSizeUnit.TB);
     static final String TYPE = "gcs";

View file

@@ -129,7 +129,7 @@ class S3BlobContainer extends AbstractBlobContainer {
     @Override
     public long readBlobPreferredLength() {
         // This container returns streams that must be fully consumed, so we tell consumers to make bounded requests.
-        return new ByteSizeValue(32, ByteSizeUnit.MB).getBytes();
+        return ByteSizeValue.of(32, ByteSizeUnit.MB).getBytes();
     }
     /**

View file

@@ -99,13 +99,13 @@ class S3Repository extends MeteredBlobStoreRepository {
     /**
      * Maximum size of files that can be uploaded using a single upload request.
      */
-    static final ByteSizeValue MAX_FILE_SIZE = new ByteSizeValue(5, ByteSizeUnit.GB);
+    static final ByteSizeValue MAX_FILE_SIZE = ByteSizeValue.of(5, ByteSizeUnit.GB);
     /**
      * Minimum size of parts that can be uploaded using the Multipart Upload API.
      * (see http://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html)
      */
-    static final ByteSizeValue MIN_PART_SIZE_USING_MULTIPART = new ByteSizeValue(5, ByteSizeUnit.MB);
+    static final ByteSizeValue MIN_PART_SIZE_USING_MULTIPART = ByteSizeValue.of(5, ByteSizeUnit.MB);
     /**
      * Maximum size of parts that can be uploaded using the Multipart Upload API.
@@ -116,7 +116,7 @@ class S3Repository extends MeteredBlobStoreRepository {
     /**
      * Maximum size of files that can be uploaded using the Multipart Upload API.
      */
-    static final ByteSizeValue MAX_FILE_SIZE_USING_MULTIPART = new ByteSizeValue(5, ByteSizeUnit.TB);
+    static final ByteSizeValue MAX_FILE_SIZE_USING_MULTIPART = ByteSizeValue.of(5, ByteSizeUnit.TB);
     /**
      * Minimum threshold below which the chunk is uploaded using a single request. Beyond this threshold,
@@ -137,7 +137,7 @@ class S3Repository extends MeteredBlobStoreRepository {
     static final Setting<ByteSizeValue> CHUNK_SIZE_SETTING = Setting.byteSizeSetting(
         "chunk_size",
         MAX_FILE_SIZE_USING_MULTIPART,
-        new ByteSizeValue(5, ByteSizeUnit.MB),
+        ByteSizeValue.of(5, ByteSizeUnit.MB),
         MAX_FILE_SIZE_USING_MULTIPART
     );

View file

@@ -335,7 +335,7 @@ public class S3BlobContainerRetriesTests extends AbstractBlobContainerRetriesTes
     public void testWriteLargeBlob() throws Exception {
         final boolean useTimeout = rarely();
         final TimeValue readTimeout = useTimeout ? TimeValue.timeValueMillis(randomIntBetween(100, 500)) : null;
-        final ByteSizeValue bufferSize = new ByteSizeValue(5, ByteSizeUnit.MB);
+        final ByteSizeValue bufferSize = ByteSizeValue.of(5, ByteSizeUnit.MB);
         final BlobContainer blobContainer = createBlobContainer(null, readTimeout, true, bufferSize);
         final int parts = randomIntBetween(1, 5);
@@ -436,7 +436,7 @@ public class S3BlobContainerRetriesTests extends AbstractBlobContainerRetriesTes
     public void testWriteLargeBlobStreaming() throws Exception {
         final boolean useTimeout = rarely();
         final TimeValue readTimeout = useTimeout ? TimeValue.timeValueMillis(randomIntBetween(100, 500)) : null;
-        final ByteSizeValue bufferSize = new ByteSizeValue(5, ByteSizeUnit.MB);
+        final ByteSizeValue bufferSize = ByteSizeValue.of(5, ByteSizeUnit.MB);
         final BlobContainer blobContainer = createBlobContainer(null, readTimeout, true, bufferSize);
         final int parts = randomIntBetween(1, 5);

View file

@@ -93,8 +93,8 @@ public class S3RepositoryTests extends ESTestCase {
     private Settings bufferAndChunkSettings(long buffer, long chunk) {
         return Settings.builder()
             .put(S3Repository.BUCKET_SETTING.getKey(), "bucket")
-            .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(buffer, ByteSizeUnit.MB).getStringRep())
-            .put(S3Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunk, ByteSizeUnit.MB).getStringRep())
+            .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), ByteSizeValue.of(buffer, ByteSizeUnit.MB).getStringRep())
+            .put(S3Repository.CHUNK_SIZE_SETTING.getKey(), ByteSizeValue.of(chunk, ByteSizeUnit.MB).getStringRep())
             .build();
     }

View file

@@ -33,7 +33,7 @@ public class URLBlobStore implements BlobStore {
     static final Setting<ByteSizeValue> BUFFER_SIZE_SETTING = Setting.byteSizeSetting(
         "repositories.uri.buffer_size",
-        new ByteSizeValue(100, ByteSizeUnit.KB),
+        ByteSizeValue.of(100, ByteSizeUnit.KB),
         Setting.Property.NodeScope
     );

View file

@@ -42,7 +42,7 @@ import static org.hamcrest.Matchers.hasSize;
 @ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numClientNodes = 0, numDataNodes = 1)
 public class Netty4HttpRequestSizeLimitIT extends ESNetty4IntegTestCase {
-    private static final ByteSizeValue LIMIT = new ByteSizeValue(2, ByteSizeUnit.KB);
+    private static final ByteSizeValue LIMIT = ByteSizeValue.of(2, ByteSizeUnit.KB);
     @Override
     protected boolean addMockHttpTransport() {

View file

@@ -96,7 +96,7 @@ public class Netty4IncrementalRequestHandlingIT extends ESNetty4IntegTestCase {
     @Override
     protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
         Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings));
-        builder.put(HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.getKey(), new ByteSizeValue(50, ByteSizeUnit.MB));
+        builder.put(HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.getKey(), ByteSizeValue.of(50, ByteSizeUnit.MB));
         return builder.build();
     }

View file

@@ -57,7 +57,7 @@ public class Netty4Plugin extends Plugin implements NetworkPlugin {
     );
     public static final Setting<ByteSizeValue> SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE = byteSizeSetting(
         "http.netty.receive_predictor_size",
-        new ByteSizeValue(64, ByteSizeUnit.KB),
+        ByteSizeValue.of(64, ByteSizeUnit.KB),
         Setting.Property.NodeScope
     );
     public static final Setting<Integer> WORKER_COUNT = new Setting<>(
@@ -68,7 +68,7 @@ public class Netty4Plugin extends Plugin implements NetworkPlugin {
     );
     private static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_SIZE = byteSizeSetting(
         "transport.netty.receive_predictor_size",
-        new ByteSizeValue(64, ByteSizeUnit.KB),
+        ByteSizeValue.of(64, ByteSizeUnit.KB),
         Setting.Property.NodeScope
     );
     public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_MAX = byteSizeSetting(

View file

@@ -193,9 +193,6 @@ tests:
 - class: org.elasticsearch.cluster.service.MasterServiceTests
   method: testThreadContext
   issue: https://github.com/elastic/elasticsearch/issues/118914
-- class: org.elasticsearch.xpack.security.authc.AuthenticationServiceTests
-  method: testInvalidToken
-  issue: https://github.com/elastic/elasticsearch/issues/119019
 - class: org.elasticsearch.xpack.security.authc.ldap.ActiveDirectoryRunAsIT
   issue: https://github.com/elastic/elasticsearch/issues/115727
 - class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT
@@ -232,16 +229,6 @@ tests:
 - class: org.elasticsearch.xpack.ilm.actions.SearchableSnapshotActionIT
   method: testUpdatePolicyToAddPhasesYieldsInvalidActionsToBeSkipped
   issue: https://github.com/elastic/elasticsearch/issues/118406
-- class: org.elasticsearch.xpack.ml.integration.DatafeedJobsIT
-  issue: https://github.com/elastic/elasticsearch/issues/120088
-- class: org.elasticsearch.xpack.searchablesnapshots.minio.MinioSearchableSnapshotsIT
-  issue: https://github.com/elastic/elasticsearch/issues/120101
-- class: org.elasticsearch.repositories.s3.S3RepositoryThirdPartyTests
-  issue: https://github.com/elastic/elasticsearch/issues/120115
-- class: org.elasticsearch.repositories.s3.RepositoryS3MinioBasicCredentialsRestIT
-  issue: https://github.com/elastic/elasticsearch/issues/120117
-- class: org.elasticsearch.repositories.blobstore.testkit.analyze.MinioRepositoryAnalysisRestIT
-  issue: https://github.com/elastic/elasticsearch/issues/118548
 - class: org.elasticsearch.xpack.security.QueryableReservedRolesIT
   method: testConfiguredReservedRolesAfterClosingAndOpeningIndex
   issue: https://github.com/elastic/elasticsearch/issues/120127
@@ -251,9 +238,9 @@ tests:
 - class: org.elasticsearch.oldrepos.OldRepositoryAccessIT
   method: testOldSourceOnlyRepoAccess
   issue: https://github.com/elastic/elasticsearch/issues/120080
-- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT
-  method: test {lookup-join.MvJoinKeyFromRow ASYNC}
-  issue: https://github.com/elastic/elasticsearch/issues/120242
+- class: org.elasticsearch.xpack.migrate.action.ReindexDatastreamIndexTransportActionIT
+  method: testTsdbStartEndSet
+  issue: https://github.com/elastic/elasticsearch/issues/120314
 # Examples:
 #

View file

@@ -45,11 +45,11 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
     useCluster localCluster
     useCluster remoteCluster
     systemProperty 'tests.upgrade_from_version', bwcVersion.toString().replace('-SNAPSHOT', '')
-    doFirst {
-      nonInputProperties.systemProperty('tests.rest.cluster', localCluster.map(c -> c.allHttpSocketURI.join(",")))
-      nonInputProperties.systemProperty('tests.rest.remote_cluster', remoteCluster.map(c -> c.allHttpSocketURI.join(",")))
-    }
+    nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(localCluster.name).map { it.allHttpSocketURI.join(",") })
+    nonInputProperties.systemProperty('tests.rest.remote_cluster', getClusterInfo(remoteCluster.name).map { it.allHttpSocketURI.join(",") })
+    def fipsDisabled = buildParams.inFipsJvm == false
+    onlyIf("FIPS mode disabled") { fipsDisabled }
   }
 tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) {
@@ -60,28 +60,28 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
     cluster.nodes.forEach { node ->
       node.getAllTransportPortURI()
     }
-    cluster.nextNodeToNextVersion()
+    getRegistry().get().nextNodeToNextVersion(cluster)
   }
 }
 tasks.register("${baseName}#oneThirdUpgraded", StandaloneRestIntegTestTask) {
   dependsOn "${baseName}#oldClusterTest"
   doFirst {
-    remoteCluster.get().nextNodeToNextVersion()
+    getRegistry().get().nextNodeToNextVersion(remoteCluster)
   }
 }
 tasks.register("${baseName}#twoThirdUpgraded", StandaloneRestIntegTestTask) {
   dependsOn "${baseName}#oneThirdUpgraded"
   doFirst {
-    remoteCluster.get().nextNodeToNextVersion()
+    getRegistry().get().nextNodeToNextVersion(remoteCluster)
   }
 }
 tasks.register("${baseName}#fullUpgraded", StandaloneRestIntegTestTask) {
   dependsOn "${baseName}#twoThirdUpgraded"
   doFirst {
-    remoteCluster.get().nextNodeToNextVersion()
+    getRegistry().get().nextNodeToNextVersion(remoteCluster)
   }
 }

View file

@@ -13,8 +13,8 @@ import org.apache.http.entity.ContentType;
 import org.apache.http.entity.InputStreamEntity;
 import org.elasticsearch.client.Request;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.core.Strings;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.test.XContentTestUtils;
@@ -42,7 +42,9 @@ import static org.elasticsearch.test.cluster.util.Version.fromString;
 import static org.elasticsearch.test.rest.ObjectPath.createFromResponse;
 import static org.hamcrest.Matchers.allOf;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
 import static org.hamcrest.Matchers.notNullValue;
 public abstract class AbstractIndexCompatibilityTestCase extends ESRestTestCase {
@@ -156,8 +158,16 @@ public abstract class AbstractIndexCompatibilityTestCase extends ESRestTestCase
         return new Version((byte) ((id / 1000000) % 100), (byte) ((id / 10000) % 100), (byte) ((id / 100) % 100));
     }
+    protected static int getNumberOfReplicas(String indexName) throws Exception {
+        var indexSettings = (Map<?, ?>) ((Map<?, ?>) getIndexSettings(indexName).get(indexName)).get("settings");
+        var numberOfReplicas = Integer.parseInt((String) indexSettings.get(IndexMetadata.SETTING_NUMBER_OF_REPLICAS));
+        assertThat(numberOfReplicas, allOf(greaterThanOrEqualTo(0), lessThanOrEqualTo(NODES - 1)));
+        return numberOfReplicas;
+    }
     protected static void indexDocs(String indexName, int numDocs) throws Exception {
         var request = new Request("POST", "/_bulk");
+        request.addParameter("refresh", "true");
         var docs = new StringBuilder();
         IntStream.range(0, numDocs).forEach(n -> docs.append(Strings.format("""
             {"index":{"_index":"%s"}}
@@ -185,19 +195,30 @@ public abstract class AbstractIndexCompatibilityTestCase extends ESRestTestCase
     }
     protected static void restoreIndex(String repository, String snapshot, String indexName, String renamedIndexName) throws Exception {
+        restoreIndex(repository, snapshot, indexName, renamedIndexName, Settings.EMPTY);
+    }
+    protected static void restoreIndex(
+        String repository,
+        String snapshot,
+        String indexName,
+        String renamedIndexName,
+        Settings indexSettings
+    ) throws Exception {
         var request = new Request("POST", "/_snapshot/" + repository + "/" + snapshot + "/_restore");
         request.addParameter("wait_for_completion", "true");
-        request.setJsonEntity(org.elasticsearch.common.Strings.format("""
+        request.setJsonEntity(Strings.format("""
             {
              "indices": "%s",
              "include_global_state": false,
              "rename_pattern": "(.+)",
              "rename_replacement": "%s",
-             "include_aliases": false
-            }""", indexName, renamedIndexName));
+             "include_aliases": false,
+             "index_settings": %s
+            }""", indexName, renamedIndexName, Strings.toString(indexSettings)));
         var responseBody = createFromResponse(client().performRequest(request));
-        assertThat(responseBody.evaluate("snapshot.shards.total"), equalTo((int) responseBody.evaluate("snapshot.shards.failed")));
-        assertThat(responseBody.evaluate("snapshot.shards.successful"), equalTo(0));
+        assertThat(responseBody.evaluate("snapshot.shards.total"), equalTo((int) responseBody.evaluate("snapshot.shards.successful")));
+        assertThat(responseBody.evaluate("snapshot.shards.failed"), equalTo(0));
     }
     protected static void updateRandomIndexSettings(String indexName) throws IOException {
@@ -215,20 +236,19 @@ public abstract class AbstractIndexCompatibilityTestCase extends ESRestTestCase
         updateIndexSettings(indexName, settings);
     }
-    protected static void updateRandomMappings(String indexName) throws IOException {
+    protected static void updateRandomMappings(String indexName) throws Exception {
         final var runtime = new HashMap<>();
         runtime.put("field_" + randomInt(2), Map.of("type", "keyword"));
         final var properties = new HashMap<>();
         properties.put(randomIdentifier(), Map.of("type", "long"));
-        var body = XContentTestUtils.convertToXContent(Map.of("runtime", runtime, "properties", properties), XContentType.JSON);
+        updateMappings(indexName, Map.of("runtime", runtime, "properties", properties));
+    }
+    protected static void updateMappings(String indexName, Map<String, ?> mappings) throws Exception {
+        var body = XContentTestUtils.convertToXContent(mappings, XContentType.JSON);
         var request = new Request("PUT", indexName + "/_mappings");
         request.setEntity(
-            new InputStreamEntity(
-                body.streamInput(),
-                body.length(),
-                ContentType.create(XContentType.JSON.mediaTypeWithoutParameters())
-            )
+            new InputStreamEntity(body.streamInput(), body.length(), ContentType.create(XContentType.JSON.mediaTypeWithoutParameters()))
        );
         assertOK(client().performRequest(request));
     }
@@ -238,4 +258,14 @@ public abstract class AbstractIndexCompatibilityTestCase extends ESRestTestCase
         var state = responseBody.evaluate("metadata.indices." + indexName + ".state");
         return IndexMetadata.State.fromString((String) state) == IndexMetadata.State.CLOSE;
     }
+    protected static void addIndexWriteBlock(String indexName) throws Exception {
+        assertAcknowledged(client().performRequest(new Request("PUT", Strings.format("/%s/_block/write", indexName))));
+    }
+    protected static void forceMerge(String indexName, int maxNumSegments) throws Exception {
+        var request = new Request("POST", '/' + indexName + "/_forcemerge");
+        request.addParameter("max_num_segments", String.valueOf(maxNumSegments));
+        assertOK(client().performRequest(request));
+    }
 }
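The updated restoreIndex helper now appends an "index_settings" object to the restore request and asserts that every shard restored successfully, rather than expecting the restore to fail. A standalone sketch of the JSON body it produces (key names follow the hunk above; everything else is illustrative):

public final class RestoreBodyDemo {
    static String restoreBody(String index, String renamed, String indexSettingsJson) {
        // Mirrors the text block in the helper; %s placeholders are filled in the same order.
        return """
            {
              "indices": "%s",
              "include_global_state": false,
              "rename_pattern": "(.+)",
              "rename_replacement": "%s",
              "include_aliases": false,
              "index_settings": %s
            }""".formatted(index, renamed, indexSettingsJson);
    }

    public static void main(String[] args) {
        // Settings.EMPTY serializes to "{}", so the four-argument overload keeps its old behavior.
        System.out.println(restoreBody("index", "index-restored", "{}"));
    }
}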

View file

@@ -9,18 +9,13 @@
 package org.elasticsearch.lucene;
-import org.elasticsearch.client.Request;
-import org.elasticsearch.client.ResponseException;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
-import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.repositories.fs.FsRepository;
-import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.test.cluster.util.Version;
-import static org.hamcrest.CoreMatchers.containsString;
-import static org.hamcrest.Matchers.allOf;
+import static org.elasticsearch.cluster.metadata.MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING;
 import static org.hamcrest.Matchers.equalTo;
 public class FullClusterRestartLuceneIndexCompatibilityIT extends FullClusterRestartIndexCompatibilityTestCase {
@@ -34,7 +29,90 @@ public class FullClusterRestartLuceneIndexCompatibilityIT extends FullClusterRes
     }
     /**
-     * Creates an index and a snapshot on N-2, then restores the snapshot on N.
+     * Creates an index on N-2, upgrades to N-1 and marks as read-only, then upgrades to N.
+     */
+    public void testIndexUpgrade() throws Exception {
+        final String index = suffix("index");
+        final int numDocs = 2431;
+        if (isFullyUpgradedTo(VERSION_MINUS_2)) {
+            logger.debug("--> creating index [{}]", index);
+            createIndex(
+                client(),
+                index,
+                Settings.builder()
+                    .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+                    .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, randomInt(2))
+                    .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
+                    .build()
+            );
+            logger.debug("--> indexing [{}] docs in [{}]", numDocs, index);
+            indexDocs(index, numDocs);
+            return;
+        }
+        if (isFullyUpgradedTo(VERSION_MINUS_1)) {
+            ensureGreen(index);
+            assertThat(indexVersion(index), equalTo(VERSION_MINUS_2));
+            assertDocCount(client(), index, numDocs);
+            logger.debug("--> flushing [{}]", index);
+            flush(index, true);
+            logger.debug("--> applying write block on [{}]", index);
+            addIndexWriteBlock(index);
+            logger.debug("--> applying verified read-only setting on [{}]", index);
+            updateIndexSettings(index, Settings.builder().put(VERIFIED_READ_ONLY_SETTING.getKey(), true));
+            return;
+        }
+        if (isFullyUpgradedTo(VERSION_CURRENT)) {
+            ensureGreen(index);
+            assertThat(indexVersion(index), equalTo(VERSION_MINUS_2));
+            assertDocCount(client(), index, numDocs);
+            var indexSettings = getIndexSettingsAsMap(index);
+            assertThat(indexSettings.get(IndexMetadata.APIBlock.WRITE.settingName()), equalTo(Boolean.TRUE.toString()));
+            assertThat(indexSettings.get(VERIFIED_READ_ONLY_SETTING.getKey()), equalTo(Boolean.TRUE.toString()));
+            var numberOfReplicas = getNumberOfReplicas(index);
+            if (0 < numberOfReplicas) {
+                logger.debug("--> resetting number of replicas [{}] to [0]", numberOfReplicas);
+                updateIndexSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0));
+            }
+            updateRandomIndexSettings(index);
+            updateRandomMappings(index);
+            logger.debug("--> adding replica to test peer-recovery");
+            updateIndexSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1));
+            ensureGreen(index);
+            logger.debug("--> closing restored index [{}]", index);
+            closeIndex(index);
+            ensureGreen(index);
+            logger.debug("--> adding replica to test peer-recovery for closed shards");
+            updateIndexSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2));
+            ensureGreen(index);
+            logger.debug("--> re-opening restored index [{}]", index);
+            openIndex(index);
+            ensureGreen(index);
+            assertDocCount(client(), index, numDocs);
+            logger.debug("--> deleting index [{}]", index);
+            deleteIndex(index);
+        }
+    }
+    /**
+     * Creates an index on N-2, marks as read-only on N-1 and creates a snapshot, then restores the snapshot on N.
      */
     public void testRestoreIndex() throws Exception {
         final String repository = suffix("repository");
@@ -59,9 +137,6 @@ public class FullClusterRestartLuceneIndexCompatibilityIT extends FullClusterRes
             logger.debug("--> indexing [{}] docs in [{}]", numDocs, index);
             indexDocs(index, numDocs);
-            logger.debug("--> creating snapshot [{}]", snapshot);
-            createSnapshot(client(), repository, snapshot, true);
             return;
         }
@@ -71,6 +146,18 @@ public class FullClusterRestartLuceneIndexCompatibilityIT extends FullClusterRes
             assertThat(indexVersion(index), equalTo(VERSION_MINUS_2));
             assertDocCount(client(), index, numDocs);
+            logger.debug("--> flushing [{}]", index);
+            flush(index, true);
+            logger.debug("--> applying write block on [{}]", index);
+            addIndexWriteBlock(index);
+            logger.debug("--> applying verified read-only setting on [{}]", index);
+            updateIndexSettings(index, Settings.builder().put(VERIFIED_READ_ONLY_SETTING.getKey(), true));
+            logger.debug("--> creating snapshot [{}]", snapshot);
+            createSnapshot(client(), repository, snapshot, true);
             logger.debug("--> deleting index [{}]", index);
             deleteIndex(index);
             return;
@@ -79,32 +166,109 @@ public class FullClusterRestartLuceneIndexCompatibilityIT extends FullClusterRes
         if (isFullyUpgradedTo(VERSION_CURRENT)) {
             var restoredIndex = suffix("index-restored");
             logger.debug("--> restoring index [{}] as [{}]", index, restoredIndex);
-            // Restoring the index will fail as Elasticsearch does not support reading N-2 yet
-            var request = new Request("POST", "/_snapshot/" + repository + "/" + snapshot + "/_restore");
-            request.addParameter("wait_for_completion", "true");
-            request.setJsonEntity(Strings.format("""
-                {
-                  "indices": "%s",
-                  "include_global_state": false,
-                  "rename_pattern": "(.+)",
-                  "rename_replacement": "%s",
-                  "include_aliases": false
-                }""", index, restoredIndex));
-            var responseException = expectThrows(ResponseException.class, () -> client().performRequest(request));
-            assertEquals(RestStatus.INTERNAL_SERVER_ERROR.getStatus(), responseException.getResponse().getStatusLine().getStatusCode());
-            assertThat(
-                responseException.getMessage(),
-                allOf(
-                    containsString("cannot restore index [[" + index),
-                    containsString("because it cannot be upgraded"),
-                    containsString("has current compatibility version [" + VERSION_MINUS_2 + '-' + VERSION_MINUS_1.getMajor() + ".0.0]"),
-                    containsString("but the minimum compatible version is [" + VERSION_MINUS_1.getMajor() + ".0.0]."),
-                    containsString("It should be re-indexed in Elasticsearch " + VERSION_MINUS_1.getMajor() + ".x"),
-                    containsString("before upgrading to " + VERSION_CURRENT)
-                )
-            );
+            restoreIndex(repository, snapshot, index, restoredIndex);
+            ensureGreen(restoredIndex);
+            assertThat(indexVersion(restoredIndex), equalTo(VERSION_MINUS_2));
+            assertDocCount(client(), restoredIndex, numDocs);
+            updateRandomIndexSettings(restoredIndex);
+            updateRandomMappings(restoredIndex);
+            logger.debug("--> adding replica to test peer-recovery");
+            updateIndexSettings(restoredIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1));
+            ensureGreen(restoredIndex);
+            logger.debug("--> closing restored index [{}]", restoredIndex);
+            closeIndex(restoredIndex);
+            ensureGreen(restoredIndex);
+            logger.debug("--> adding replica to test peer-recovery for closed shards");
+            updateIndexSettings(restoredIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2));
+            ensureGreen(restoredIndex);
+            logger.debug("--> re-opening restored index [{}]", restoredIndex);
+            openIndex(restoredIndex);
+            ensureGreen(restoredIndex);
+            assertDocCount(client(), restoredIndex, numDocs);
+            logger.debug("--> deleting restored index [{}]", restoredIndex);
+            deleteIndex(restoredIndex);
+        }
+    }
+    /**
+     * Creates an index on N-2, marks it read-only on N-1, snapshots and then closes it, then restores the snapshot on N.
+     */
+    public void testRestoreIndexOverClosedIndex() throws Exception {
+        final String repository = suffix("repository");
+        final String snapshot = suffix("snapshot");
+        final String index = suffix("index");
+        final int numDocs = 2134;
+        if (isFullyUpgradedTo(VERSION_MINUS_2)) {
+            logger.debug("--> registering repository [{}]", repository);
+            registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings());
+            logger.debug("--> creating index [{}]", index);
+            createIndex(
+                client(),
+                index,
+                Settings.builder()
+                    .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+                    .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+                    .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
+                    .build()
+            );
+            logger.debug("--> indexing [{}] docs in [{}]", numDocs, index);
+            indexDocs(index, numDocs);
+            return;
+        }
+        if (isFullyUpgradedTo(VERSION_MINUS_1)) {
+            ensureGreen(index);
+            assertThat(indexVersion(index), equalTo(VERSION_MINUS_2));
+            assertDocCount(client(), index, numDocs);
+            logger.debug("--> flushing [{}]", index);
+            flush(index, true);
+            logger.debug("--> applying write block on [{}]", index);
+            addIndexWriteBlock(index);
+            logger.debug("--> applying verified read-only setting on [{}]", index);
+            updateIndexSettings(index, Settings.builder().put(VERIFIED_READ_ONLY_SETTING.getKey(), true));
+            logger.debug("--> creating snapshot [{}]", snapshot);
+            createSnapshot(client(), repository, snapshot, true);
+            logger.debug("--> force-merge index [{}] to 1 segment", index);
+            forceMerge(index, 1);
+            logger.debug("--> closing index [{}]", index);
+            closeIndex(index);
+            ensureGreen(index);
+            return;
+        }
+        if (isFullyUpgradedTo(VERSION_CURRENT)) {
+            var indexSettings = getIndexSettingsAsMap(index);
+            assertThat(indexSettings.get(IndexMetadata.APIBlock.WRITE.settingName()), equalTo(Boolean.TRUE.toString()));
+            assertThat(indexSettings.get(VERIFIED_READ_ONLY_SETTING.getKey()), equalTo(Boolean.TRUE.toString()));
+            assertThat(isIndexClosed(index), equalTo(true));
+            logger.debug("--> restoring index [{}] over existing closed index", index);
+            restoreIndex(repository, snapshot, index, index);
+            ensureGreen(index);
+            assertThat(indexVersion(index), equalTo(VERSION_MINUS_2));
+            assertDocCount(client(), index, numDocs);
+            logger.debug("--> deleting index [{}]", index);
+            deleteIndex(index);
         }
     }
 }
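Both restart suites prepare an N-2 index the same way before the final upgrade: flush, add a write block, then record the verified read-only marker. A rough equivalent using the low-level REST client (endpoints as used above; the setting key is taken from VERIFIED_READ_ONLY_SETTING and is assumed here to be "index.verified_read_only"):

import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public final class ReadOnlyPrep {
    // Sketch of the read-only preparation sequence the tests run on N-1.
    static void prepareForUpgrade(RestClient client, String index) throws Exception {
        client.performRequest(new Request("POST", "/" + index + "/_flush"));      // persist all segments
        client.performRequest(new Request("PUT", "/" + index + "/_block/write")); // forbid further writes
        Request settings = new Request("PUT", "/" + index + "/_settings");
        settings.setJsonEntity("{\"index.verified_read_only\": true}");           // mark the block as verified
        client.performRequest(settings);
    }
}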

View file

@@ -155,9 +155,11 @@ public class FullClusterRestartSearchableSnapshotIndexCompatibilityIT extends Fu
         assertThat(indexVersion(mountedIndex), equalTo(VERSION_MINUS_2));
         assertDocCount(client(), mountedIndex, numDocs);
-        logger.debug("--> adding replica to test replica upgrade");
-        updateIndexSettings(mountedIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1));
-        ensureGreen(mountedIndex);
+        if (randomBoolean()) {
+            logger.debug("--> adding replica to test upgrade with replica");
+            updateIndexSettings(mountedIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1));
+            ensureGreen(mountedIndex);
+        }
         if (randomBoolean()) {
             logger.debug("--> random closing of index [{}] before upgrade", mountedIndex);
View file

@@ -73,6 +73,12 @@ public abstract class RollingUpgradeIndexCompatibilityTestCase extends AbstractI
             closeClients();
             cluster().upgradeNodeToVersion(i, expectedNodeVersion);
             initClient();
+            ensureHealth((request -> {
+                request.addParameter("timeout", "70s");
+                request.addParameter("wait_for_nodes", String.valueOf(NODES));
+                request.addParameter("wait_for_status", "yellow");
+            }));
         }
         currentNodeVersion = nodesVersions().get(nodeName);
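The new ensureHealth call blocks after each node upgrade until the node rejoins; its parameters map directly onto the _cluster/health API. Roughly equivalent as a direct request (a sketch, using the same low-level client as above):

import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public final class WaitForYellow {
    // Wait up to 70s for all nodes to be present and the cluster to reach at least yellow.
    static void waitForRecovery(RestClient client, int expectedNodes) throws Exception {
        Request health = new Request("GET", "/_cluster/health");
        health.addParameter("timeout", "70s");
        health.addParameter("wait_for_nodes", String.valueOf(expectedNodes));
        health.addParameter("wait_for_status", "yellow");
        client.performRequest(health);
    }
}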

View file

@ -0,0 +1,192 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.lucene;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.test.cluster.util.Version;
import java.util.List;
import static org.elasticsearch.cluster.metadata.MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING;
import static org.hamcrest.Matchers.equalTo;
public class RollingUpgradeLuceneIndexCompatibilityTestCase extends RollingUpgradeIndexCompatibilityTestCase {
static {
clusterConfig = config -> config.setting("xpack.license.self_generated.type", "trial");
}
public RollingUpgradeLuceneIndexCompatibilityTestCase(List<Version> nodesVersions) {
super(nodesVersions);
}
/**
* Creates an index on N-2, upgrades to N -1 and marks as read-only, then remains searchable during rolling upgrades.
*/
public void testIndexUpgrade() throws Exception {
final String index = suffix("index-rolling-upgraded");
final int numDocs = 2543;
if (isFullyUpgradedTo(VERSION_MINUS_2)) {
logger.debug("--> creating index [{}]", index);
createIndex(
client(),
index,
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
.build()
);
logger.debug("--> indexing [{}] docs in [{}]", numDocs, index);
indexDocs(index, numDocs);
return;
}
ensureGreen(index);
if (isFullyUpgradedTo(VERSION_MINUS_1)) {
assertThat(indexVersion(index), equalTo(VERSION_MINUS_2));
assertDocCount(client(), index, numDocs);
logger.debug("--> flushing [{}]", index);
flush(index, true);
logger.debug("--> applying write block on [{}]", index);
addIndexWriteBlock(index);
logger.debug("--> applying verified read-only setting on [{}]", index);
updateIndexSettings(index, Settings.builder().put(VERIFIED_READ_ONLY_SETTING.getKey(), true));
return;
}
if (nodesVersions().values().stream().anyMatch(v -> v.onOrAfter(VERSION_CURRENT))) {
var indexSettings = getIndexSettingsAsMap(index);
assertThat(indexSettings.get(IndexMetadata.APIBlock.WRITE.settingName()), equalTo(Boolean.TRUE.toString()));
assertThat(indexSettings.get(VERIFIED_READ_ONLY_SETTING.getKey()), equalTo(Boolean.TRUE.toString()));
if (isIndexClosed(index)) {
logger.debug("--> re-opening index [{}] after upgrade", index);
openIndex(index);
ensureGreen(index);
}
assertThat(indexVersion(index), equalTo(VERSION_MINUS_2));
assertDocCount(client(), index, numDocs);
updateRandomIndexSettings(index);
updateRandomMappings(index);
if (randomBoolean()) {
logger.debug("--> random closing of index [{}] before upgrade", index);
closeIndex(index);
ensureGreen(index);
}
}
}
/**
* Creates an index on N-2, marks it as read-only and snapshots it on N-1, then restores the snapshot while the rolling upgrade to N is in progress.
*/
public void testRestoreIndex() throws Exception {
final String repository = suffix("repository");
final String snapshot = suffix("snapshot");
final String index = suffix("index");
final int numDocs = 1234;
if (isFullyUpgradedTo(VERSION_MINUS_2)) {
logger.debug("--> registering repository [{}]", repository);
registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings());
logger.debug("--> creating index [{}]", index);
createIndex(
client(),
index,
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
.build()
);
logger.debug("--> indexing [{}] docs in [{}]", numDocs, index);
indexDocs(index, numDocs);
return;
}
if (isFullyUpgradedTo(VERSION_MINUS_1)) {
ensureGreen(index);
assertThat(indexVersion(index), equalTo(VERSION_MINUS_2));
assertDocCount(client(), index, numDocs);
logger.debug("--> flushing [{}]", index);
flush(index, true);
logger.debug("--> applying write block on [{}]", index);
addIndexWriteBlock(index);
logger.debug("--> applying verified read-only setting on [{}]", index);
updateIndexSettings(index, Settings.builder().put(VERIFIED_READ_ONLY_SETTING.getKey(), true));
logger.debug("--> creating snapshot [{}]", snapshot);
createSnapshot(client(), repository, snapshot, true);
logger.debug("--> deleting index [{}]", index);
deleteIndex(index);
return;
}
if (nodesVersions().values().stream().anyMatch(v -> v.onOrAfter(VERSION_CURRENT))) {
var restoredIndex = suffix("index-restored-rolling");
boolean success = false;
try {
logger.debug("--> restoring index [{}] as [{}]", index, restoredIndex);
restoreIndex(repository, snapshot, index, restoredIndex);
ensureGreen(restoredIndex);
assertThat(indexVersion(restoredIndex), equalTo(VERSION_MINUS_2));
assertDocCount(client(), restoredIndex, numDocs);
updateRandomIndexSettings(restoredIndex);
updateRandomMappings(restoredIndex);
logger.debug("--> closing restored index [{}]", restoredIndex);
closeIndex(restoredIndex);
ensureGreen(restoredIndex);
logger.debug("--> re-opening restored index [{}]", restoredIndex);
openIndex(restoredIndex);
ensureGreen(restoredIndex);
assertDocCount(client(), restoredIndex, numDocs);
logger.debug("--> deleting restored index [{}]", restoredIndex);
deleteIndex(restoredIndex);
success = true;
} finally {
if (success == false) {
try {
client().performRequest(new Request("DELETE", "/" + restoredIndex));
} catch (ResponseException e) {
logger.warn("Failed to delete restored index [" + restoredIndex + ']', e);
}
}
}
}
}
}
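The class above follows the three-phase shape these rolling-upgrade tests share: seed on N-2, freeze on N-1, verify on N. A self-contained toy sketch of that dispatch idiom (hypothetical names, not the real test framework):

// Toy model of the per-phase dispatch; each branch mirrors one return-early
// block in the tests above.
class RollingUpgradePhaseSketch {
    enum Phase { N_MINUS_2, N_MINUS_1, UPGRADING_TO_N }

    static void runTest(Phase phase) {
        switch (phase) {
            case N_MINUS_2 -> System.out.println("create the index and seed documents");
            case N_MINUS_1 -> System.out.println("flush, add the write block, set verified read-only");
            case UPGRADING_TO_N -> System.out.println("assert the index stays green and searchable");
        }
    }

    public static void main(String[] args) {
        for (Phase phase : Phase.values()) {
            runTest(phase); // the framework re-runs the same test once per cluster state
        }
    }
}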


@ -82,7 +82,7 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
def baseCluster = testClusters.register(baseName) {
  versions = [bwcVersion.toString(), project.version]
  numberOfNodes = 4
- setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}"
+ setting 'path.repo', "${layout.buildDirectory.asFile.get()}/cluster/shared/repo/${baseName}"
  setting 'xpack.security.enabled', 'false'
  setting "xpack.license.self_generated.type", "trial"
  /* There is a chance we have more master changes than "normal", so to avoid this test from failing,
@ -96,50 +96,32 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
tasks.register("${baseName}#mixedClusterTest", StandaloneRestIntegTestTask) {
  useCluster baseCluster
  mustRunAfter("precommit")
- Provider<TestClustersRegistry> serviceProvider = GradleUtils.getBuildService(
-   project.gradle.sharedServices,
-   TestClustersPlugin.REGISTRY_SERVICE_NAME
- )
- def baseInfo = project.getProviders().of(TestClusterValueSource.class) {
-   it.parameters.path.set(clusterPath)
-   it.parameters.clusterName.set(baseName)
-   it.parameters.service = serviceProvider
- }.map { it.getAllHttpSocketURI() }
- def baseInfoAfterOneNodeUpdate = project.getProviders().of(TestClusterValueSource.class) {
-   it.parameters.path.set(clusterPath)
-   it.parameters.clusterName.set(baseName)
-   it.parameters.service = serviceProvider
- }.map { it.getAllHttpSocketURI() }
- def baseInfoAfterTwoNodesUpdate = project.getProviders().of(TestClusterValueSource.class) {
-   it.parameters.path.set(clusterPath)
-   it.parameters.clusterName.set(baseName)
-   it.parameters.service = serviceProvider
- }.map { it.getAllHttpSocketURI() }
- def nonInputProps = nonInputProperties
- def sharedRepoFolder = new File(buildDir, "cluster/shared/repo/${baseName}")
+ def baseInfo = getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }
+ def baseInfoAfterOneNodeUpdate = getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }
+ def baseInfoAfterTwoNodesUpdate = getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") }
+ def sharedRepoFolder = layout.buildDirectory.file("cluster/shared/repo/${baseName}").get().asFile
  doFirst {
    delete(sharedRepoFolder)
    // Getting the endpoints causes a wait for the cluster
    println "Test cluster endpoints are: ${-> baseInfo.get().join(",")}"
    println "Upgrading one node to create a mixed cluster"
-   baseCluster.get().nextNodeToNextVersion()
+   getRegistry().get().nextNodeToNextVersion(baseCluster)
    // Getting the endpoints causes a wait for the cluster
-   println "Upgrade complete, endpoints are: ${-> baseInfoAfterOneNodeUpdate.get().join(",")}"
+   println "Upgrade complete, endpoints are: ${-> baseInfoAfterOneNodeUpdate.get()}"
    println "Upgrading another node to create a mixed cluster"
-   baseCluster.get().nextNodeToNextVersion()
+   getRegistry().get().nextNodeToNextVersion(baseCluster)
-   nonInputProps.systemProperty('tests.rest.cluster', baseInfoAfterTwoNodesUpdate.map(c -> c.join(",")))
-   nonInputProps.systemProperty('tests.clustername', baseName)
-   if (excludeList.isEmpty() == false) {
-     systemProperty 'tests.rest.blacklist', excludeList.join(',')
-   }
  }
- systemProperty 'tests.path.repo', "${buildDir}/cluster/shared/repo/${baseName}"
+ if (excludeList.isEmpty() == false) {
+   systemProperty 'tests.rest.blacklist', excludeList.join(',')
+ }
+ nonInputProperties.systemProperty('tests.rest.cluster', baseInfoAfterTwoNodesUpdate)
+ nonInputProperties.systemProperty('tests.clustername', baseName)
+ systemProperty 'tests.path.repo', "${layout.buildDirectory.file("cluster/shared/repo/${baseName}").get().asFile}"
  systemProperty 'tests.bwc_nodes_version', bwcVersion.toString().replace('-SNAPSHOT', '')
  systemProperty 'tests.new_nodes_version', project.version.toString().replace('-SNAPSHOT', '')
- // onlyIf("BWC tests disabled") { project.bwc_tests_enabled }
+ def bwcEnabled = project.bwc_tests_enabled
+ onlyIf("BWC tests disabled") { bwcEnabled }
}
tasks.register(bwcTaskName(bwcVersion)) {


@ -40,7 +40,7 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
numberOfNodes = 3
setting 'repositories.url.allowed_urls', 'http://snapshot.test*'
- setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}"
+ setting 'path.repo', "${layout.buildDirectory.get().asFile}/cluster/shared/repo/${baseName}"
setting 'xpack.security.enabled', 'false'
requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0")
}
@ -52,12 +52,12 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
useCluster baseCluster
mustRunAfter("precommit")
doFirst {
- delete("${buildDir}/cluster/shared/repo/${baseName}")
+ delete("${layout.buildDirectory.get().asFile}/cluster/shared/repo/${baseName}")
}
def excludeList = []
systemProperty 'tests.rest.suite', 'old_cluster'
systemProperty 'tests.upgrade_from_version', oldVersion
- nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(",")))
+ nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") })
nonInputProperties.systemProperty('tests.clustername', baseName)
if (excludeList.isEmpty() == false) {
systemProperty 'tests.rest.blacklist', excludeList.join(',')
@ -68,12 +68,12 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
dependsOn "${baseName}#oldClusterTest"
useCluster baseCluster
doFirst {
- baseCluster.get().nextNodeToNextVersion()
+ getRegistry().get().nextNodeToNextVersion(baseCluster)
}
systemProperty 'tests.rest.suite', 'mixed_cluster'
systemProperty 'tests.upgrade_from_version', oldVersion
systemProperty 'tests.first_round', 'true'
- nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(",")))
+ nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") })
nonInputProperties.systemProperty('tests.clustername', baseName)
def excludeList = []
if (excludeList.isEmpty() == false) {
@ -85,12 +85,12 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
dependsOn "${baseName}#oneThirdUpgradedTest"
useCluster baseCluster
doFirst {
- baseCluster.get().nextNodeToNextVersion()
+ getRegistry().get().nextNodeToNextVersion(baseCluster)
}
systemProperty 'tests.rest.suite', 'mixed_cluster'
systemProperty 'tests.upgrade_from_version', oldVersion
systemProperty 'tests.first_round', 'false'
- nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(",")))
+ nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") })
nonInputProperties.systemProperty('tests.clustername', baseName)
def excludeList = []
if (excludeList.isEmpty() == false) {
@ -101,12 +101,12 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
tasks.register("${baseName}#upgradedClusterTest", StandaloneRestIntegTestTask) {
dependsOn "${baseName}#twoThirdsUpgradedTest"
doFirst {
- baseCluster.get().nextNodeToNextVersion()
+ getRegistry().get().nextNodeToNextVersion(baseCluster)
}
useCluster testClusters.named(baseName)
systemProperty 'tests.rest.suite', 'upgraded_cluster'
systemProperty 'tests.upgrade_from_version', oldVersion
- nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(",")))
+ nonInputProperties.systemProperty('tests.rest.cluster', getClusterInfo(baseName).map { it.allHttpSocketURI.join(",") })
nonInputProperties.systemProperty('tests.clustername', baseName)
def excludeList = []
if (excludeList.isEmpty() == false) {

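The rewrites in these two build scripts defer all endpoint lookups behind getClusterInfo(...) providers, so nothing blocks on a running cluster while tasks are merely being configured. A rough, self-contained Java analogue of that deferral, with Supplier standing in for Gradle's Provider:

import java.util.function.Supplier;

// Rough analogue: the endpoint string is computed only when get() is called,
// i.e. once the cluster is actually running, not at configuration time.
class LazyEndpointsSketch {
    public static void main(String[] args) {
        Supplier<String> endpoints = () -> {
            System.out.println("resolving endpoints (requires a started cluster)");
            return "127.0.0.1:9200,127.0.0.1:9201"; // stand-in for allHttpSocketURI
        };
        System.out.println("task graph configured; nothing resolved yet");
        System.out.println("tests.rest.cluster = " + endpoints.get()); // resolved here
    }
}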

@ -316,7 +316,7 @@ public class RolloverIT extends ESIntegTestCase {
final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias")
    .setConditions(
        RolloverConditions.newBuilder()
-           .addMaxIndexSizeCondition(new ByteSizeValue(10, ByteSizeUnit.MB))
+           .addMaxIndexSizeCondition(ByteSizeValue.of(10, ByteSizeUnit.MB))
            .addMaxIndexAgeCondition(TimeValue.timeValueHours(4))
    )
    .get();
@ -330,7 +330,7 @@ public class RolloverIT extends ESIntegTestCase {
assertThat(
    conditions,
    containsInAnyOrder(
-       new MaxSizeCondition(new ByteSizeValue(10, ByteSizeUnit.MB)).toString(),
+       new MaxSizeCondition(ByteSizeValue.of(10, ByteSizeUnit.MB)).toString(),
        new MaxAgeCondition(TimeValue.timeValueHours(4)).toString()
    )
);
@ -447,7 +447,7 @@ public class RolloverIT extends ESIntegTestCase {
final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias")
    .setConditions(
        RolloverConditions.newBuilder()
-           .addMaxIndexSizeCondition(new ByteSizeValue(randomIntBetween(100, 50 * 1024), ByteSizeUnit.MB))
+           .addMaxIndexSizeCondition(ByteSizeValue.of(randomIntBetween(100, 50 * 1024), ByteSizeUnit.MB))
    )
    .get();
assertThat(response.getOldIndex(), equalTo("test-1"));
@ -464,7 +464,7 @@ public class RolloverIT extends ESIntegTestCase {
// A small max_size
{
-   ByteSizeValue maxSizeValue = new ByteSizeValue(randomIntBetween(1, 20), ByteSizeUnit.BYTES);
+   ByteSizeValue maxSizeValue = ByteSizeValue.of(randomIntBetween(1, 20), ByteSizeUnit.BYTES);
    long beforeTime = client().threadPool().absoluteTimeInMillis() - 1000L;
    final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias")
        .setConditions(RolloverConditions.newBuilder().addMaxIndexSizeCondition(maxSizeValue))
@ -492,7 +492,7 @@ public class RolloverIT extends ESIntegTestCase {
final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias")
    .setConditions(
        RolloverConditions.newBuilder()
-           .addMaxIndexSizeCondition(new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES))
+           .addMaxIndexSizeCondition(ByteSizeValue.of(randomNonNegativeLong(), ByteSizeUnit.BYTES))
            .addMinIndexDocsCondition(1L)
    )
    .get();
@ -523,7 +523,7 @@ public class RolloverIT extends ESIntegTestCase {
final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias")
    .setConditions(
        RolloverConditions.newBuilder()
-           .addMaxPrimaryShardSizeCondition(new ByteSizeValue(randomIntBetween(100, 50 * 1024), ByteSizeUnit.MB))
+           .addMaxPrimaryShardSizeCondition(ByteSizeValue.of(randomIntBetween(100, 50 * 1024), ByteSizeUnit.MB))
    )
    .get();
assertThat(response.getOldIndex(), equalTo("test-1"));
@ -540,7 +540,7 @@ public class RolloverIT extends ESIntegTestCase {
// A small max_primary_shard_size
{
-   ByteSizeValue maxPrimaryShardSizeCondition = new ByteSizeValue(randomIntBetween(1, 20), ByteSizeUnit.BYTES);
+   ByteSizeValue maxPrimaryShardSizeCondition = ByteSizeValue.of(randomIntBetween(1, 20), ByteSizeUnit.BYTES);
    long beforeTime = client().threadPool().absoluteTimeInMillis() - 1000L;
    final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias")
        .setConditions(RolloverConditions.newBuilder().addMaxPrimaryShardSizeCondition(maxPrimaryShardSizeCondition))
@ -568,7 +568,7 @@ public class RolloverIT extends ESIntegTestCase {
final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias")
    .setConditions(
        RolloverConditions.newBuilder()
-           .addMaxPrimaryShardSizeCondition(new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES))
+           .addMaxPrimaryShardSizeCondition(ByteSizeValue.of(randomNonNegativeLong(), ByteSizeUnit.BYTES))
            .addMinIndexDocsCondition(1L)
    )
    .get();

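The hunks above, and most of the test hunks that follow, apply one mechanical substitution: the ByteSizeValue constructor becomes the ByteSizeValue.of static factory. A minimal sketch of the new call (assuming the Elasticsearch core artifact on the classpath; the constructor form being replaced may no longer be accessible after this commit):

import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

class ByteSizeValueFactoryExample {
    public static void main(String[] args) {
        // Factory form used throughout this commit.
        ByteSizeValue tenMb = ByteSizeValue.of(10, ByteSizeUnit.MB);
        System.out.println(tenMb.getBytes()); // 10485760, i.e. 10 * 1024 * 1024
    }
}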

@ -53,7 +53,7 @@ public class BulkProcessor2IT extends ESIntegTestCase {
// let's make sure that the bulk action limit trips, one single execution will index all the documents
.setBulkActions(numDocs)
.setFlushInterval(TimeValue.timeValueHours(24))
-.setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
+.setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB))
.build();
try {
@ -89,7 +89,7 @@ public class BulkProcessor2IT extends ESIntegTestCase {
.setBulkActions(bulkActions)
// set interval and size to high values
.setFlushInterval(TimeValue.timeValueHours(24))
-.setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
+.setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB))
.build();
try {
@ -134,7 +134,7 @@ public class BulkProcessor2IT extends ESIntegTestCase {
// let's make sure that the bulk action limit trips, one single execution will index all the documents
.setBulkActions(numDocs)
.setFlushInterval(TimeValue.timeValueHours(24))
-.setBulkSize(new ByteSizeValue(randomIntBetween(1, 10), RandomPicks.randomFrom(random(), ByteSizeUnit.values())))
+.setBulkSize(ByteSizeValue.of(randomIntBetween(1, 10), RandomPicks.randomFrom(random(), ByteSizeUnit.values())))
.build();
MultiGetRequestBuilder multiGetRequestBuilder = indexDocs(client(), processor, numDocs);
@ -169,7 +169,7 @@ public class BulkProcessor2IT extends ESIntegTestCase {
.setBulkActions(bulkActions)
// set interval and size to high values
.setFlushInterval(TimeValue.timeValueHours(24))
-.setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
+.setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB))
.build();
try {


@ -55,7 +55,7 @@ public class BulkProcessorIT extends ESIntegTestCase {
.setConcurrentRequests(randomIntBetween(0, 1))
.setBulkActions(numDocs)
.setFlushInterval(TimeValue.timeValueHours(24))
-.setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
+.setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB))
.build()
) {
@ -83,7 +83,7 @@ public class BulkProcessorIT extends ESIntegTestCase {
.setConcurrentRequests(randomIntBetween(0, 10))
.setBulkActions(numDocs + randomIntBetween(1, 100))
.setFlushInterval(TimeValue.timeValueHours(24))
-.setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
+.setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB))
.build()
) {
@ -115,7 +115,7 @@ public class BulkProcessorIT extends ESIntegTestCase {
.setConcurrentRequests(randomIntBetween(0, 10))
.setBulkActions(numDocs + randomIntBetween(1, 100))
.setFlushInterval(TimeValue.timeValueHours(24))
-.setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
+.setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB))
.setFlushCondition(flushEnabled::get)
.build()
) {
@ -159,7 +159,7 @@ public class BulkProcessorIT extends ESIntegTestCase {
.setBulkActions(bulkActions)
// set interval and size to high values
.setFlushInterval(TimeValue.timeValueHours(24))
-.setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
+.setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB))
.build()
) {
@ -202,7 +202,7 @@ public class BulkProcessorIT extends ESIntegTestCase {
.setConcurrentRequests(randomIntBetween(0, 1))
.setBulkActions(numDocs)
.setFlushInterval(TimeValue.timeValueHours(24))
-.setBulkSize(new ByteSizeValue(randomIntBetween(1, 10), RandomPicks.randomFrom(random(), ByteSizeUnit.values())))
+.setBulkSize(ByteSizeValue.of(randomIntBetween(1, 10), RandomPicks.randomFrom(random(), ByteSizeUnit.values())))
.build();
MultiGetRequestBuilder multiGetRequestBuilder = indexDocs(client(), processor, numDocs);
@ -250,7 +250,7 @@ public class BulkProcessorIT extends ESIntegTestCase {
.setBulkActions(bulkActions)
// set interval and size to high values
.setFlushInterval(TimeValue.timeValueHours(24))
-.setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
+.setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB))
.build()
) {


@ -43,7 +43,7 @@ import static org.hamcrest.Matchers.equalTo;
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
public class DiskThresholdMonitorIT extends DiskUsageIntegTestCase {
-   private static final long FLOOD_STAGE_BYTES = new ByteSizeValue(10, ByteSizeUnit.KB).getBytes();
+   private static final long FLOOD_STAGE_BYTES = ByteSizeValue.of(10, ByteSizeUnit.KB).getBytes();
    @Override
    protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {


@ -60,7 +60,7 @@ import static org.hamcrest.Matchers.is;
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
public class DiskThresholdDeciderIT extends DiskUsageIntegTestCase {
-   private static final long WATERMARK_BYTES = new ByteSizeValue(10, ByteSizeUnit.KB).getBytes();
+   private static final long WATERMARK_BYTES = ByteSizeValue.of(10, ByteSizeUnit.KB).getBytes();
    @Override
    protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {


@ -331,7 +331,7 @@ public class IndexShardIT extends ESSingleNodeTestCase {
Settings.builder()
    .put(
        IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
-       new ByteSizeValue(135 /* size of the operation + one generation header&footer*/, ByteSizeUnit.BYTES)
+       ByteSizeValue.of(135 /* size of the operation + one generation header&footer*/, ByteSizeUnit.BYTES)
    )
    .build()
)
@ -371,7 +371,7 @@ public class IndexShardIT extends ESSingleNodeTestCase {
indicesAdmin().prepareUpdateSettings("test")
    .setSettings(
        Settings.builder()
-           .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(size, ByteSizeUnit.BYTES))
+           .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(size, ByteSizeUnit.BYTES))
            .build()
    )
    .get();


@ -604,7 +604,7 @@ public class RemoveCorruptedShardDataCommandIT extends ESIntegTestCase {
private static void disableTranslogFlush(String index) {
    updateIndexSettings(
        Settings.builder()
-           .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)),
+           .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.PB)),
        index
    );
}


@ -155,7 +155,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
// no checkindex - we corrupt shards on purpose
.put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
// no translog based flush - it might change the .liv / segments.N files
-.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
+.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.PB))
)
);
ensureGreen();
@ -269,7 +269,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
.put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on
// purpose
// no translog based flush - it might change the .liv / segments.N files
-.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
+.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.PB))
)
);
ensureGreen();
@ -544,7 +544,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
// no checkindex - we corrupt shards on purpose
.put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
// no translog based flush - it might change the .liv / segments.N files
-.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
+.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.PB))
)
);
ensureGreen();
@ -612,7 +612,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
// no checkindex - we corrupt shards on purpose
.put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
// no translog based flush - it might change the .liv / segments.N files
-.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
+.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.PB))
)
);
ensureGreen();


@ -54,7 +54,7 @@ public class CorruptedTranslogIT extends ESIntegTestCase {
prepareCreate("test").setSettings( prepareCreate("test").setSettings(
indexSettings(1, 0).put("index.refresh_interval", "-1") indexSettings(1, 0).put("index.refresh_interval", "-1")
.put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true) // never flush - always recover from translog .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true) // never flush - always recover from translog
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.PB))
) )
); );


@ -51,6 +51,7 @@ public class TestSystemIndexDescriptor extends SystemIndexDescriptor {
INDEX_NAME,
0,
"stack",
+null,
Type.INTERNAL_MANAGED,
List.of(),
List.of(),
@ -70,6 +71,7 @@ public class TestSystemIndexDescriptor extends SystemIndexDescriptor {
name,
0,
"stack",
+null,
Type.INTERNAL_MANAGED,
List.of(),
List.of(),


@ -332,7 +332,7 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase {
}
public void testLimitsRequestSize() {
-   ByteSizeValue inFlightRequestsLimit = new ByteSizeValue(8, ByteSizeUnit.KB);
+   ByteSizeValue inFlightRequestsLimit = ByteSizeValue.of(8, ByteSizeUnit.KB);
    if (noopBreakerUsed()) {
        logger.info("--> noop breakers used, skipping test");
        return;


@ -256,7 +256,7 @@ public class IndexRecoveryIT extends AbstractIndexRecoveryIntegTestCase {
public Settings.Builder createRecoverySettingsChunkPerSecond(long chunkSizeBytes) {
    return Settings.builder()
        // Set the chunk size in bytes
-       .put(RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.getKey(), new ByteSizeValue(chunkSizeBytes, ByteSizeUnit.BYTES))
+       .put(RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.getKey(), ByteSizeValue.of(chunkSizeBytes, ByteSizeUnit.BYTES))
        // Set one chunk of bytes per second.
        .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), chunkSizeBytes, ByteSizeUnit.BYTES);
}


@ -285,7 +285,7 @@ public class UpdateSettingsIT extends ESIntegTestCase {
IndexService indexService = service.indexService(resolveIndex("test"));
if (indexService != null) {
    assertEquals(indexService.getIndexSettings().getRefreshInterval().millis(), -1);
-   assertEquals(indexService.getIndexSettings().getFlushThresholdSize(new ByteSizeValue(1, ByteSizeUnit.TB)).getBytes(), 1024);
+   assertEquals(indexService.getIndexSettings().getFlushThresholdSize(ByteSizeValue.of(1, ByteSizeUnit.TB)).getBytes(), 1024);
    assertEquals(indexService.getIndexSettings().getGenerationThresholdSize().getBytes(), 4096);
}
}
@ -296,7 +296,7 @@ public class UpdateSettingsIT extends ESIntegTestCase {
IndexService indexService = service.indexService(resolveIndex("test"));
if (indexService != null) {
    assertEquals(indexService.getIndexSettings().getRefreshInterval().millis(), 1000);
-   assertEquals(indexService.getIndexSettings().getFlushThresholdSize(new ByteSizeValue(1, ByteSizeUnit.TB)).getBytes(), 1024);
+   assertEquals(indexService.getIndexSettings().getFlushThresholdSize(ByteSizeValue.of(1, ByteSizeUnit.TB)).getBytes(), 1024);
    assertEquals(indexService.getIndexSettings().getGenerationThresholdSize().getBytes(), 4096);
}
}


@ -75,7 +75,7 @@ public class CloseIndexIT extends ESIntegTestCase {
.put(super.indexSettings())
.put(
    IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
-   new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB)
+   ByteSizeValue.of(randomIntBetween(1, 4096), ByteSizeUnit.KB)
)
.build();
}


@ -63,10 +63,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase {
public void testCancelRecoveryAndResume() throws Exception {
    updateClusterSettings(
        Settings.builder()
-           .put(
-               RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.getKey(),
-               new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES)
-           )
+           .put(RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.getKey(), ByteSizeValue.of(randomIntBetween(50, 300), ByteSizeUnit.BYTES))
    );
    NodesStatsResponse nodeStats = clusterAdmin().prepareNodesStats().get();


@ -21,7 +21,7 @@ public class FsBlobStoreRepositoryIT extends ESFsBasedRepositoryIntegTestCase {
final Settings.Builder settings = Settings.builder().put("compress", randomBoolean()).put("location", randomRepoPath());
if (randomBoolean()) {
    long size = 1 << randomInt(10);
-   settings.put("chunk_size", new ByteSizeValue(size, ByteSizeUnit.KB));
+   settings.put("chunk_size", ByteSizeValue.of(size, ByteSizeUnit.KB));
}
return settings.build();
}


@ -497,7 +497,7 @@ public class SnapshotShutdownIT extends AbstractSnapshotIntegTestCase {
final String nodeForRemovalId = internalCluster().getInstance(NodeEnvironment.class, nodeForRemoval).nodeId();
final var indexName = randomIdentifier();
createIndexWithContent(indexName, indexSettings(numShards, 0).put(REQUIRE_NODE_NAME_SETTING, nodeForRemoval).build());
-indexAllShardsToAnEqualOrGreaterMinimumSize(indexName, new ByteSizeValue(2, ByteSizeUnit.KB).getBytes());
+indexAllShardsToAnEqualOrGreaterMinimumSize(indexName, ByteSizeValue.of(2, ByteSizeUnit.KB).getBytes());
// Start the snapshot with blocking in place on the data node not to allow shard snapshots to finish yet.
final var clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class);


@ -1824,7 +1824,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
org.elasticsearch.indices.recovery.PeerRecoveryNotFound.class,
org.elasticsearch.indices.recovery.PeerRecoveryNotFound::new,
158,
-TransportVersions.V_7_9_0
+UNKNOWN_VERSION_ADDED
),
NODE_HEALTH_CHECK_FAILURE_EXCEPTION(
org.elasticsearch.cluster.coordination.NodeHealthCheckFailureException.class,
@ -1836,7 +1836,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
org.elasticsearch.transport.NoSeedNodeLeftException.class,
org.elasticsearch.transport.NoSeedNodeLeftException::new,
160,
-TransportVersions.V_7_10_0
+UNKNOWN_VERSION_ADDED
),
AUTHENTICATION_PROCESSING_ERROR(
org.elasticsearch.ElasticsearchAuthenticationProcessingError.class,


@ -63,7 +63,6 @@ public class TransportVersions {
public static final TransportVersion V_7_8_1 = def(7_08_01_99);
public static final TransportVersion V_7_9_0 = def(7_09_00_99);
public static final TransportVersion V_7_10_0 = def(7_10_00_99);
-public static final TransportVersion V_7_10_1 = def(7_10_01_99);
public static final TransportVersion V_7_11_0 = def(7_11_00_99);
public static final TransportVersion V_7_12_0 = def(7_12_00_99);
public static final TransportVersion V_7_13_0 = def(7_13_00_99);
@ -158,6 +157,7 @@ public class TransportVersions {
public static final TransportVersion ELASTIC_INFERENCE_SERVICE_UNIFIED_CHAT_COMPLETIONS_INTEGRATION = def(8_822_00_0);
public static final TransportVersion KQL_QUERY_TECH_PREVIEW = def(8_823_00_0);
public static final TransportVersion ESQL_PROFILE_ROWS_PROCESSED = def(8_824_00_0);
+public static final TransportVersion BYTE_SIZE_VALUE_ALWAYS_USES_BYTES = def(8_825_00_0);
/*
* WARNING: DO NOT MERGE INTO MAIN!


@ -53,7 +53,7 @@ public class TransportGetFeatureUpgradeStatusAction extends TransportMasterNodeA
GetFeatureUpgradeStatusResponse> {
/**
- * Once all feature migrations for 8.x -> 9.x have been tested, we can bump this to Version.V_8_0_0
+ * Once all feature migrations for 9.x -> 10.x have been tested, we can bump this to Version.V_9_0_0
 */
public static final Version NO_UPGRADE_REQUIRED_VERSION = Version.V_8_0_0;
public static final IndexVersion NO_UPGRADE_REQUIRED_INDEX_VERSION = IndexVersions.V_8_0_0;


@ -111,9 +111,7 @@ public class NodeInfo extends BaseNodeResponse {
addInfoIfNonNull(HttpInfo.class, in.readOptionalWriteable(HttpInfo::new));
addInfoIfNonNull(PluginsAndModules.class, in.readOptionalWriteable(PluginsAndModules::new));
addInfoIfNonNull(IngestInfo.class, in.readOptionalWriteable(IngestInfo::new));
-if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) {
-    addInfoIfNonNull(AggregationInfo.class, in.readOptionalWriteable(AggregationInfo::new));
-}
+addInfoIfNonNull(AggregationInfo.class, in.readOptionalWriteable(AggregationInfo::new));
if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
    addInfoIfNonNull(RemoteClusterServerInfo.class, in.readOptionalWriteable(RemoteClusterServerInfo::new));
}
@ -285,9 +283,7 @@ public class NodeInfo extends BaseNodeResponse {
out.writeOptionalWriteable(getInfo(HttpInfo.class));
out.writeOptionalWriteable(getInfo(PluginsAndModules.class));
out.writeOptionalWriteable(getInfo(IngestInfo.class));
-if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) {
-    out.writeOptionalWriteable(getInfo(AggregationInfo.class));
-}
+out.writeOptionalWriteable(getInfo(AggregationInfo.class));
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
    out.writeOptionalWriteable(getInfo(RemoteClusterServerInfo.class));
}

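The NodeInfo hunks above drop onOrAfter(V_7_10_0) guards: once every wire-compatible peer is past the gate, the check is constant-true and the conditional read/write can be inlined. A self-contained toy model of that reasoning (hypothetical TransportVersion record, not the real class; the id values only mimic the def(...) scheme above):

// Toy model: why a version gate that every peer now satisfies can be deleted.
class VersionGateSketch {
    record TransportVersion(int id) {
        boolean onOrAfter(TransportVersion other) {
            return id >= other.id;
        }
    }

    // Hypothetical ids, shaped like the def(...) constants in TransportVersions.
    static final TransportVersion V_7_10_0 = new TransportVersion(7_10_00_99);
    static final TransportVersion OLDEST_COMPATIBLE_PEER = new TransportVersion(8_00_00_99);

    public static void main(String[] args) {
        // Always true once the compatibility floor has moved past the gate, so the
        // `if (version.onOrAfter(V_7_10_0))` wrapper around the AggregationInfo
        // read/write carries no information and can be removed.
        System.out.println(OLDEST_COMPATIBLE_PEER.onOrAfter(V_7_10_0)); // true
    }
}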

@ -84,7 +84,7 @@ public class BulkProcessor implements Closeable {
private final Runnable onClose;
private int concurrentRequests = 1;
private int bulkActions = 1000;
-private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB);
+private ByteSizeValue bulkSize = ByteSizeValue.of(5, ByteSizeUnit.MB);
private TimeValue flushInterval = null;
private BackoffPolicy backoffPolicy = BackoffPolicy.exponentialBackoff();
private String globalIndex;


@ -76,8 +76,8 @@ public class BulkProcessor2 implements Closeable {
private final Listener listener;
private final ThreadPool threadPool;
private int maxRequestsInBulk = 1000;
-private ByteSizeValue maxBulkSizeInBytes = new ByteSizeValue(5, ByteSizeUnit.MB);
-private ByteSizeValue maxBytesInFlight = new ByteSizeValue(50, ByteSizeUnit.MB);
+private ByteSizeValue maxBulkSizeInBytes = ByteSizeValue.of(5, ByteSizeUnit.MB);
+private ByteSizeValue maxBytesInFlight = ByteSizeValue.of(50, ByteSizeUnit.MB);
private TimeValue flushInterval = null;
private int maxNumberOfRetries = 3;


@ -29,7 +29,6 @@ import org.elasticsearch.search.rank.context.QueryPhaseRankCoordinatorContext;
import java.util.ArrayDeque;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
@ -67,8 +66,8 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults<SearchPhas
private final Consumer<Exception> onPartialMergeFailure;
private final int batchReduceSize;
-private final List<QuerySearchResult> buffer = new ArrayList<>();
-private final List<SearchShard> emptyResults = new ArrayList<>();
+private List<QuerySearchResult> buffer = new ArrayList<>();
+private List<SearchShard> emptyResults = new ArrayList<>();
// the memory that is accounted in the circuit breaker for this consumer
private volatile long circuitBreakerBytes;
// the memory that is currently used in the buffer
@ -159,32 +158,40 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults<SearchPhas
if (f != null) {
    throw f;
}
+List<QuerySearchResult> buffer;
+synchronized (this) {
+    // final reduce, we're done with the buffer so we just null it out and continue with a local variable to
+    // save field references. The synchronized block is never contended but needed to have a memory barrier and sync buffer's
+    // contents with all the previous writers to it
+    buffer = this.buffer;
+    buffer = buffer == null ? Collections.emptyList() : buffer;
+    this.buffer = null;
+}
// ensure consistent ordering
-sortBuffer();
+buffer.sort(RESULT_COMPARATOR);
final TopDocsStats topDocsStats = this.topDocsStats;
+var mergeResult = this.mergeResult;
+this.mergeResult = null;
final int resultSize = buffer.size() + (mergeResult == null ? 0 : 1);
final List<TopDocs> topDocsList = hasTopDocs ? new ArrayList<>(resultSize) : null;
final List<DelayableWriteable<InternalAggregations>> aggsList = hasAggs ? new ArrayList<>(resultSize) : null;
-synchronized (this) {
-    if (mergeResult != null) {
-        if (topDocsList != null) {
-            topDocsList.add(mergeResult.reducedTopDocs);
-        }
-        if (aggsList != null) {
-            aggsList.add(DelayableWriteable.referencing(mergeResult.reducedAggs));
-        }
-    }
-    for (QuerySearchResult result : buffer) {
-        topDocsStats.add(result.topDocs(), result.searchTimedOut(), result.terminatedEarly());
-        if (topDocsList != null) {
-            TopDocsAndMaxScore topDocs = result.consumeTopDocs();
-            setShardIndex(topDocs.topDocs, result.getShardIndex());
-            topDocsList.add(topDocs.topDocs);
-        }
-        if (aggsList != null) {
-            aggsList.add(result.getAggs());
-        }
-    }
-}
+if (mergeResult != null) {
+    if (topDocsList != null) {
+        topDocsList.add(mergeResult.reducedTopDocs);
+    }
+    if (aggsList != null) {
+        aggsList.add(DelayableWriteable.referencing(mergeResult.reducedAggs));
+    }
+}
+for (QuerySearchResult result : buffer) {
+    topDocsStats.add(result.topDocs(), result.searchTimedOut(), result.terminatedEarly());
+    if (topDocsList != null) {
+        TopDocsAndMaxScore topDocs = result.consumeTopDocs();
+        setShardIndex(topDocs.topDocs, result.getShardIndex());
+        topDocsList.add(topDocs.topDocs);
+    }
+    if (aggsList != null) {
+        aggsList.add(result.getAggs());
+    }
+}
SearchPhaseController.ReducedQueryPhase reducePhase;
@ -206,7 +213,7 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults<SearchPhas
        performFinalReduce
    );
} finally {
-   releaseAggs();
+   releaseAggs(buffer);
}
if (hasAggs
    // reduced aggregations can be null if all shards failed
@ -226,25 +233,25 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults<SearchPhas
    );
}
return reducePhase;
}

private static final Comparator<QuerySearchResult> RESULT_COMPARATOR = Comparator.comparingInt(QuerySearchResult::getShardIndex);

private MergeResult partialReduce(
-   QuerySearchResult[] toConsume,
-   List<SearchShard> emptyResults,
+   List<QuerySearchResult> toConsume,
+   List<SearchShard> processedShards,
    TopDocsStats topDocsStats,
    MergeResult lastMerge,
    int numReducePhases
) {
    // ensure consistent ordering
-   Arrays.sort(toConsume, RESULT_COMPARATOR);
-   final List<SearchShard> processedShards = new ArrayList<>(emptyResults);
+   toConsume.sort(RESULT_COMPARATOR);
    final TopDocs newTopDocs;
    final InternalAggregations newAggs;
    final List<DelayableWriteable<InternalAggregations>> aggsList;
-   final int resultSetSize = toConsume.length + (lastMerge != null ? 1 : 0);
+   final int resultSetSize = toConsume.size() + (lastMerge != null ? 1 : 0);
    if (hasAggs) {
        aggsList = new ArrayList<>(resultSetSize);
        if (lastMerge != null) {
@ -307,12 +314,6 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults<SearchPhas
    return queue.isEmpty() == false || runningTask.get() != null;
}
-void sortBuffer() {
-    if (buffer.size() > 0) {
-        buffer.sort(RESULT_COMPARATOR);
-    }
-}
private synchronized void addWithoutBreaking(long size) {
    circuitBreaker.addWithoutBreaking(size);
    circuitBreakerBytes += size;
@ -376,21 +377,21 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults<SearchPhas
    }
}
if (hasFailure == false) {
+   var b = buffer;
    aggsCurrentBufferSize += aggsSize;
    // add one if a partial merge is pending
-   int size = buffer.size() + (hasPartialReduce ? 1 : 0);
+   int size = b.size() + (hasPartialReduce ? 1 : 0);
    if (size >= batchReduceSize) {
        hasPartialReduce = true;
        executeNextImmediately = false;
-       QuerySearchResult[] clone = buffer.toArray(QuerySearchResult[]::new);
-       MergeTask task = new MergeTask(clone, aggsCurrentBufferSize, new ArrayList<>(emptyResults), next);
+       MergeTask task = new MergeTask(b, aggsCurrentBufferSize, emptyResults, next);
+       b = buffer = new ArrayList<>();
+       emptyResults = new ArrayList<>();
        aggsCurrentBufferSize = 0;
-       buffer.clear();
-       emptyResults.clear();
        queue.add(task);
        tryExecuteNext();
    }
-   buffer.add(result);
+   b.add(result);
}
}
}
@ -404,10 +405,13 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults<SearchPhas
}

private void releaseBuffer() {
-   for (QuerySearchResult querySearchResult : buffer) {
-       querySearchResult.releaseAggs();
-   }
-   buffer.clear();
+   var b = buffer;
+   if (b != null) {
+       this.buffer = null;
+       for (QuerySearchResult querySearchResult : b) {
+           querySearchResult.releaseAggs();
+       }
+   }
}

private synchronized void onMergeFailure(Exception exc) {
@ -449,7 +453,7 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults<SearchPhas
@Override
protected void doRun() {
    MergeTask mergeTask = task;
-   QuerySearchResult[] toConsume = mergeTask.consumeBuffer();
+   List<QuerySearchResult> toConsume = mergeTask.consumeBuffer();
    while (mergeTask != null) {
        final MergeResult thisMergeResult = mergeResult;
        long estimatedTotalSize = (thisMergeResult != null ? thisMergeResult.estimatedSize : 0) + mergeTask.aggsBufferSize;
@ -512,15 +516,7 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults<SearchPhas
    });
}
-private synchronized void releaseAggs() {
-    if (hasAggs) {
-        for (QuerySearchResult result : buffer) {
-            result.releaseAggs();
-        }
-    }
-}
-private static void releaseAggs(QuerySearchResult... toConsume) {
+private static void releaseAggs(List<QuerySearchResult> toConsume) {
    for (QuerySearchResult result : toConsume) {
        result.releaseAggs();
    }
@ -535,19 +531,19 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults<SearchPhas
private static class MergeTask {
    private final List<SearchShard> emptyResults;
-   private QuerySearchResult[] buffer;
+   private List<QuerySearchResult> buffer;
    private final long aggsBufferSize;
    private Runnable next;

-   private MergeTask(QuerySearchResult[] buffer, long aggsBufferSize, List<SearchShard> emptyResults, Runnable next) {
+   private MergeTask(List<QuerySearchResult> buffer, long aggsBufferSize, List<SearchShard> emptyResults, Runnable next) {
        this.buffer = buffer;
        this.aggsBufferSize = aggsBufferSize;
        this.emptyResults = emptyResults;
        this.next = next;
    }

-   public synchronized QuerySearchResult[] consumeBuffer() {
-       QuerySearchResult[] toRet = buffer;
+   public synchronized List<QuerySearchResult> consumeBuffer() {
+       List<QuerySearchResult> toRet = buffer;
        buffer = null;
        return toRet;
    }
@ -559,7 +555,7 @@ public class QueryPhaseResultConsumer extends ArraySearchPhaseResults<SearchPhas
}

public void cancel() {
-   QuerySearchResult[] buffer = consumeBuffer();
+   List<QuerySearchResult> buffer = consumeBuffer();
    if (buffer != null) {
        releaseAggs(buffer);
    }

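The heart of the QueryPhaseResultConsumer change above is a consume-once hand-off: the shared buffer reference is detached inside a brief synchronized block (for the memory barrier) and drained without the lock afterwards, while later readers observe null and know it has been consumed. A self-contained sketch of the idiom, with String standing in for QuerySearchResult:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Hypothetical class: consume-once buffer hand-off, as in the refactor above.
class BufferHandOffSketch {
    private List<String> buffer = new ArrayList<>();

    synchronized void add(String result) {
        if (buffer != null) {
            buffer.add(result); // writers only touch the buffer while it exists
        }
    }

    List<String> consume() {
        List<String> taken;
        synchronized (this) {
            // Detach under the lock: memory barrier plus exclusivity.
            taken = buffer == null ? Collections.emptyList() : buffer;
            buffer = null; // later calls see the buffer as already consumed
        }
        return taken; // safe to iterate outside the lock: no one else holds a reference
    }

    public static void main(String[] args) {
        BufferHandOffSketch sketch = new BufferHandOffSketch();
        sketch.add("shard-0");
        sketch.add("shard-1");
        System.out.println(sketch.consume()); // [shard-0, shard-1]
        System.out.println(sketch.consume()); // [] -- already consumed
    }
}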

@ -22,7 +22,6 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.ChunkedToXContent;
import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContent;
import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentBuilder;
@@ -101,9 +100,7 @@ public class ClusterInfo implements ChunkedToXContent, Writeable {
this.dataPath = in.getTransportVersion().onOrAfter(DATA_PATH_NEW_KEY_VERSION) this.dataPath = in.getTransportVersion().onOrAfter(DATA_PATH_NEW_KEY_VERSION)
? in.readImmutableMap(NodeAndShard::new, StreamInput::readString) ? in.readImmutableMap(NodeAndShard::new, StreamInput::readString)
: in.readImmutableMap(nested -> NodeAndShard.from(new ShardRouting(nested)), StreamInput::readString); : in.readImmutableMap(nested -> NodeAndShard.from(new ShardRouting(nested)), StreamInput::readString);
this.reservedSpace = in.getTransportVersion().onOrAfter(StoreStats.RESERVED_BYTES_VERSION) this.reservedSpace = in.readImmutableMap(NodeAndPath::new, ReservedSpace::new);
? in.readImmutableMap(NodeAndPath::new, ReservedSpace::new)
: Map.of();
} }
@Override @Override
@@ -119,9 +116,7 @@ public class ClusterInfo implements ChunkedToXContent, Writeable {
} else { } else {
out.writeMap(this.dataPath, (o, k) -> createFakeShardRoutingFromNodeAndShard(k).writeTo(o), StreamOutput::writeString); out.writeMap(this.dataPath, (o, k) -> createFakeShardRoutingFromNodeAndShard(k).writeTo(o), StreamOutput::writeString);
} }
if (out.getTransportVersion().onOrAfter(StoreStats.RESERVED_BYTES_VERSION)) { out.writeMap(this.reservedSpace);
out.writeMap(this.reservedSpace);
}
} }
/** /**
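This hunk, and the IndexMetadata, InnerHitBuilder, and query-builder hunks below, all apply the same cleanup: a transport-version gate that is now always true is deleted and the read or write becomes unconditional. A sketch of the before-and-after shape, using a hypothetical Example writeable with a single gated boolean (StreamInput/StreamOutput are the stream types used throughout this diff):

    import java.io.IOException;

    import org.elasticsearch.TransportVersions;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;

    // Hypothetical writeable with one field that used to be version-gated.
    final class Example implements Writeable {
        final boolean flag;

        Example(StreamInput in) throws IOException {
            // Before: old peers never sent the field, so the read was gated.
            // flag = in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)
            //     ? in.readBoolean()
            //     : false;
            // After: every peer this branch still talks to is past the gate,
            // so the branch is dead code and the read is unconditional.
            flag = in.readBoolean();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeBoolean(flag); // formerly wrapped in the same version check
        }
    }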
@@ -563,8 +563,6 @@ public class IndexMetadata implements Diffable<IndexMetadata>, ToXContentFragmen
public static final String INDEX_STATE_FILE_PREFIX = "state-"; public static final String INDEX_STATE_FILE_PREFIX = "state-";
static final TransportVersion SYSTEM_INDEX_FLAG_ADDED = TransportVersions.V_7_10_0;
static final TransportVersion STATS_AND_FORECAST_ADDED = TransportVersions.V_8_6_0; static final TransportVersion STATS_AND_FORECAST_ADDED = TransportVersions.V_8_6_0;
private final int routingNumShards; private final int routingNumShards;
@@ -1644,11 +1642,7 @@ public class IndexMetadata implements Diffable<IndexMetadata>, ToXContentFragmen
} else { } else {
mappingsUpdatedVersion = IndexVersions.ZERO; mappingsUpdatedVersion = IndexVersions.ZERO;
} }
if (in.getTransportVersion().onOrAfter(SYSTEM_INDEX_FLAG_ADDED)) { isSystem = in.readBoolean();
isSystem = in.readBoolean();
} else {
isSystem = false;
}
timestampRange = IndexLongFieldRange.readFrom(in); timestampRange = IndexLongFieldRange.readFrom(in);
if (in.getTransportVersion().onOrAfter(STATS_AND_FORECAST_ADDED)) { if (in.getTransportVersion().onOrAfter(STATS_AND_FORECAST_ADDED)) {
stats = in.readOptionalWriteable(IndexMetadataStats::new); stats = in.readOptionalWriteable(IndexMetadataStats::new);
@@ -1694,9 +1688,7 @@ public class IndexMetadata implements Diffable<IndexMetadata>, ToXContentFragmen
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) {
IndexVersion.writeVersion(mappingsUpdatedVersion, out); IndexVersion.writeVersion(mappingsUpdatedVersion, out);
} }
if (out.getTransportVersion().onOrAfter(SYSTEM_INDEX_FLAG_ADDED)) { out.writeBoolean(isSystem);
out.writeBoolean(isSystem);
}
timestampRange.writeTo(out); timestampRange.writeTo(out);
if (out.getTransportVersion().onOrAfter(STATS_AND_FORECAST_ADDED)) { if (out.getTransportVersion().onOrAfter(STATS_AND_FORECAST_ADDED)) {
out.writeOptionalWriteable(stats); out.writeOptionalWriteable(stats);
@@ -1798,9 +1790,7 @@ public class IndexMetadata implements Diffable<IndexMetadata>, ToXContentFragmen
if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) { if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) {
builder.mappingsUpdatedVersion(IndexVersion.readVersion(in)); builder.mappingsUpdatedVersion(IndexVersion.readVersion(in));
} }
if (in.getTransportVersion().onOrAfter(SYSTEM_INDEX_FLAG_ADDED)) { builder.system(in.readBoolean());
builder.system(in.readBoolean());
}
builder.timestampRange(IndexLongFieldRange.readFrom(in)); builder.timestampRange(IndexLongFieldRange.readFrom(in));
if (in.getTransportVersion().onOrAfter(STATS_AND_FORECAST_ADDED)) { if (in.getTransportVersion().onOrAfter(STATS_AND_FORECAST_ADDED)) {
@@ -1850,9 +1840,7 @@ public class IndexMetadata implements Diffable<IndexMetadata>, ToXContentFragmen
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) {
IndexVersion.writeVersion(mappingsUpdatedVersion, out); IndexVersion.writeVersion(mappingsUpdatedVersion, out);
} }
if (out.getTransportVersion().onOrAfter(SYSTEM_INDEX_FLAG_ADDED)) { out.writeBoolean(isSystem);
out.writeBoolean(isSystem);
}
timestampRange.writeTo(out); timestampRange.writeTo(out);
if (out.getTransportVersion().onOrAfter(STATS_AND_FORECAST_ADDED)) { if (out.getTransportVersion().onOrAfter(STATS_AND_FORECAST_ADDED)) {
out.writeOptionalWriteable(stats); out.writeOptionalWriteable(stats);
@@ -22,6 +22,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.IndexVersions;
import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.AnalyzerScope;
import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.mapper.MapperMetrics; import org.elasticsearch.index.mapper.MapperMetrics;
@@ -139,39 +140,37 @@ public class IndexMetadataVerifier {
); );
} }
private static boolean isFullySupportedVersion(IndexMetadata indexMetadata, IndexVersion minimumIndexCompatibilityVersion) { public static boolean isFullySupportedVersion(IndexMetadata indexMetadata, IndexVersion minimumIndexCompatibilityVersion) {
return indexMetadata.getCompatibilityVersion().onOrAfter(minimumIndexCompatibilityVersion); return indexMetadata.getCompatibilityVersion().onOrAfter(minimumIndexCompatibilityVersion);
} }
/** /**
* Returns {@code true} if the index version is compatible in read-only mode. As of today, only searchable snapshots and archive indices * Returns {@code true} if the index version is compatible with read-only mode. A regular index is read-only compatible if it was
* in version N-2 with a write block are read-only compatible. This method throws an {@link IllegalStateException} if the index is * created in version N-2 and if it was marked as read-only on version N-1, a process which involves adding a write block and a special
* either a searchable snapshot or an archive index with a read-only compatible version but is missing the write block. * index setting indicating that the shard was "verified". Searchable snapshot and archive indices created in version N-2 are also
* read-only compatible by nature as long as they have a write block. Other types of indices, like CCR followers, are not read-only compatible.
* *
* @param indexMetadata the index metadata * @param indexMetadata the index metadata
* @param minimumIndexCompatibilityVersion the min. index compatible version for reading and writing indices (used in assertion) * @param minimumCompatible the min. index compatible version for reading and writing indices (used in assertion)
* @param minReadOnlyIndexCompatibilityVersion the min. index compatible version for only reading indices * @param minimumReadOnlyCompatible the min. index compatible version for only reading indices
* *
* @return {@code true} if the index version is compatible in read-only mode, {@code false} otherwise. * @return {@code true} if the index version is compatible in read-only mode, {@code false} otherwise.
* @throws IllegalStateException if the index is read-only compatible but has no write block in place. * @throws IllegalStateException if the index is read-only compatible but has no write block or no verification index setting in place.
*/ */
public static boolean isReadOnlySupportedVersion( public static boolean isReadOnlySupportedVersion(
IndexMetadata indexMetadata, IndexMetadata indexMetadata,
IndexVersion minimumIndexCompatibilityVersion, IndexVersion minimumCompatible,
IndexVersion minReadOnlyIndexCompatibilityVersion IndexVersion minimumReadOnlyCompatible
) { ) {
boolean isReadOnlySupportedVersion = indexMetadata.getCompatibilityVersion().onOrAfter(minReadOnlyIndexCompatibilityVersion); if (isReadOnlyCompatible(indexMetadata, minimumCompatible, minimumReadOnlyCompatible)) {
assert isFullySupportedVersion(indexMetadata, minimumIndexCompatibilityVersion) == false; assert isFullySupportedVersion(indexMetadata, minimumCompatible) == false : indexMetadata;
final boolean isReadOnly = hasIndexWritesBlock(indexMetadata);
if (isReadOnlySupportedVersion
&& (indexMetadata.isSearchableSnapshot() || indexMetadata.getCreationVersion().isLegacyIndexVersion())) {
boolean isReadOnly = IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.get(indexMetadata.getSettings());
if (isReadOnly == false) { if (isReadOnly == false) {
throw new IllegalStateException( throw new IllegalStateException(
"The index " "The index "
+ indexMetadata.getIndex() + indexMetadata.getIndex()
+ " created in version [" + " created in version ["
+ indexMetadata.getCreationVersion() + indexMetadata.getCreationVersion().toReleaseVersion()
+ "] with current compatibility version [" + "] with current compatibility version ["
+ indexMetadata.getCompatibilityVersion().toReleaseVersion() + indexMetadata.getCompatibilityVersion().toReleaseVersion()
+ "] must be marked as read-only using the setting [" + "] must be marked as read-only using the setting ["
@@ -186,6 +185,45 @@ public class IndexMetadataVerifier {
return false; return false;
} }
private static boolean isReadOnlyCompatible(
IndexMetadata indexMetadata,
IndexVersion minimumCompatible,
IndexVersion minimumReadOnlyCompatible
) {
var compatibilityVersion = indexMetadata.getCompatibilityVersion();
if (compatibilityVersion.onOrAfter(minimumReadOnlyCompatible)) {
// searchable snapshots are read-only compatible
if (indexMetadata.isSearchableSnapshot()) {
return true;
}
// archives are read-only compatible
if (indexMetadata.getCreationVersion().isLegacyIndexVersion()) {
return true;
}
// indices (other than CCR and old-style frozen indices) are read-only compatible
return compatibilityVersion.before(minimumCompatible)
&& indexMetadata.getSettings().getAsBoolean("index.frozen", false) == false
&& indexMetadata.getSettings().getAsBoolean("index.xpack.ccr.following_index", false) == false;
}
return false;
}
private static boolean hasIndexWritesBlock(IndexMetadata indexMetadata) {
if (IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.get(indexMetadata.getSettings())) {
return indexMetadata.isSearchableSnapshot()
|| indexMetadata.getCreationVersion().isLegacyIndexVersion()
|| MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING.get(indexMetadata.getSettings());
}
return false;
}
public static boolean isReadOnlyVerified(IndexMetadata indexMetadata) {
if (isReadOnlyCompatible(indexMetadata, IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.MINIMUM_READONLY_COMPATIBLE)) {
return hasIndexWritesBlock(indexMetadata);
}
return false;
}
/** /**
* Check that we can parse the mappings. * Check that we can parse the mappings.
* *
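The split into isReadOnlyCompatible and hasIndexWritesBlock makes the contract two-part: the first decides whether an index may be carried forward read-only at all (searchable snapshot, archive, or a plain N-2 index that is neither frozen nor a CCR follower), the second whether it was actually prepared for it. For a regular index that preparation leaves two settings behind, sketched here with the keys referenced above (illustrative, not taken from the change's tests):

    import org.elasticsearch.common.settings.Settings;

    final class ReadOnlyMarkerExample {
        // A regular N-2 index that isReadOnlyVerified() accepts carries both
        // the write block and the verification marker added by this change.
        static Settings verifiedReadOnlySettings() {
            return Settings.builder()
                .put("index.blocks.write", true)        // INDEX_BLOCKS_WRITE_SETTING
                .put("index.verified_read_only", true)  // VERIFIED_READ_ONLY_SETTING
                .build();
        }
    }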
@@ -11,6 +11,7 @@ package org.elasticsearch.cluster.metadata;
import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.Logger;
import org.elasticsearch.Build;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.ActionRunnable;
@@ -116,6 +117,15 @@ public class MetadataIndexStateService {
Setting.Property.PrivateIndex Setting.Property.PrivateIndex
); );
public static final Setting<Boolean> VERIFIED_READ_ONLY_SETTING = Setting.boolSetting(
"index.verified_read_only",
false,
Setting.Property.IndexScope,
Setting.Property.NotCopyableOnResize,
// Allow the setting to be updated in snapshot builds
Build.current().isSnapshot() ? Setting.Property.OperatorDynamic : Setting.Property.PrivateIndex
);
private final ClusterService clusterService; private final ClusterService clusterService;
private final AllocationService allocationService; private final AllocationService allocationService;
private final IndexMetadataVerifier indexMetadataVerifier; private final IndexMetadataVerifier indexMetadataVerifier;
@@ -174,6 +174,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
IndexSettings.DEFAULT_PIPELINE, IndexSettings.DEFAULT_PIPELINE,
IndexSettings.FINAL_PIPELINE, IndexSettings.FINAL_PIPELINE,
MetadataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING, MetadataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING,
MetadataIndexStateService.VERIFIED_READ_ONLY_SETTING,
ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING, ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING,
DiskThresholdDecider.SETTING_IGNORE_DISK_WATERMARKS, DiskThresholdDecider.SETTING_IGNORE_DISK_WATERMARKS,
ShardLimitValidator.INDEX_SETTING_SHARD_LIMIT_GROUP, ShardLimitValidator.INDEX_SETTING_SHARD_LIMIT_GROUP,
@@ -14,84 +14,44 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.logging.DeprecationCategory;
import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.LogConfigurator;
import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.ToXContentFragment;
import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException; import java.io.IOException;
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.util.Locale; import java.util.Locale;
import java.util.Objects; import java.util.Objects;
import static org.elasticsearch.TransportVersions.BYTE_SIZE_VALUE_ALWAYS_USES_BYTES;
import static org.elasticsearch.common.unit.ByteSizeUnit.BYTES;
import static org.elasticsearch.common.unit.ByteSizeUnit.GB;
import static org.elasticsearch.common.unit.ByteSizeUnit.KB;
import static org.elasticsearch.common.unit.ByteSizeUnit.MB;
import static org.elasticsearch.common.unit.ByteSizeUnit.PB;
import static org.elasticsearch.common.unit.ByteSizeUnit.TB;
public class ByteSizeValue implements Writeable, Comparable<ByteSizeValue>, ToXContentFragment { public class ByteSizeValue implements Writeable, Comparable<ByteSizeValue>, ToXContentFragment {
/** /**
* We have to lazy initialize the deprecation logger as otherwise a static logger here would be constructed before logging is configured * We have to lazy initialize the deprecation logger as otherwise a static logger here would be constructed before logging is configured
* leading to a runtime failure (see {@link LogConfigurator#checkErrorListener()} ). The premature construction would come from any * leading to a runtime failure (see {@code LogConfigurator.checkErrorListener()} ). The premature construction would come from any
* {@link ByteSizeValue} object constructed in, for example, settings in {@link org.elasticsearch.common.network.NetworkService}. * {@link ByteSizeValue} object constructed in, for example, settings in {@link org.elasticsearch.common.network.NetworkService}.
*/ */
static class DeprecationLoggerHolder { static class DeprecationLoggerHolder {
static DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ByteSizeValue.class); static DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ByteSizeValue.class);
} }
public static final ByteSizeValue ZERO = new ByteSizeValue(0, ByteSizeUnit.BYTES); public static final ByteSizeValue ZERO = new ByteSizeValue(0, BYTES);
public static final ByteSizeValue ONE = new ByteSizeValue(1, ByteSizeUnit.BYTES); public static final ByteSizeValue ONE = new ByteSizeValue(1, BYTES);
public static final ByteSizeValue MINUS_ONE = new ByteSizeValue(-1, ByteSizeUnit.BYTES); public static final ByteSizeValue MINUS_ONE = new ByteSizeValue(-1, BYTES);
public static ByteSizeValue ofBytes(long size) { /**
if (size == 0) { * @param size the number of {@code unit}s
return ZERO; */
} public static ByteSizeValue of(long size, ByteSizeUnit unit) {
if (size == 1) { if (size < -1 || (size == -1 && unit != BYTES)) {
return ONE;
}
if (size == -1) {
return MINUS_ONE;
}
return new ByteSizeValue(size, ByteSizeUnit.BYTES);
}
public static ByteSizeValue ofKb(long size) {
return new ByteSizeValue(size, ByteSizeUnit.KB);
}
public static ByteSizeValue ofMb(long size) {
return new ByteSizeValue(size, ByteSizeUnit.MB);
}
public static ByteSizeValue ofGb(long size) {
return new ByteSizeValue(size, ByteSizeUnit.GB);
}
public static ByteSizeValue ofTb(long size) {
return new ByteSizeValue(size, ByteSizeUnit.TB);
}
public static ByteSizeValue ofPb(long size) {
return new ByteSizeValue(size, ByteSizeUnit.PB);
}
private final long size;
private final ByteSizeUnit unit;
public static ByteSizeValue readFrom(StreamInput in) throws IOException {
long size = in.readZLong();
ByteSizeUnit unit = ByteSizeUnit.readFrom(in);
if (unit == ByteSizeUnit.BYTES) {
return ofBytes(size);
}
return new ByteSizeValue(size, unit);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeZLong(size);
unit.writeTo(out);
}
public ByteSizeValue(long size, ByteSizeUnit unit) {
if (size < -1 || (size == -1 && unit != ByteSizeUnit.BYTES)) {
throw new IllegalArgumentException("Values less than -1 bytes are not supported: " + size + unit.getSuffix()); throw new IllegalArgumentException("Values less than -1 bytes are not supported: " + size + unit.getSuffix());
} }
if (size > Long.MAX_VALUE / unit.toBytes(1)) { if (size > Long.MAX_VALUE / unit.toBytes(1)) {
@@ -99,18 +59,88 @@ public class ByteSizeValue implements Writeable, Comparable<ByteSizeValue>, ToXC
"Values greater than " + Long.MAX_VALUE + " bytes are not supported: " + size + unit.getSuffix() "Values greater than " + Long.MAX_VALUE + " bytes are not supported: " + size + unit.getSuffix()
); );
} }
this.size = size; return newByteSizeValue(size * unit.toBytes(1), unit);
this.unit = unit; }
public static ByteSizeValue ofBytes(long size) {
return of(size, BYTES);
}
public static ByteSizeValue ofKb(long size) {
return of(size, KB);
}
public static ByteSizeValue ofMb(long size) {
return of(size, MB);
}
public static ByteSizeValue ofGb(long size) {
return of(size, GB);
}
public static ByteSizeValue ofTb(long size) {
return of(size, TB);
}
public static ByteSizeValue ofPb(long size) {
return of(size, PB);
}
static ByteSizeValue newByteSizeValue(long sizeInBytes, ByteSizeUnit desiredUnit) {
// Peel off some common cases to avoid allocations
if (desiredUnit == BYTES) {
if (sizeInBytes == 0) {
return ZERO;
}
if (sizeInBytes == 1) {
return ONE;
}
if (sizeInBytes == -1) {
return MINUS_ONE;
}
}
if (sizeInBytes < 0) {
throw new IllegalArgumentException("Values less than -1 bytes are not supported: " + sizeInBytes);
}
return new ByteSizeValue(sizeInBytes, desiredUnit);
}
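The constructor-to-factory move changes the invariant: a ByteSizeValue now always stores its magnitude in bytes, with the unit kept only as a display preference, and newByteSizeValue peels off the shared ZERO/ONE/MINUS_ONE constants to avoid allocations. A few behaviors that follow from the code above (a sketch, not tests from this change):

    import org.elasticsearch.common.unit.ByteSizeUnit;
    import org.elasticsearch.common.unit.ByteSizeValue;

    final class CanonicalBytesExample {
        static void examples() {
            // The unit is applied eagerly: 1 KB is stored as 1024 bytes.
            assert ByteSizeValue.of(1, ByteSizeUnit.KB).getBytes() == 1024;

            // Common byte counts collapse onto the shared constants.
            assert ByteSizeValue.ofBytes(0) == ByteSizeValue.ZERO;
            assert ByteSizeValue.ofBytes(1) == ByteSizeValue.ONE;

            // hashCode now follows the byte count, not the (size, unit) pair.
            assert ByteSizeValue.of(1, ByteSizeUnit.KB).hashCode()
                == ByteSizeValue.ofBytes(1024).hashCode();
        }
    }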
private final long sizeInBytes;
private final ByteSizeUnit desiredUnit;
public static ByteSizeValue readFrom(StreamInput in) throws IOException {
long size = in.readZLong();
ByteSizeUnit unit = ByteSizeUnit.readFrom(in);
if (in.getTransportVersion().onOrAfter(BYTE_SIZE_VALUE_ALWAYS_USES_BYTES)) {
return newByteSizeValue(size, unit);
} else {
return of(size, unit);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
if (out.getTransportVersion().onOrAfter(BYTE_SIZE_VALUE_ALWAYS_USES_BYTES)) {
out.writeZLong(sizeInBytes);
} else {
out.writeZLong(Math.divideExact(sizeInBytes, desiredUnit.toBytes(1)));
}
desiredUnit.writeTo(out);
}
ByteSizeValue(long sizeInBytes, ByteSizeUnit desiredUnit) {
this.sizeInBytes = sizeInBytes;
this.desiredUnit = desiredUnit;
} }
// For testing // For testing
long getSize() { long getSizeInBytes() {
return size; return sizeInBytes;
} }
// For testing // For testing
ByteSizeUnit getUnit() { ByteSizeUnit getDesiredUnit() {
return unit; return desiredUnit;
} }
@Deprecated @Deprecated
@@ -123,27 +153,27 @@ public class ByteSizeValue implements Writeable, Comparable<ByteSizeValue>, ToXC
} }
public long getBytes() { public long getBytes() {
return unit.toBytes(size); return sizeInBytes;
} }
public long getKb() { public long getKb() {
return unit.toKB(size); return getBytes() / KB.toBytes(1);
} }
public long getMb() { public long getMb() {
return unit.toMB(size); return getBytes() / MB.toBytes(1);
} }
public long getGb() { public long getGb() {
return unit.toGB(size); return getBytes() / GB.toBytes(1);
} }
public long getTb() { public long getTb() {
return unit.toTB(size); return getBytes() / TB.toBytes(1);
} }
public long getPb() { public long getPb() {
return unit.toPB(size); return getBytes() / PB.toBytes(1);
} }
public double getKbFrac() { public double getKbFrac() {
@@ -175,32 +205,41 @@ public class ByteSizeValue implements Writeable, Comparable<ByteSizeValue>, ToXC
* serialising the value to JSON. * serialising the value to JSON.
*/ */
public String getStringRep() { public String getStringRep() {
if (size <= 0) { if (sizeInBytes <= 0) {
return String.valueOf(size); return String.valueOf(sizeInBytes);
}
long numUnits = sizeInBytes / desiredUnit.toBytes(1);
long residue = sizeInBytes % desiredUnit.toBytes(1);
if (residue == 0) {
return numUnits + desiredUnit.getSuffix();
} else {
return sizeInBytes + BYTES.getSuffix();
} }
return size + unit.getSuffix();
} }
/**
* @return a string with at most one decimal point whose magnitude is close to {@code this}.
*/
@Override @Override
public String toString() { public String toString() {
long bytes = getBytes(); long bytes = getBytes();
double value = bytes; double value = bytes;
String suffix = ByteSizeUnit.BYTES.getSuffix(); String suffix = BYTES.getSuffix();
if (bytes >= ByteSizeUnit.C5) { if (bytes >= ByteSizeUnit.C5) {
value = getPbFrac(); value = getPbFrac();
suffix = ByteSizeUnit.PB.getSuffix(); suffix = PB.getSuffix();
} else if (bytes >= ByteSizeUnit.C4) { } else if (bytes >= ByteSizeUnit.C4) {
value = getTbFrac(); value = getTbFrac();
suffix = ByteSizeUnit.TB.getSuffix(); suffix = TB.getSuffix();
} else if (bytes >= ByteSizeUnit.C3) { } else if (bytes >= ByteSizeUnit.C3) {
value = getGbFrac(); value = getGbFrac();
suffix = ByteSizeUnit.GB.getSuffix(); suffix = GB.getSuffix();
} else if (bytes >= ByteSizeUnit.C2) { } else if (bytes >= ByteSizeUnit.C2) {
value = getMbFrac(); value = getMbFrac();
suffix = ByteSizeUnit.MB.getSuffix(); suffix = MB.getSuffix();
} else if (bytes >= ByteSizeUnit.C1) { } else if (bytes >= ByteSizeUnit.C1) {
value = getKbFrac(); value = getKbFrac();
suffix = ByteSizeUnit.KB.getSuffix(); suffix = KB.getSuffix();
} }
return Strings.format1Decimals(value, suffix); return Strings.format1Decimals(value, suffix);
} }
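With bytes as the canonical magnitude, getStringRep() only prints in the desired unit when the byte count divides it evenly and otherwise falls back to an exact byte count, while toString() remains the lossy one-decimal human form. Expected outputs under that logic, worked from the code above rather than captured output, and assuming the long-standing parseBytesSizeValue(String, String) entry point that routes into parse:

    import org.elasticsearch.common.unit.ByteSizeUnit;
    import org.elasticsearch.common.unit.ByteSizeValue;

    final class StringRepExample {
        static void examples() {
            // Divides evenly, so the desired unit is kept: "2mb".
            assert ByteSizeValue.of(2, ByteSizeUnit.MB).getStringRep().equals("2mb");

            // "1.5kb" is 1536 bytes with KB as the desired unit; 1536 is not a
            // whole number of KB, so the exact-bytes fallback is used: "1536b".
            ByteSizeValue parsed = ByteSizeValue.parseBytesSizeValue("1.5kb", "example.setting");
            assert parsed.getStringRep().equals("1536b");

            // toString stays approximate on purpose: at most one decimal.
            assert parsed.toString().equals("1.5kb");
        }
    }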
@@ -231,25 +270,25 @@ public class ByteSizeValue implements Writeable, Comparable<ByteSizeValue>, ToXC
} }
String lowerSValue = sValue.toLowerCase(Locale.ROOT).trim(); String lowerSValue = sValue.toLowerCase(Locale.ROOT).trim();
if (lowerSValue.endsWith("k")) { if (lowerSValue.endsWith("k")) {
return parse(sValue, lowerSValue, "k", ByteSizeUnit.KB, settingName); return parse(sValue, lowerSValue, "k", KB, settingName);
} else if (lowerSValue.endsWith("kb")) { } else if (lowerSValue.endsWith("kb")) {
return parse(sValue, lowerSValue, "kb", ByteSizeUnit.KB, settingName); return parse(sValue, lowerSValue, "kb", KB, settingName);
} else if (lowerSValue.endsWith("m")) { } else if (lowerSValue.endsWith("m")) {
return parse(sValue, lowerSValue, "m", ByteSizeUnit.MB, settingName); return parse(sValue, lowerSValue, "m", MB, settingName);
} else if (lowerSValue.endsWith("mb")) { } else if (lowerSValue.endsWith("mb")) {
return parse(sValue, lowerSValue, "mb", ByteSizeUnit.MB, settingName); return parse(sValue, lowerSValue, "mb", MB, settingName);
} else if (lowerSValue.endsWith("g")) { } else if (lowerSValue.endsWith("g")) {
return parse(sValue, lowerSValue, "g", ByteSizeUnit.GB, settingName); return parse(sValue, lowerSValue, "g", GB, settingName);
} else if (lowerSValue.endsWith("gb")) { } else if (lowerSValue.endsWith("gb")) {
return parse(sValue, lowerSValue, "gb", ByteSizeUnit.GB, settingName); return parse(sValue, lowerSValue, "gb", GB, settingName);
} else if (lowerSValue.endsWith("t")) { } else if (lowerSValue.endsWith("t")) {
return parse(sValue, lowerSValue, "t", ByteSizeUnit.TB, settingName); return parse(sValue, lowerSValue, "t", TB, settingName);
} else if (lowerSValue.endsWith("tb")) { } else if (lowerSValue.endsWith("tb")) {
return parse(sValue, lowerSValue, "tb", ByteSizeUnit.TB, settingName); return parse(sValue, lowerSValue, "tb", TB, settingName);
} else if (lowerSValue.endsWith("p")) { } else if (lowerSValue.endsWith("p")) {
return parse(sValue, lowerSValue, "p", ByteSizeUnit.PB, settingName); return parse(sValue, lowerSValue, "p", PB, settingName);
} else if (lowerSValue.endsWith("pb")) { } else if (lowerSValue.endsWith("pb")) {
return parse(sValue, lowerSValue, "pb", ByteSizeUnit.PB, settingName); return parse(sValue, lowerSValue, "pb", PB, settingName);
} else if (lowerSValue.endsWith("b")) { } else if (lowerSValue.endsWith("b")) {
return parseBytes(lowerSValue, settingName, sValue); return parseBytes(lowerSValue, settingName, sValue);
} else { } else {
@@ -285,24 +324,16 @@ public class ByteSizeValue implements Writeable, Comparable<ByteSizeValue>, ToXC
ByteSizeUnit unit, ByteSizeUnit unit,
final String settingName final String settingName
) { ) {
assert unit != BYTES : "Use parseBytes";
final String s = normalized.substring(0, normalized.length() - suffix.length()).trim(); final String s = normalized.substring(0, normalized.length() - suffix.length()).trim();
try { try {
try { try {
return new ByteSizeValue(Long.parseLong(s), unit); return of(Long.parseLong(s), unit);
} catch (final NumberFormatException e) { } catch (final NumberFormatException e) {
try { // If it's not an integer, it could be a valid number with a decimal
final double doubleValue = Double.parseDouble(s); BigDecimal decimalValue = parseDecimal(s, settingName, initialInput, e);
DeprecationLoggerHolder.deprecationLogger.warn( long sizeInBytes = convertToBytes(decimalValue, unit, settingName, initialInput, e);
DeprecationCategory.PARSING, return new ByteSizeValue(sizeInBytes, unit);
"fractional_byte_values",
"Fractional bytes values are deprecated. Use non-fractional bytes values instead: [{}] found for setting [{}]",
initialInput,
settingName
);
return ByteSizeValue.ofBytes((long) (doubleValue * unit.toBytes(1)));
} catch (final NumberFormatException ignored) {
throw new ElasticsearchParseException("failed to parse setting [{}] with value [{}]", e, settingName, initialInput);
}
} }
} catch (IllegalArgumentException e) { } catch (IllegalArgumentException e) {
throw new ElasticsearchParseException( throw new ElasticsearchParseException(
@@ -314,6 +345,82 @@ public class ByteSizeValue implements Writeable, Comparable<ByteSizeValue>, ToXC
} }
} }
/**
* @param numericPortion the number to parse
* @param settingName for error reporting - the name of the setting we're parsing
* @param settingValue for error reporting - the whole string value of the setting
* @param originalException for error reporting - the exception that occurred when we tried to parse the setting as an integer
*/
private static BigDecimal parseDecimal(
String numericPortion,
String settingName,
String settingValue,
NumberFormatException originalException
) {
BigDecimal decimalValue;
try {
decimalValue = new BigDecimal(numericPortion);
} catch (NumberFormatException e) {
// Here, we choose to use originalException as the cause, because a NumberFormatException here
// indicates the string wasn't actually a valid BigDecimal after all, so there's no reason
// to confuse matters by reporting BigDecimal in the stack trace.
ElasticsearchParseException toThrow = new ElasticsearchParseException(
"failed to parse setting [{}] with value [{}]",
originalException,
settingName,
settingValue
);
toThrow.addSuppressed(e);
throw toThrow;
}
if (decimalValue.signum() < 0) {
throw new ElasticsearchParseException("failed to parse setting [{}] with value [{}]", settingName, settingValue);
} else if (decimalValue.scale() > 2) {
throw new ElasticsearchParseException(
"failed to parse setting [{}] with more than two decimals in value [{}]",
settingName,
settingValue
);
}
return decimalValue;
}
/**
* @param decimalValue the number of {@code unit}s
* @param unit the specified {@link ByteSizeUnit}
* @param settingName for error reporting - the name of the setting we're parsing
* @param settingValue for error reporting - the whole string value of the setting
* @param originalException for error reporting - the exception that occurred when we tried to parse the setting as an integer
*/
private static long convertToBytes(
BigDecimal decimalValue,
ByteSizeUnit unit,
String settingName,
String settingValue,
NumberFormatException originalException
) {
BigDecimal sizeInBytes = decimalValue.multiply(new BigDecimal(unit.toBytes(1)));
try {
// Note we always round up here for two reasons:
// 1. Practically: toString truncates, so if we ever round down, we'll lose a tenth
// 2. In principle: if the user asks for 1.1kb, which is 1126.4 bytes, and we only give them 1126, then
// we have not given them what they asked for.
return sizeInBytes.setScale(0, RoundingMode.UP).longValueExact();
} catch (ArithmeticException e) {
// Here, we choose to use the ArithmeticException as the cause, because we already know the
// number is a valid BigDecimal, so it makes sense to supply that context in the stack trace.
ElasticsearchParseException toThrow = new ElasticsearchParseException(
"failed to parse setting [{}] with value beyond {}: [{}]",
e,
settingName,
Long.MAX_VALUE,
settingValue
);
toThrow.addSuppressed(originalException);
throw toThrow;
}
}
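One concrete consequence of the round-up rule: 1.1kb is 1.1 * 1024 = 1126.4 bytes, and setScale(0, RoundingMode.UP) yields 1127, so the caller never receives less than requested; parseDecimal has already rejected negative values and anything with more than two decimals before conversion. The arithmetic, checked standalone with plain BigDecimal:

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    final class RoundUpExample {
        public static void main(String[] args) {
            // 1.1kb -> 1.1 * 1024 = 1126.4 bytes, rounded up to 1127.
            BigDecimal bytes = new BigDecimal("1.1").multiply(new BigDecimal(1024));
            long sizeInBytes = bytes.setScale(0, RoundingMode.UP).longValueExact();
            assert sizeInBytes == 1127L : sizeInBytes;

            // parseDecimal rejects more than two decimals: "1.125" has scale 3.
            assert new BigDecimal("1.125").scale() == 3;
        }
    }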
@Override @Override
public boolean equals(Object o) { public boolean equals(Object o) {
if (this == o) { if (this == o) {
@@ -328,7 +435,7 @@ public class ByteSizeValue implements Writeable, Comparable<ByteSizeValue>, ToXC
@Override @Override
public int hashCode() { public int hashCode() {
return Long.hashCode(size * unit.toBytes(1)); return Long.hashCode(getBytes());
} }
@Override @Override
@@ -87,19 +87,19 @@ public final class HttpTransportSettings {
); );
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CONTENT_LENGTH = Setting.byteSizeSetting( public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CONTENT_LENGTH = Setting.byteSizeSetting(
"http.max_content_length", "http.max_content_length",
new ByteSizeValue(100, ByteSizeUnit.MB), ByteSizeValue.of(100, ByteSizeUnit.MB),
ByteSizeValue.ZERO, ByteSizeValue.ZERO,
ByteSizeValue.ofBytes(Integer.MAX_VALUE), ByteSizeValue.ofBytes(Integer.MAX_VALUE),
Property.NodeScope Property.NodeScope
); );
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CHUNK_SIZE = Setting.byteSizeSetting( public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CHUNK_SIZE = Setting.byteSizeSetting(
"http.max_chunk_size", "http.max_chunk_size",
new ByteSizeValue(8, ByteSizeUnit.KB), ByteSizeValue.of(8, ByteSizeUnit.KB),
Property.NodeScope Property.NodeScope
); );
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_HEADER_SIZE = Setting.byteSizeSetting( public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_HEADER_SIZE = Setting.byteSizeSetting(
"http.max_header_size", "http.max_header_size",
new ByteSizeValue(16, ByteSizeUnit.KB), ByteSizeValue.of(16, ByteSizeUnit.KB),
Property.NodeScope Property.NodeScope
); );
public static final Setting<Integer> SETTING_HTTP_MAX_WARNING_HEADER_COUNT = intSetting( public static final Setting<Integer> SETTING_HTTP_MAX_WARNING_HEADER_COUNT = intSetting(
@@ -115,7 +115,7 @@ public final class HttpTransportSettings {
); );
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_INITIAL_LINE_LENGTH = Setting.byteSizeSetting( public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_INITIAL_LINE_LENGTH = Setting.byteSizeSetting(
"http.max_initial_line_length", "http.max_initial_line_length",
new ByteSizeValue(4, ByteSizeUnit.KB), ByteSizeValue.of(4, ByteSizeUnit.KB),
Property.NodeScope Property.NodeScope
); );
@@ -353,7 +353,7 @@ public final class IndexSettings {
* Prevent the translog from growing over 10GB or 20% of the recommended shard size of 50GB. This helps bound the maximum disk usage * Prevent the translog from growing over 10GB or 20% of the recommended shard size of 50GB. This helps bound the maximum disk usage
* overhead of translogs. * overhead of translogs.
*/ */
new ByteSizeValue(10, ByteSizeUnit.GB), ByteSizeValue.of(10, ByteSizeUnit.GB),
/* /*
* An empty translog occupies 55 bytes on disk. If the flush threshold is below this, the flush thread * An empty translog occupies 55 bytes on disk. If the flush threshold is below this, the flush thread
* can get stuck in an infinite loop as the shouldPeriodicallyFlush can still be true after flushing. * can get stuck in an infinite loop as the shouldPeriodicallyFlush can still be true after flushing.
@@ -385,7 +385,7 @@ public final class IndexSettings {
*/ */
public static final Setting<ByteSizeValue> INDEX_FLUSH_AFTER_MERGE_THRESHOLD_SIZE_SETTING = Setting.byteSizeSetting( public static final Setting<ByteSizeValue> INDEX_FLUSH_AFTER_MERGE_THRESHOLD_SIZE_SETTING = Setting.byteSizeSetting(
"index.flush_after_merge", "index.flush_after_merge",
new ByteSizeValue(512, ByteSizeUnit.MB), ByteSizeValue.of(512, ByteSizeUnit.MB),
ByteSizeValue.ZERO, // always flush after merge ByteSizeValue.ZERO, // always flush after merge
ByteSizeValue.ofBytes(Long.MAX_VALUE), // never flush after merge ByteSizeValue.ofBytes(Long.MAX_VALUE), // never flush after merge
Property.Dynamic, Property.Dynamic,
@@ -398,7 +398,7 @@ public final class IndexSettings {
*/ */
public static final Setting<ByteSizeValue> INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING = Setting.byteSizeSetting( public static final Setting<ByteSizeValue> INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING = Setting.byteSizeSetting(
"index.translog.generation_threshold_size", "index.translog.generation_threshold_size",
new ByteSizeValue(64, ByteSizeUnit.MB), ByteSizeValue.of(64, ByteSizeUnit.MB),
/* /*
* An empty translog occupies 55 bytes on disk. If the generation threshold is * An empty translog occupies 55 bytes on disk. If the generation threshold is
* below this, the flush thread can get stuck in an infinite loop repeatedly * below this, the flush thread can get stuck in an infinite loop repeatedly
@@ -1431,7 +1431,7 @@ public final class IndexSettings {
} }
assert onePercentOfTotalDiskSpace > Translog.DEFAULT_HEADER_SIZE_IN_BYTES; assert onePercentOfTotalDiskSpace > Translog.DEFAULT_HEADER_SIZE_IN_BYTES;
if (onePercentOfTotalDiskSpace < flushThresholdSize.getBytes()) { if (onePercentOfTotalDiskSpace < flushThresholdSize.getBytes()) {
return new ByteSizeValue(onePercentOfTotalDiskSpace, ByteSizeUnit.BYTES); return ByteSizeValue.of(onePercentOfTotalDiskSpace, ByteSizeUnit.BYTES);
} else { } else {
return flushThresholdSize; return flushThresholdSize;
} }
@@ -116,9 +116,9 @@ public final class MergePolicyConfig {
private final ByteSizeValue defaultMaxTimeBasedMergedSegment; private final ByteSizeValue defaultMaxTimeBasedMergedSegment;
public static final double DEFAULT_EXPUNGE_DELETES_ALLOWED = 10d; public static final double DEFAULT_EXPUNGE_DELETES_ALLOWED = 10d;
public static final ByteSizeValue DEFAULT_FLOOR_SEGMENT = new ByteSizeValue(2, ByteSizeUnit.MB); public static final ByteSizeValue DEFAULT_FLOOR_SEGMENT = ByteSizeValue.of(2, ByteSizeUnit.MB);
public static final int DEFAULT_MAX_MERGE_AT_ONCE = 10; public static final int DEFAULT_MAX_MERGE_AT_ONCE = 10;
public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB); public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = ByteSizeValue.of(5, ByteSizeUnit.GB);
public static final Setting<ByteSizeValue> DEFAULT_MAX_MERGED_SEGMENT_SETTING = Setting.byteSizeSetting( public static final Setting<ByteSizeValue> DEFAULT_MAX_MERGED_SEGMENT_SETTING = Setting.byteSizeSetting(
"indices.merge.policy.max_merged_segment", "indices.merge.policy.max_merged_segment",
DEFAULT_MAX_MERGED_SEGMENT, DEFAULT_MAX_MERGED_SEGMENT,
@@ -131,7 +131,7 @@ public final class MergePolicyConfig {
* of merging fewer segments together than the merge factor, which in-turn increases write amplification. So we set an arbitrarily high * of merging fewer segments together than the merge factor, which in-turn increases write amplification. So we set an arbitrarily high
* roof that serves as a protection that we expect to never hit. * roof that serves as a protection that we expect to never hit.
*/ */
public static final ByteSizeValue DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT = new ByteSizeValue(100, ByteSizeUnit.GB); public static final ByteSizeValue DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT = ByteSizeValue.of(100, ByteSizeUnit.GB);
public static final Setting<ByteSizeValue> DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT_SETTING = Setting.byteSizeSetting( public static final Setting<ByteSizeValue> DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT_SETTING = Setting.byteSizeSetting(
"indices.merge.policy.max_time_based_merged_segment", "indices.merge.policy.max_time_based_merged_segment",
DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT, DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT,
@@ -266,7 +266,7 @@ public class InternalEngine extends Engine {
); );
assert translog.getGeneration() != null; assert translog.getGeneration() != null;
this.translog = translog; this.translog = translog;
this.totalDiskSpace = new ByteSizeValue(Environment.getFileStore(translog.location()).getTotalSpace(), ByteSizeUnit.BYTES); this.totalDiskSpace = ByteSizeValue.of(Environment.getFileStore(translog.location()).getTotalSpace(), ByteSizeUnit.BYTES);
this.lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); this.lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
this.softDeletesPolicy = newSoftDeletesPolicy(); this.softDeletesPolicy = newSoftDeletesPolicy();
this.combinedDeletionPolicy = new CombinedDeletionPolicy( this.combinedDeletionPolicy = new CombinedDeletionPolicy(
@@ -97,7 +97,7 @@ public class ReadOnlyEngine extends Engine {
@SuppressWarnings("this-escape") @SuppressWarnings("this-escape")
public ReadOnlyEngine( public ReadOnlyEngine(
EngineConfig config, EngineConfig config,
SeqNoStats seqNoStats, @Nullable SeqNoStats seqNoStats,
@Nullable TranslogStats translogStats, @Nullable TranslogStats translogStats,
boolean obtainLock, boolean obtainLock,
Function<DirectoryReader, DirectoryReader> readerWrapperFunction, Function<DirectoryReader, DirectoryReader> readerWrapperFunction,
@@ -180,12 +180,6 @@ public abstract class AbstractGeometryFieldMapper<T> extends FieldMapper {
}; };
} }
@Override
public BlockLoader blockLoader(BlockLoaderContext blContext) {
// Currently we can only load from source in ESQL
return blockLoaderFromSource(blContext);
}
protected BlockLoader blockLoaderFromSource(BlockLoaderContext blContext) { protected BlockLoader blockLoaderFromSource(BlockLoaderContext blContext) {
ValueFetcher fetcher = valueFetcher(blContext.sourcePaths(name()), nullValue, GeometryFormatterFactory.WKB); ValueFetcher fetcher = valueFetcher(blContext.sourcePaths(name()), nullValue, GeometryFormatterFactory.WKB);
// TODO consider optimization using BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) // TODO consider optimization using BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name())
@@ -10,16 +10,12 @@ package org.elasticsearch.index.mapper;
import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.geo.Orientation; import org.elasticsearch.common.geo.Orientation;
import org.elasticsearch.geometry.Rectangle; import org.elasticsearch.lucene.spatial.Extent;
import org.elasticsearch.geometry.utils.WellKnownBinary;
import org.elasticsearch.lucene.spatial.CoordinateEncoder;
import org.elasticsearch.lucene.spatial.GeometryDocValueReader; import org.elasticsearch.lucene.spatial.GeometryDocValueReader;
import java.io.IOException; import java.io.IOException;
import java.nio.ByteOrder;
import java.util.Map; import java.util.Map;
import java.util.function.Function; import java.util.function.Function;
@@ -75,29 +71,27 @@ public abstract class AbstractShapeGeometryFieldMapper<T> extends AbstractGeomet
@Override @Override
protected Object nullValueAsSource(T nullValue) { protected Object nullValueAsSource(T nullValue) {
// we don't support null value fors shapes // we don't support null value for shapes
return nullValue; return nullValue;
} }
@Override protected static class BoundsBlockLoader extends BlockDocValuesReader.DocValuesBlockLoader {
public BlockLoader blockLoader(BlockLoaderContext blContext) {
return blContext.fieldExtractPreference() == FieldExtractPreference.EXTRACT_SPATIAL_BOUNDS && isBoundsExtractionSupported()
? new BoundsBlockLoader(name(), coordinateEncoder())
: blockLoaderFromSource(blContext);
}
protected abstract boolean isBoundsExtractionSupported();
protected abstract CoordinateEncoder coordinateEncoder();
// Visible for testing
static class BoundsBlockLoader extends BlockDocValuesReader.DocValuesBlockLoader {
private final String fieldName; private final String fieldName;
private final CoordinateEncoder encoder;
BoundsBlockLoader(String fieldName, CoordinateEncoder encoder) { protected BoundsBlockLoader(String fieldName) {
this.fieldName = fieldName; this.fieldName = fieldName;
this.encoder = encoder; }
protected void writeExtent(BlockLoader.IntBuilder builder, Extent extent) {
// We store the 6 values as a single multi-valued field, in the same order as the fields in the Extent class
builder.beginPositionEntry();
builder.appendInt(extent.top);
builder.appendInt(extent.bottom);
builder.appendInt(extent.negLeft);
builder.appendInt(extent.negRight);
builder.appendInt(extent.posLeft);
builder.appendInt(extent.posRight);
builder.endPositionEntry();
} }
@Override @Override
@@ -107,7 +101,7 @@ public abstract class AbstractShapeGeometryFieldMapper<T> extends AbstractGeomet
public BlockLoader.Block read(BlockLoader.BlockFactory factory, BlockLoader.Docs docs) throws IOException { public BlockLoader.Block read(BlockLoader.BlockFactory factory, BlockLoader.Docs docs) throws IOException {
var binaryDocValues = context.reader().getBinaryDocValues(fieldName); var binaryDocValues = context.reader().getBinaryDocValues(fieldName);
var reader = new GeometryDocValueReader(); var reader = new GeometryDocValueReader();
try (var builder = factory.bytesRefs(docs.count())) { try (var builder = factory.ints(docs.count())) {
for (int i = 0; i < docs.count(); i++) { for (int i = 0; i < docs.count(); i++) {
read(binaryDocValues, docs.get(i), reader, builder); read(binaryDocValues, docs.get(i), reader, builder);
} }
@@ -119,27 +113,17 @@ public abstract class AbstractShapeGeometryFieldMapper<T> extends AbstractGeomet
public void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws IOException { public void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws IOException {
var binaryDocValues = context.reader().getBinaryDocValues(fieldName); var binaryDocValues = context.reader().getBinaryDocValues(fieldName);
var reader = new GeometryDocValueReader(); var reader = new GeometryDocValueReader();
read(binaryDocValues, docId, reader, (BytesRefBuilder) builder); read(binaryDocValues, docId, reader, (IntBuilder) builder);
} }
private void read(BinaryDocValues binaryDocValues, int doc, GeometryDocValueReader reader, BytesRefBuilder builder) private void read(BinaryDocValues binaryDocValues, int doc, GeometryDocValueReader reader, IntBuilder builder)
throws IOException { throws IOException {
if (binaryDocValues.advanceExact(doc) == false) { if (binaryDocValues.advanceExact(doc) == false) {
builder.appendNull(); builder.appendNull();
return; return;
} }
reader.reset(binaryDocValues.binaryValue()); reader.reset(binaryDocValues.binaryValue());
var extent = reader.getExtent(); writeExtent(builder, reader.getExtent());
// This is rather silly: an extent is already encoded as ints, but we convert it to Rectangle to
// preserve its properties as a WKB shape, only to convert it back to ints when we compute the
// aggregation. An obvious optimization would be to avoid this back-and-forth conversion.
var rectangle = new Rectangle(
encoder.decodeX(extent.minX()),
encoder.decodeX(extent.maxX()),
encoder.decodeY(extent.maxY()),
encoder.decodeY(extent.minY())
);
builder.appendBytesRef(new BytesRef(WellKnownBinary.toWKB(rectangle, ByteOrder.LITTLE_ENDIAN)));
} }
@Override @Override
@@ -151,7 +135,7 @@ public abstract class AbstractShapeGeometryFieldMapper<T> extends AbstractGeomet
@Override @Override
public BlockLoader.Builder builder(BlockLoader.BlockFactory factory, int expectedCount) { public BlockLoader.Builder builder(BlockLoader.BlockFactory factory, int expectedCount) {
return factory.bytesRefs(expectedCount); return factory.ints(expectedCount);
} }
} }
} }
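The bounds loader now emits each extent as a single multi-valued int position rather than encoding it as a WKB rectangle, which removes the back-and-forth conversion the deleted comment complained about. Consumers must unpack the six values in the writer's fixed order; a hypothetical decoder sketch (the int[] stands in for however the block exposes one position):

    // Hypothetical: unpack one position produced by writeExtent above. The
    // order is fixed by the writer: top, bottom, negLeft, negRight, posLeft,
    // posRight, matching the field order of the Extent class.
    record ExtentValues(int top, int bottom, int negLeft, int negRight, int posLeft, int posRight) {
        static ExtentValues fromPosition(int[] v) {
            assert v.length == 6 : "an extent is always six ints";
            return new ExtentValues(v[0], v[1], v[2], v[3], v[4], v[5]);
        }
    }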
@@ -21,7 +21,7 @@ public class MapperFeatures implements FeatureSpecification {
// Used to avoid noise in mixed cluster and rest compatibility tests. Must not be backported to 8.x branch. // Used to avoid noise in mixed cluster and rest compatibility tests. Must not be backported to 8.x branch.
// This label gets added to tests with such failures before merging with main, then removed when backported to 8.x. // This label gets added to tests with such failures before merging with main, then removed when backported to 8.x.
public static final NodeFeature BWC_WORKAROUND_9_0 = new NodeFeature("mapper.bwc_workaround_9_0"); public static final NodeFeature BWC_WORKAROUND_9_0 = new NodeFeature("mapper.bwc_workaround_9_0", true);
@Override @Override
public Set<NodeFeature> getFeatures() { public Set<NodeFeature> getFeatures() {
@@ -36,6 +36,8 @@ import java.util.List;
import java.util.Objects; import java.util.Objects;
/** /**
* Deprecated geo query. Deprecated in #64227, 7.12/8.0. We do not plan to remove this, so we
* do not break any existing users.
* @deprecated use {@link GeoShapeQueryBuilder} * @deprecated use {@link GeoShapeQueryBuilder}
*/ */
@Deprecated @Deprecated
@@ -8,7 +8,6 @@
*/ */
package org.elasticsearch.index.query; package org.elasticsearch.index.query;
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
@@ -189,11 +188,7 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject {
highlightBuilder = in.readOptionalWriteable(HighlightBuilder::new); highlightBuilder = in.readOptionalWriteable(HighlightBuilder::new);
this.innerCollapseBuilder = in.readOptionalWriteable(CollapseBuilder::new); this.innerCollapseBuilder = in.readOptionalWriteable(CollapseBuilder::new);
if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { fetchFields = in.readOptionalCollectionAsList(FieldAndFormat::new);
if (in.readBoolean()) {
fetchFields = in.readCollectionAsList(FieldAndFormat::new);
}
}
} }
@Override @Override
@@ -228,13 +223,7 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject {
} }
out.writeOptionalWriteable(highlightBuilder); out.writeOptionalWriteable(highlightBuilder);
out.writeOptionalWriteable(innerCollapseBuilder); out.writeOptionalWriteable(innerCollapseBuilder);
out.writeOptionalCollection(fetchFields);
if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) {
out.writeBoolean(fetchFields != null);
if (fetchFields != null) {
out.writeCollection(fetchFields);
}
}
} }
public String getName() { public String getName() {
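writeOptionalCollection and readOptionalCollectionAsList fold the hand-rolled presence-boolean-then-collection sequence into single calls with the same null-or-list semantics, which is what lets the version gate above disappear without a wire change for current peers. A sketch of the equivalence (hypothetical holder class; the FieldAndFormat import path is assumed, the stream helpers are the ones used in this hunk):

    import java.io.IOException;
    import java.util.List;

    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.search.fetch.subphase.FieldAndFormat;

    final class OptionalCollectionExample {
        // The hand-rolled form this change deletes:
        static void writeOld(StreamOutput out, List<FieldAndFormat> fields) throws IOException {
            out.writeBoolean(fields != null);
            if (fields != null) {
                out.writeCollection(fields);
            }
        }

        // The one-call replacement with the same null-or-list semantics:
        static void writeNew(StreamOutput out, List<FieldAndFormat> fields) throws IOException {
            out.writeOptionalCollection(fields);
        }

        static List<FieldAndFormat> readNew(StreamInput in) throws IOException {
            return in.readOptionalCollectionAsList(FieldAndFormat::new);
        }
    }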
@@ -66,9 +66,7 @@ public class MatchPhrasePrefixQueryBuilder extends AbstractQueryBuilder<MatchPhr
slop = in.readVInt(); slop = in.readVInt();
maxExpansions = in.readVInt(); maxExpansions = in.readVInt();
analyzer = in.readOptionalString(); analyzer = in.readOptionalString();
if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { zeroTermsQuery = ZeroTermsQueryOption.readFromStream(in);
this.zeroTermsQuery = ZeroTermsQueryOption.readFromStream(in);
}
} }
@Override @Override
@@ -78,9 +76,7 @@ public class MatchPhrasePrefixQueryBuilder extends AbstractQueryBuilder<MatchPhr
out.writeVInt(slop); out.writeVInt(slop);
out.writeVInt(maxExpansions); out.writeVInt(maxExpansions);
out.writeOptionalString(analyzer); out.writeOptionalString(analyzer);
if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { zeroTermsQuery.writeTo(out);
zeroTermsQuery.writeTo(out);
}
} }
/** Returns the field name used in this query. */ /** Returns the field name used in this query. */
@@ -76,9 +76,7 @@ public class PrefixQueryBuilder extends AbstractQueryBuilder<PrefixQueryBuilder>
fieldName = in.readString(); fieldName = in.readString();
value = in.readString(); value = in.readString();
rewrite = in.readOptionalString(); rewrite = in.readOptionalString();
if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { caseInsensitive = in.readBoolean();
caseInsensitive = in.readBoolean();
}
} }
@Override @Override
@@ -86,9 +84,7 @@ public class PrefixQueryBuilder extends AbstractQueryBuilder<PrefixQueryBuilder>
out.writeString(fieldName); out.writeString(fieldName);
out.writeString(value); out.writeString(value);
out.writeOptionalString(rewrite); out.writeOptionalString(rewrite);
if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { out.writeBoolean(caseInsensitive);
out.writeBoolean(caseInsensitive);
}
} }
@Override @Override
@@ -88,9 +88,7 @@ public class RegexpQueryBuilder extends AbstractQueryBuilder<RegexpQueryBuilder>
syntaxFlagsValue = in.readVInt(); syntaxFlagsValue = in.readVInt();
maxDeterminizedStates = in.readVInt(); maxDeterminizedStates = in.readVInt();
rewrite = in.readOptionalString(); rewrite = in.readOptionalString();
if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { caseInsensitive = in.readBoolean();
caseInsensitive = in.readBoolean();
}
} }
@Override @Override
@@ -100,9 +98,7 @@ public class RegexpQueryBuilder extends AbstractQueryBuilder<RegexpQueryBuilder>
out.writeVInt(syntaxFlagsValue); out.writeVInt(syntaxFlagsValue);
out.writeVInt(maxDeterminizedStates); out.writeVInt(maxDeterminizedStates);
out.writeOptionalString(rewrite); out.writeOptionalString(rewrite);
if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { out.writeBoolean(caseInsensitive);
out.writeBoolean(caseInsensitive);
}
} }
/** Returns the field name used in this query. */ /** Returns the field name used in this query. */
@@ -89,17 +89,13 @@ public class TermQueryBuilder extends BaseTermQueryBuilder<TermQueryBuilder> {
*/ */
public TermQueryBuilder(StreamInput in) throws IOException { public TermQueryBuilder(StreamInput in) throws IOException {
super(in); super(in);
if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { caseInsensitive = in.readBoolean();
caseInsensitive = in.readBoolean();
}
} }
@Override @Override
protected void doWriteTo(StreamOutput out) throws IOException { protected void doWriteTo(StreamOutput out) throws IOException {
super.doWriteTo(out); super.doWriteTo(out);
if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { out.writeBoolean(caseInsensitive);
out.writeBoolean(caseInsensitive);
}
} }
public static TermQueryBuilder fromXContent(XContentParser parser) throws IOException { public static TermQueryBuilder fromXContent(XContentParser parser) throws IOException {