mirror of https://github.com/elastic/elasticsearch.git
synced 2025-04-25 07:37:19 -04:00

commit 5a70623d8d
Merge remote-tracking branch 'upstream-main/main' into merge-main-16-01-25
870 changed files with 12069 additions and 7949 deletions

@@ -1,6 +1,6 @@
 #!/bin/bash
 
-if [[ ! "${BUILDKITE_PULL_REQUEST:-}" || "${BUILDKITE_AGENT_META_DATA_PROVIDER:-}" == "k8s" ]]; then
+if [[ "${BUILDKITE_PULL_REQUEST:-false}" == "false" || "${BUILDKITE_AGENT_META_DATA_PROVIDER:-}" == "k8s" ]]; then
   exit 0
 fi
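
Why the fix above works: Buildkite populates `BUILDKITE_PULL_REQUEST` with the literal string `false` for non-PR builds (that is the assumption this change relies on), and in bash `[[ ! "false" ]]` is false because any non-empty string is truthy, so the old guard never fired. A minimal sketch of the corrected check:

```bash
# Assumption: Buildkite sets BUILDKITE_PULL_REQUEST to "false" for non-PR
# builds and to the PR number (as a string) for pull-request builds.
BUILDKITE_PULL_REQUEST="false"

# The old test `[[ ! "${BUILDKITE_PULL_REQUEST:-}" ]]` only succeeds for an
# empty/unset variable, so the "false" sentinel slipped through. Comparing
# against the sentinel directly behaves as intended:
if [[ "${BUILDKITE_PULL_REQUEST:-false}" == "false" ]]; then
  echo "not a pull-request build; skipping"
  exit 0
fi
```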

@@ -1,4 +1,5 @@
 import org.elasticsearch.gradle.internal.test.TestUtil
+import org.elasticsearch.gradle.OS
 
 /*
  * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
@@ -77,7 +78,7 @@ tasks.register("copyPainless", Copy) {
 }
 
 tasks.named("run").configure {
-  executable = "${buildParams.runtimeJavaHome.get()}/bin/java"
+  executable = "${buildParams.runtimeJavaHome.get()}/bin/java" + (OS.current() == OS.WINDOWS ? '.exe' : '')
   args << "-Dplugins.dir=${buildDir}/plugins" << "-Dtests.index=${buildDir}/index"
   dependsOn "copyExpression", "copyPainless", configurations.nativeLib
   systemProperty 'es.nativelibs.path', TestUtil.getTestLibraryPath(file("../libs/native/libraries/build/platform/").toString())

@@ -27,6 +27,7 @@ import org.elasticsearch.compute.operator.Operator;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.xpack.esql.core.expression.Expression;
 import org.elasticsearch.xpack.esql.core.expression.FieldAttribute;
+import org.elasticsearch.xpack.esql.core.expression.FoldContext;
 import org.elasticsearch.xpack.esql.core.expression.Literal;
 import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RLikePattern;
 import org.elasticsearch.xpack.esql.core.tree.Source;
@@ -71,12 +72,11 @@ public class EvalBenchmark {
         BigArrays.NON_RECYCLING_INSTANCE
     );
 
+    private static final FoldContext FOLD_CONTEXT = FoldContext.small();
+
     private static final int BLOCK_LENGTH = 8 * 1024;
 
-    static final DriverContext driverContext = new DriverContext(
-        BigArrays.NON_RECYCLING_INSTANCE,
-        BlockFactory.getInstance(new NoopCircuitBreaker("noop"), BigArrays.NON_RECYCLING_INSTANCE)
-    );
+    static final DriverContext driverContext = new DriverContext(BigArrays.NON_RECYCLING_INSTANCE, blockFactory);
 
     static {
         // Smoke test all the expected values and force loading subclasses more like prod
@@ -114,11 +114,12 @@ public class EvalBenchmark {
         return switch (operation) {
             case "abs" -> {
                 FieldAttribute longField = longField();
-                yield EvalMapper.toEvaluator(new Abs(Source.EMPTY, longField), layout(longField)).get(driverContext);
+                yield EvalMapper.toEvaluator(FOLD_CONTEXT, new Abs(Source.EMPTY, longField), layout(longField)).get(driverContext);
             }
             case "add" -> {
                 FieldAttribute longField = longField();
                 yield EvalMapper.toEvaluator(
+                    FOLD_CONTEXT,
                     new Add(Source.EMPTY, longField, new Literal(Source.EMPTY, 1L, DataType.LONG)),
                     layout(longField)
                 ).get(driverContext);
@@ -126,6 +127,7 @@ public class EvalBenchmark {
             case "add_double" -> {
                 FieldAttribute doubleField = doubleField();
                 yield EvalMapper.toEvaluator(
+                    FOLD_CONTEXT,
                     new Add(Source.EMPTY, doubleField, new Literal(Source.EMPTY, 1D, DataType.DOUBLE)),
                     layout(doubleField)
                 ).get(driverContext);
@@ -140,7 +142,8 @@ public class EvalBenchmark {
                     lhs = new Add(Source.EMPTY, lhs, new Literal(Source.EMPTY, 1L, DataType.LONG));
                     rhs = new Add(Source.EMPTY, rhs, new Literal(Source.EMPTY, 1L, DataType.LONG));
                 }
-                yield EvalMapper.toEvaluator(new Case(Source.EMPTY, condition, List.of(lhs, rhs)), layout(f1, f2)).get(driverContext);
+                yield EvalMapper.toEvaluator(FOLD_CONTEXT, new Case(Source.EMPTY, condition, List.of(lhs, rhs)), layout(f1, f2))
+                    .get(driverContext);
             }
             case "date_trunc" -> {
                 FieldAttribute timestamp = new FieldAttribute(
@@ -149,6 +152,7 @@ public class EvalBenchmark {
                     new EsField("timestamp", DataType.DATETIME, Map.of(), true)
                 );
                 yield EvalMapper.toEvaluator(
+                    FOLD_CONTEXT,
                     new DateTrunc(Source.EMPTY, new Literal(Source.EMPTY, Duration.ofHours(24), DataType.TIME_DURATION), timestamp),
                     layout(timestamp)
                 ).get(driverContext);
@@ -156,6 +160,7 @@ public class EvalBenchmark {
             case "equal_to_const" -> {
                 FieldAttribute longField = longField();
                 yield EvalMapper.toEvaluator(
+                    FOLD_CONTEXT,
                     new Equals(Source.EMPTY, longField, new Literal(Source.EMPTY, 100_000L, DataType.LONG)),
                     layout(longField)
                 ).get(driverContext);
@@ -163,21 +168,21 @@ public class EvalBenchmark {
             case "long_equal_to_long" -> {
                 FieldAttribute lhs = longField();
                 FieldAttribute rhs = longField();
-                yield EvalMapper.toEvaluator(new Equals(Source.EMPTY, lhs, rhs), layout(lhs, rhs)).get(driverContext);
+                yield EvalMapper.toEvaluator(FOLD_CONTEXT, new Equals(Source.EMPTY, lhs, rhs), layout(lhs, rhs)).get(driverContext);
             }
             case "long_equal_to_int" -> {
                 FieldAttribute lhs = longField();
                 FieldAttribute rhs = intField();
-                yield EvalMapper.toEvaluator(new Equals(Source.EMPTY, lhs, rhs), layout(lhs, rhs)).get(driverContext);
+                yield EvalMapper.toEvaluator(FOLD_CONTEXT, new Equals(Source.EMPTY, lhs, rhs), layout(lhs, rhs)).get(driverContext);
             }
             case "mv_min", "mv_min_ascending" -> {
                 FieldAttribute longField = longField();
-                yield EvalMapper.toEvaluator(new MvMin(Source.EMPTY, longField), layout(longField)).get(driverContext);
+                yield EvalMapper.toEvaluator(FOLD_CONTEXT, new MvMin(Source.EMPTY, longField), layout(longField)).get(driverContext);
             }
             case "rlike" -> {
                 FieldAttribute keywordField = keywordField();
                 RLike rlike = new RLike(Source.EMPTY, keywordField, new RLikePattern(".ar"));
-                yield EvalMapper.toEvaluator(rlike, layout(keywordField)).get(driverContext);
+                yield EvalMapper.toEvaluator(FOLD_CONTEXT, rlike, layout(keywordField)).get(driverContext);
             }
             default -> throw new UnsupportedOperationException();
         };

@@ -22,7 +22,7 @@ public enum DockerBase {
     // Chainguard based wolfi image with latest jdk
     // This is usually updated via renovatebot
     // spotless:off
-    WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:eef54b3a414aa53b98f0f8df2633aed83c3ba6230722769282925442968f0364",
+    WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:dd66beec64a7f9b19c6c35a1195153b2b630a55e16ec71949ed5187c5947eea1",
         "-wolfi",
         "apk"
     ),

@@ -122,7 +122,7 @@ public class TestFixturesPlugin implements Plugin<Project> {
         composeExtension.getRemoveContainers().set(true);
         composeExtension.getCaptureContainersOutput()
             .set(EnumSet.of(LogLevel.INFO, LogLevel.DEBUG).contains(project.getGradle().getStartParameter().getLogLevel()));
-        composeExtension.getUseDockerComposeV2().set(false);
+        composeExtension.getUseDockerComposeV2().set(true);
         composeExtension.getExecutable().set(this.providerFactory.provider(() -> {
             String composePath = dockerSupport.get().getDockerAvailability().dockerComposePath();
             LOGGER.debug("Docker Compose path: {}", composePath);

@@ -187,21 +187,13 @@ class APMJvmOptions {
     static void extractSecureSettings(SecureSettings secrets, Map<String, String> propertiesMap) {
         final Set<String> settingNames = secrets.getSettingNames();
         for (String key : List.of("api_key", "secret_token")) {
-            for (String prefix : List.of("telemetry.", "tracing.apm.")) {
-                if (settingNames.contains(prefix + key)) {
-                    if (propertiesMap.containsKey(key)) {
-                        throw new IllegalStateException(
-                            Strings.format("Duplicate telemetry setting: [telemetry.%s] and [tracing.apm.%s]", key, key)
-                        );
-                    }
-
-                    try (SecureString token = secrets.getString(prefix + key)) {
-                        propertiesMap.put(key, token.toString());
-                    }
+            String prefix = "telemetry.";
+            if (settingNames.contains(prefix + key)) {
+                try (SecureString token = secrets.getString(prefix + key)) {
+                    propertiesMap.put(key, token.toString());
                 }
             }
         }
     }
 
     /**
@@ -227,44 +219,12 @@ class APMJvmOptions {
     static Map<String, String> extractApmSettings(Settings settings) throws UserException {
         final Map<String, String> propertiesMap = new HashMap<>();
 
-        // tracing.apm.agent. is deprecated by telemetry.agent.
         final String telemetryAgentPrefix = "telemetry.agent.";
-        final String deprecatedTelemetryAgentPrefix = "tracing.apm.agent.";
 
         final Settings telemetryAgentSettings = settings.getByPrefix(telemetryAgentPrefix);
         telemetryAgentSettings.keySet().forEach(key -> propertiesMap.put(key, String.valueOf(telemetryAgentSettings.get(key))));
 
-        final Settings apmAgentSettings = settings.getByPrefix(deprecatedTelemetryAgentPrefix);
-        for (String key : apmAgentSettings.keySet()) {
-            if (propertiesMap.containsKey(key)) {
-                throw new IllegalStateException(
-                    Strings.format(
-                        "Duplicate telemetry setting: [%s%s] and [%s%s]",
-                        telemetryAgentPrefix,
-                        key,
-                        deprecatedTelemetryAgentPrefix,
-                        key
-                    )
-                );
-            }
-            propertiesMap.put(key, String.valueOf(apmAgentSettings.get(key)));
-        }
-
         StringJoiner globalLabels = extractGlobalLabels(telemetryAgentPrefix, propertiesMap, settings);
-        if (globalLabels.length() == 0) {
-            globalLabels = extractGlobalLabels(deprecatedTelemetryAgentPrefix, propertiesMap, settings);
-        } else {
-            StringJoiner tracingGlobalLabels = extractGlobalLabels(deprecatedTelemetryAgentPrefix, propertiesMap, settings);
-            if (tracingGlobalLabels.length() != 0) {
-                throw new IllegalArgumentException(
-                    "Cannot have global labels with tracing.agent prefix ["
-                        + globalLabels
-                        + "] and telemetry.apm.agent prefix ["
-                        + tracingGlobalLabels
-                        + "]"
-                );
-            }
-        }
         if (globalLabels.length() > 0) {
             propertiesMap.put("global_labels", globalLabels.toString());
         }
@@ -274,7 +234,7 @@ class APMJvmOptions {
         if (propertiesMap.containsKey(key)) {
             throw new UserException(
                 ExitCodes.CONFIG,
-                "Do not set a value for [tracing.apm.agent." + key + "], as this is configured automatically by Elasticsearch"
+                "Do not set a value for [telemetry.agent." + key + "], as this is configured automatically by Elasticsearch"
             );
         }
     }
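
For orientation, a sketch of node configuration that survives this change: only the `telemetry.agent.*` prefix is read by `extractApmSettings`, and the secure `api_key`/`secret_token` values come from the keystore under `telemetry.*` (per `extractSecureSettings` above). All values below are illustrative:

```yaml
# elasticsearch.yml (illustrative values; the tracing.apm.agent.* spellings
# removed above are no longer accepted and will fail startup on 9.x)
telemetry.agent.server_url: "https://apm.example.org:443"
telemetry.agent.service_node_name: "instance-0000000001"
telemetry.agent.global_labels.deployment_id: "123"

# keystore (secure settings read by extractSecureSettings):
#   telemetry.api_key
#   telemetry.secret_token
```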

@@ -25,18 +25,15 @@ import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.function.Function;
 
 import static org.elasticsearch.test.MapMatcher.matchesMap;
 import static org.hamcrest.Matchers.allOf;
 import static org.hamcrest.Matchers.containsInAnyOrder;
-import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.endsWith;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasEntry;
 import static org.hamcrest.Matchers.hasKey;
 import static org.hamcrest.Matchers.hasSize;
-import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.not;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
@@ -82,14 +79,9 @@ public class APMJvmOptionsTests extends ESTestCase {
     }
 
     public void testExtractSecureSettings() {
-        MockSecureSettings duplicateSecureSettings = new MockSecureSettings();
-
-        for (String prefix : List.of("telemetry.", "tracing.apm.")) {
-            MockSecureSettings secureSettings = new MockSecureSettings();
-            secureSettings.setString(prefix + "secret_token", "token");
-            secureSettings.setString(prefix + "api_key", "key");
-
-            duplicateSecureSettings.setString(prefix + "api_key", "secret");
+        MockSecureSettings secureSettings = new MockSecureSettings();
+        secureSettings.setString("telemetry.secret_token", "token");
+        secureSettings.setString("telemetry.api_key", "key");
 
         Map<String, String> propertiesMap = new HashMap<>();
         APMJvmOptions.extractSecureSettings(secureSettings, propertiesMap);
@@ -97,30 +89,21 @@ public class APMJvmOptionsTests extends ESTestCase {
         assertThat(propertiesMap, matchesMap(Map.of("secret_token", "token", "api_key", "key")));
     }
 
-        Exception exception = expectThrows(
-            IllegalStateException.class,
-            () -> APMJvmOptions.extractSecureSettings(duplicateSecureSettings, new HashMap<>())
-        );
-        assertThat(exception.getMessage(), containsString("Duplicate telemetry setting"));
-        assertThat(exception.getMessage(), containsString("telemetry.api_key"));
-        assertThat(exception.getMessage(), containsString("tracing.apm.api_key"));
-    }
-
     public void testExtractSettings() throws UserException {
-        Function<String, Settings.Builder> buildSettings = (prefix) -> Settings.builder()
-            .put(prefix + "server_url", "https://myurl:443")
-            .put(prefix + "service_node_name", "instance-0000000001");
-
-        for (String prefix : List.of("tracing.apm.agent.", "telemetry.agent.")) {
+        Settings defaults = Settings.builder()
+            .put("telemetry.agent.server_url", "https://myurl:443")
+            .put("telemetry.agent.service_node_name", "instance-0000000001")
+            .build();
+
         var name = "APM Tracing";
         var deploy = "123";
         var org = "456";
         var extracted = APMJvmOptions.extractApmSettings(
-            buildSettings.apply(prefix)
-                .put(prefix + "global_labels.deployment_name", name)
-                .put(prefix + "global_labels.deployment_id", deploy)
-                .put(prefix + "global_labels.organization_id", org)
+            Settings.builder()
+                .put(defaults)
+                .put("telemetry.agent.global_labels.deployment_name", name)
+                .put("telemetry.agent.global_labels.deployment_id", deploy)
+                .put("telemetry.agent.global_labels.organization_id", org)
                 .build()
         );
 
@@ -143,10 +126,11 @@ public class APMJvmOptionsTests extends ESTestCase {
         deploy = "";
         org = ",456";
         extracted = APMJvmOptions.extractApmSettings(
-            buildSettings.apply(prefix)
-                .put(prefix + "global_labels.deployment_name", name)
-                .put(prefix + "global_labels.deployment_id", deploy)
-                .put(prefix + "global_labels.organization_id", org)
+            Settings.builder()
+                .put(defaults)
+                .put("telemetry.agent.global_labels.deployment_name", name)
+                .put("telemetry.agent.global_labels.deployment_id", deploy)
+                .put("telemetry.agent.global_labels.organization_id", org)
                 .build()
         );
         labels = Arrays.stream(extracted.get("global_labels").split(",")).toList();
@@ -154,39 +138,6 @@ public class APMJvmOptionsTests extends ESTestCase {
         assertThat(labels, containsInAnyOrder("deployment_name=APM_Tracing", "organization_id=_456"));
     }
 
-        IllegalStateException err = expectThrows(
-            IllegalStateException.class,
-            () -> APMJvmOptions.extractApmSettings(
-                Settings.builder()
-                    .put("tracing.apm.agent.server_url", "https://myurl:443")
-                    .put("telemetry.agent.server_url", "https://myurl-2:443")
-                    .build()
-            )
-        );
-        assertThat(err.getMessage(), is("Duplicate telemetry setting: [telemetry.agent.server_url] and [tracing.apm.agent.server_url]"));
-    }
-
-    public void testNoMixedLabels() {
-        String telemetryAgent = "telemetry.agent.";
-        String tracingAgent = "tracing.apm.agent.";
-        Settings settings = Settings.builder()
-            .put("tracing.apm.enabled", true)
-            .put(telemetryAgent + "server_url", "https://myurl:443")
-            .put(telemetryAgent + "service_node_name", "instance-0000000001")
-            .put(tracingAgent + "global_labels.deployment_id", "123")
-            .put(telemetryAgent + "global_labels.organization_id", "456")
-            .build();
-
-        IllegalArgumentException err = assertThrows(IllegalArgumentException.class, () -> APMJvmOptions.extractApmSettings(settings));
-        assertThat(
-            err.getMessage(),
-            is(
-                "Cannot have global labels with tracing.agent prefix [organization_id=456] and"
-                    + " telemetry.apm.agent prefix [deployment_id=123]"
-            )
-        );
-    }
-
     private Path makeFakeAgentJar() throws IOException {
         Path tempFile = createTempFile();
         Path apmPathDir = tempFile.getParent().resolve("modules").resolve("apm");

docs/changelog/118188.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+pr: 118188
+summary: Check for early termination in Driver
+area: ES|QL
+type: enhancement
+issues: []

docs/changelog/118602.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+pr: 118602
+summary: Limit memory usage of `fold`
+area: ES|QL
+type: bug
+issues: []

docs/changelog/119227.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
+pr: 119227
+summary: Remove unfreeze REST endpoint
+area: Indices APIs
+type: breaking
+issues: []
+breaking:
+  title: Remove unfreeze REST endpoint
+  area: REST API
+  details: >-
+    The `/{index}/_unfreeze` REST endpoint is no longer supported. This API was deprecated, and the corresponding
+    `/{index}/_freeze` endpoint was removed in 8.0.
+  impact: None, since it is not possible to have a frozen index in a version which is readable by Elasticsearch 9.0
+  notable: false

docs/changelog/119575.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
+pr: 119575
+summary: Fix realtime get of nested fields with synthetic source
+area: Mapping
+type: bug
+issues:
+ - 119553

docs/changelog/119679.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+pr: 119679
+summary: Support mTLS for the Elastic Inference Service integration inside the inference API
+area: Machine Learning
+type: feature
+issues: []

docs/changelog/119772.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
+pr: 119772
+summary: ESQL Support IN operator for Date nanos
+area: ES|QL
+type: enhancement
+issues:
+ - 118578

docs/changelog/119831.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+pr: 119831
+summary: Run `TransportClusterGetSettingsAction` on local node
+area: Infra/Settings
+type: enhancement
+issues: []

docs/changelog/119846.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
+pr: 119846
+summary: Drop support for brackets from METADATA syntax
+area: ES|QL
+type: deprecation
+issues:
+ - 115401
+deprecation:
+  title: Drop support for brackets from METADATA syntax
+  area: ES|QL
+  details: Please describe the details of this change for the release notes. You can
+    use asciidoc.
+  impact: Please describe the impact of this change to users

docs/changelog/119893.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+pr: 119893
+summary: Add enterprise license check for Inference API actions
+area: Machine Learning
+type: enhancement
+issues: []

docs/changelog/119922.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+pr: 119922
+summary: "[Inference API] fix spell words: covertToString to convertToString"
+area: Machine Learning
+type: enhancement
+issues: []

docs/changelog/119926.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
+pr: 119926
+summary: "Deprecated tracing.apm.* settings got removed."
+area: Infra/Metrics
+type: breaking
+issues: []
+breaking:
+  title: "Deprecated tracing.apm.* settings got removed."
+  area: Cluster and node setting
+  details: Deprecated `tracing.apm.*` settings got removed, use respective `telemetry.*` / `telemetry.tracing.*` settings instead.
+  impact: 9.x nodes will refuse to start if any such setting (including secret settings) is still present.
+  notable: false

docs/changelog/120014.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
+pr: 120014
+summary: Fix potential file leak in ES816BinaryQuantizedVectorsWriter
+area: Search
+type: bug
+issues:
+ - 119981

docs/changelog/120020.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+pr: 120020
+summary: Resume Driver on cancelled or early finished
+area: ES|QL
+type: enhancement
+issues: []

docs/changelog/120038.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+pr: 120038
+summary: Run template simulation actions on local node
+area: Ingest Node
+type: enhancement
+issues: []

docs/changelog/120042.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+pr: 120042
+summary: Match dot prefix of migrated DS backing index with the source index
+area: Data streams
+type: bug
+issues: []

docs/changelog/120055.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+pr: 120055
+summary: Optimize loading mappings when determining synthetic source usage and whether host.name can be sorted on.
+area: Logs
+type: enhancement
+issues: []

docs/changelog/120062.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
+pr: 120062
+summary: Update Text Similarity Reranker to Properly Handle Aliases
+area: Ranking
+type: bug
+issues:
+ - 119617

docs/changelog/120084.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+pr: 120084
+summary: Improve how reindex data stream index action handles api blocks
+area: Data streams
+type: enhancement
+issues: []

docs/changelog/120087.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+pr: 120087
+summary: Include `clusterApplyListener` in long cluster apply warnings
+area: Cluster Coordination
+type: enhancement
+issues: []

docs/changelog/120133.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
+pr: 120133
+summary: Use approximation to advance matched queries
+area: Search
+type: bug
+issues:
+ - 120130

docs/changelog/120143.yaml (new file, 6 lines)
@@ -0,0 +1,6 @@
+pr: 120143
+summary: Esql - support date nanos in date format function
+area: ES|QL
+type: enhancement
+issues:
+ - 109994

docs/changelog/120193.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+pr: 120193
+summary: "Do not capture `ClusterChangedEvent` in `IndicesStore` call to #onClusterStateShardsClosed"
+area: Store
+type: bug
+issues: []

docs/changelog/120198.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+pr: 120198
+summary: Bump `TrialLicenseVersion` to allow starting new trial on 9.0
+area: License
+type: enhancement
+issues: []

docs/changelog/120200.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+pr: 120200
+summary: "[Connector API] Support hard deletes with new URL param in delete endpoint"
+area: Extract&Transform
+type: feature
+issues: []
|
@ -13,7 +13,7 @@ beta::[]
|
||||||
For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs].
|
For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs].
|
||||||
--
|
--
|
||||||
|
|
||||||
Soft-deletes a connector and removes associated sync jobs.
|
Deletes a connector and optionally removes associated sync jobs.
|
||||||
|
|
||||||
Note: this action doesn't delete any API key, ingest pipeline or data index associated with the connector. These need to be removed manually.
|
Note: this action doesn't delete any API key, ingest pipeline or data index associated with the connector. These need to be removed manually.
|
||||||
|
|
||||||
|
@ -37,6 +37,9 @@ To get started with Connector APIs, check out <<es-connectors-tutorial-api, our
|
||||||
`<connector_id>`::
|
`<connector_id>`::
|
||||||
(Required, string)
|
(Required, string)
|
||||||
|
|
||||||
|
`<hard>`::
|
||||||
|
(Optional, boolean) If `true`, the connector doc is deleted. If `false`, connector doc is marked as deleted (soft deletion). Defaults to `false`.
|
||||||
|
|
||||||
`delete_sync_jobs`::
|
`delete_sync_jobs`::
|
||||||
(Optional, boolean) A flag indicating if associated sync jobs should be also removed. Defaults to `false`.
|
(Optional, boolean) A flag indicating if associated sync jobs should be also removed. Defaults to `false`.
|
||||||
|
|
||||||
|
|
|
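
A usage sketch for the new parameter, assuming `hard` is passed as a URL query parameter alongside `delete_sync_jobs` (the connector ID is hypothetical):

[source,console]
----
DELETE _connector/my-connector?hard=true&delete_sync_jobs=true
----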

@@ -30,11 +30,11 @@ include::processing-commands/limit.asciidoc[tag=limitation]
 ** You can use `to_datetime` to cast to millisecond dates to use unsupported functions
 * `double` (`float`, `half_float`, `scaled_float` are represented as `double`)
 * `ip`
-* `keyword` family including `keyword`, `constant_keyword`, and `wildcard`
+* `keyword` <<keyword, family>> including `keyword`, `constant_keyword`, and `wildcard`
 * `int` (`short` and `byte` are represented as `int`)
 * `long`
 * `null`
-* `text`
+* `text` <<text, family>> including `text`, `semantic_text` and `match_only_text`
 * experimental:[] `unsigned_long`
 * `version`
 * Spatial types

@@ -2,4 +2,4 @@
 
 *Description*
 
-Use `MATCH` to perform a <<query-dsl-match-query,match query>> on the specified field. Using `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL. Match can be used on text fields, as well as other field types like boolean, dates, and numeric types. For a simplified syntax, you can use the <<esql-search-operators,match operator>> `:` operator instead of `MATCH`. `MATCH` returns true if the provided query matches the row.
+Use `MATCH` to perform a <<query-dsl-match-query,match query>> on the specified field. Using `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL. Match can be used on fields from the text family like <<text, text>> and <<semantic-text, semantic_text>>, as well as other field types like keyword, boolean, dates, and numeric types. For a simplified syntax, you can use the <<esql-search-operators,match operator>> `:` operator instead of `MATCH`. `MATCH` returns true if the provided query matches the row.
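
For illustration, a minimal ES|QL query using `MATCH` on a text-family field (the index and field names are hypothetical, echoing the `FROM books` example used elsewhere in these docs):

[source,esql]
----
FROM books
| WHERE MATCH(author, "Faulkner")
| KEEP book_no, author
| LIMIT 5
----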

@@ -4,4 +4,4 @@
 
 Converts an input to a nanosecond-resolution date value (aka date_nanos).
 
-NOTE: The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.
+NOTE: The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z; attempting to convert values outside of that range will result in null with a warning. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.
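
A small sketch of the behavior the note describes; the second value is outside the representable range, so it converts to `null` with a warning (values chosen for the example):

[source,esql]
----
ROW in_range = "2024-01-01T12:00:00.123456789Z", too_late = "2263-01-01T00:00:00Z"
| EVAL ok = TO_DATE_NANOS(in_range), out_of_range = TO_DATE_NANOS(too_late)
----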

@@ -1599,7 +1599,7 @@
       "FROM sample_data \n| WHERE @timestamp >= NOW() - 1 day and @timestamp < NOW()\n| STATS COUNT(*) BY bucket = BUCKET(@timestamp, 25, NOW() - 1 day, NOW())",
       "FROM employees\n| WHERE hire_date >= \"1985-01-01T00:00:00Z\" AND hire_date < \"1986-01-01T00:00:00Z\"\n| STATS AVG(salary) BY bucket = BUCKET(hire_date, 20, \"1985-01-01T00:00:00Z\", \"1986-01-01T00:00:00Z\")\n| SORT bucket",
       "FROM employees\n| STATS s1 = b1 + 1, s2 = BUCKET(salary / 1000 + 999, 50.) + 2 BY b1 = BUCKET(salary / 100 + 99, 50.), b2 = BUCKET(salary / 1000 + 999, 50.)\n| SORT b1, b2\n| KEEP s1, b1, s2, b2",
-      "FROM employees \n| STATS dates = VALUES(birth_date) BY b = BUCKET(birth_date + 1 HOUR, 1 YEAR) - 1 HOUR\n| EVAL d_count = MV_COUNT(dates)\n| SORT d_count\n| LIMIT 3"
+      "FROM employees\n| STATS dates = MV_SORT(VALUES(birth_date)) BY b = BUCKET(birth_date + 1 HOUR, 1 YEAR) - 1 HOUR\n| EVAL d_count = MV_COUNT(dates)\n| SORT d_count, b\n| LIMIT 3"
     ],
     "preview" : false,
     "snapshot_only" : false

@@ -4,6 +4,30 @@
   "name" : "date_format",
   "description" : "Returns a string representation of a date, in the provided format.",
   "signatures" : [
+    {
+      "params" : [
+        {
+          "name" : "dateFormat",
+          "type" : "date",
+          "optional" : true,
+          "description" : "Date format (optional). If no format is specified, the `yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. If `null`, the function returns `null`."
+        }
+      ],
+      "variadic" : false,
+      "returnType" : "keyword"
+    },
+    {
+      "params" : [
+        {
+          "name" : "dateFormat",
+          "type" : "date_nanos",
+          "optional" : true,
+          "description" : "Date format (optional). If no format is specified, the `yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. If `null`, the function returns `null`."
+        }
+      ],
+      "variadic" : false,
+      "returnType" : "keyword"
+    },
     {
       "params" : [
         {
@@ -22,6 +46,24 @@
       "variadic" : false,
       "returnType" : "keyword"
     },
+    {
+      "params" : [
+        {
+          "name" : "dateFormat",
+          "type" : "keyword",
+          "optional" : true,
+          "description" : "Date format (optional). If no format is specified, the `yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. If `null`, the function returns `null`."
+        },
+        {
+          "name" : "date",
+          "type" : "date_nanos",
+          "optional" : false,
+          "description" : "Date expression. If `null`, the function returns `null`."
+        }
+      ],
+      "variadic" : false,
+      "returnType" : "keyword"
+    },
     {
       "params" : [
         {
@@ -39,6 +81,24 @@
       ],
       "variadic" : false,
       "returnType" : "keyword"
+    },
+    {
+      "params" : [
+        {
+          "name" : "dateFormat",
+          "type" : "text",
+          "optional" : true,
+          "description" : "Date format (optional). If no format is specified, the `yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. If `null`, the function returns `null`."
+        },
+        {
+          "name" : "date",
+          "type" : "date_nanos",
+          "optional" : false,
+          "description" : "Date expression. If `null`, the function returns `null`."
+        }
+      ],
+      "variadic" : false,
+      "returnType" : "keyword"
     }
   ],
   "examples" : [

@@ -2,7 +2,7 @@
   "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
   "type" : "eval",
   "name" : "match",
-  "description" : "Use `MATCH` to perform a <<query-dsl-match-query,match query>> on the specified field.\nUsing `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL.\n\nMatch can be used on text fields, as well as other field types like boolean, dates, and numeric types.\n\nFor a simplified syntax, you can use the <<esql-search-operators,match operator>> `:` operator instead of `MATCH`.\n\n`MATCH` returns true if the provided query matches the row.",
+  "description" : "Use `MATCH` to perform a <<query-dsl-match-query,match query>> on the specified field.\nUsing `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL.\n\nMatch can be used on fields from the text family like <<text, text>> and <<semantic-text, semantic_text>>,\nas well as other field types like keyword, boolean, dates, and numeric types.\n\nFor a simplified syntax, you can use the <<esql-search-operators,match operator>> `:` operator instead of `MATCH`.\n\n`MATCH` returns true if the provided query matches the row.",
   "signatures" : [
     {
       "params" : [

@@ -2,7 +2,7 @@
   "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
   "type" : "operator",
   "name" : "match_operator",
-  "description" : "Performs a <<query-dsl-match-query,match query>> on the specified field. Returns true if the provided query matches the row.",
+  "description" : "Use `MATCH` to perform a <<query-dsl-match-query,match query>> on the specified field.\nUsing `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL.\n\nMatch can be used on fields from the text family like <<text, text>> and <<semantic-text, semantic_text>>,\nas well as other field types like keyword, boolean, dates, and numeric types.\n\nFor a simplified syntax, you can use the <<esql-search-operators,match operator>> `:` operator instead of `MATCH`.\n\n`MATCH` returns true if the provided query matches the row.",
   "signatures" : [
     {
       "params" : [

@@ -3,7 +3,7 @@
   "type" : "eval",
   "name" : "to_date_nanos",
   "description" : "Converts an input to a nanosecond-resolution date value (aka date_nanos).",
-  "note" : "The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.",
+  "note" : "The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z; attempting to convert values outside of that range will result in null with a warning. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.",
   "signatures" : [
     {
       "params" : [
@@ -90,6 +90,6 @@
       "returnType" : "date_nanos"
     }
   ],
-  "preview" : true,
+  "preview" : false,
   "snapshot_only" : false
 }

@@ -6,7 +6,8 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
 Use `MATCH` to perform a <<query-dsl-match-query,match query>> on the specified field.
 Using `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL.
 
-Match can be used on text fields, as well as other field types like boolean, dates, and numeric types.
+Match can be used on fields from the text family like <<text, text>> and <<semantic-text, semantic_text>>,
+as well as other field types like keyword, boolean, dates, and numeric types.
 
 For a simplified syntax, you can use the <<esql-search-operators,match operator>> `:` operator instead of `MATCH`.
 
@@ -3,7 +3,15 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
 -->
 
 ### MATCH_OPERATOR
-Performs a <<query-dsl-match-query,match query>> on the specified field. Returns true if the provided query matches the row.
+Use `MATCH` to perform a <<query-dsl-match-query,match query>> on the specified field.
+Using `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL.
+
+Match can be used on fields from the text family like <<text, text>> and <<semantic-text, semantic_text>>,
+as well as other field types like keyword, boolean, dates, and numeric types.
+
+For a simplified syntax, you can use the <<esql-search-operators,match operator>> `:` operator instead of `MATCH`.
+
+`MATCH` returns true if the provided query matches the row.
 
 ```
 FROM books

@@ -5,4 +5,4 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
 ### TO_DATE_NANOS
 Converts an input to a nanosecond-resolution date value (aka date_nanos).
 
-Note: The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.
+Note: The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z; attempting to convert values outside of that range will result in null with a warning. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.

@@ -4,8 +4,6 @@
 [[esql-to_date_nanos]]
 === `TO_DATE_NANOS`
 
-preview::["Do not use on production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."]
-
 *Syntax*
 
 [.text-center]

@@ -18,6 +18,7 @@
 * <<esql-to_cartesianshape>>
 * experimental:[] <<esql-to_dateperiod>>
 * <<esql-to_datetime>>
+* <<esql-to_date_nanos>>
 * <<esql-to_degrees>>
 * <<esql-to_double>>
 * <<esql-to_geopoint>>
@@ -37,6 +38,7 @@ include::layout/to_cartesianpoint.asciidoc[]
 include::layout/to_cartesianshape.asciidoc[]
 include::layout/to_dateperiod.asciidoc[]
 include::layout/to_datetime.asciidoc[]
+include::layout/to_date_nanos.asciidoc[]
 include::layout/to_degrees.asciidoc[]
 include::layout/to_double.asciidoc[]
 include::layout/to_geopoint.asciidoc[]

@@ -5,6 +5,10 @@
 [%header.monospaced.styled,format=dsv,separator=|]
 |===
 dateFormat | date | result
+date |  | keyword
+date_nanos |  | keyword
 keyword | date | keyword
+keyword | date_nanos | keyword
 text | date | keyword
+text | date_nanos | keyword
 |===
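
Per the new `date_nanos` rows above, a nanosecond date can now be formatted directly; a hypothetical sketch (index and field names are made up):

[source,esql]
----
FROM events
| EVAL day = DATE_FORMAT("yyyy-MM-dd", TO_DATE_NANOS(event_time_keyword))
| KEEP day
----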

@@ -24,7 +24,6 @@ index settings, aliases, mappings, and index templates.
 * <<indices-split-index>>
 * <<indices-clone-index>>
 * <<indices-rollover-index>>
-* <<unfreeze-index-api>>
 * <<indices-resolve-index-api>>
 * <<indices-resolve-cluster-api>>
 * <<indices-downsample-data-stream>>
@@ -143,6 +142,5 @@ include::indices/shrink-index.asciidoc[]
 include::indices/simulate-index.asciidoc[]
 include::indices/simulate-template.asciidoc[]
 include::indices/split-index.asciidoc[]
-include::indices/apis/unfreeze.asciidoc[]
 include::indices/update-settings.asciidoc[]
 include::indices/put-mapping.asciidoc[]

@@ -1,61 +0,0 @@
-[role="xpack"]
-[[unfreeze-index-api]]
-=== Unfreeze index API
-++++
-<titleabbrev>Unfreeze index</titleabbrev>
-++++
-
-[WARNING]
-.Deprecated in 7.14
-====
-In 8.0, we removed the ability to freeze an index. In previous versions,
-freezing an index reduced its memory overhead. However, frozen indices are no
-longer useful due to
-https://www.elastic.co/blog/significantly-decrease-your-elasticsearch-heap-memory-usage[recent
-improvements in heap memory usage].
-You can use this API to unfreeze indices that were frozen in 7.x. Frozen indices
-are not related to the frozen data tier.
-====
-
-.New API reference
-[sidebar]
---
-For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs].
---
-
-Unfreezes an index.
-
-[[unfreeze-index-api-request]]
-==== {api-request-title}
-
-`POST /<index>/_unfreeze`
-
-[[unfreeze-index-api-prereqs]]
-==== {api-prereq-title}
-
-* If the {es} {security-features} are enabled, you must have the `manage`
-<<privileges-list-indices,index privilege>> for the target index or index alias.
-
-[[unfreeze-index-api-desc]]
-==== {api-description-title}
-
-When a frozen index is unfrozen, the index goes through the normal recovery
-process and becomes writeable again.
-
-[[unfreeze-index-api-path-parms]]
-==== {api-path-parms-title}
-
-`<index>`::
-(Required, string) Identifier for the index.
-
-[[unfreeze-index-api-examples]]
-==== {api-examples-title}
-
-The following example unfreezes an index:
-
-[source,console]
---------------------------------------------------
-POST /my-index-000001/_unfreeze
---------------------------------------------------
-// TEST[s/^/PUT my-index-000001\n/]
-// TEST[skip:unable to ignore deprecation warning]

@@ -43,7 +43,7 @@ For more information on managing indices, refer to <<indices, Index APIs>>.
 
 * To filter the list of indices, use the search bar or click a badge.
 Badges indicate if an index is a <<ccr-put-follow,follower index>>, a
-<<rollup-get-rollup-index-caps,rollup index>>, or <<unfreeze-index-api,frozen>>.
+<<rollup-get-rollup-index-caps,rollup index>>, or <<frozen-indices,frozen>>.
 
 * To drill down into the index
 <<mapping,mappings>>, <<index-modules-settings,settings>>, and statistics,
417
docs/reference/inference/chat-completion-inference.asciidoc
Normal file
417
docs/reference/inference/chat-completion-inference.asciidoc
Normal file
@ -0,0 +1,417 @@
[role="xpack"]
[[chat-completion-inference-api]]
=== Chat completion inference API

Streams a chat completion response.

IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models.
However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <<ml-df-trained-models-apis>>.


[discrete]
[[chat-completion-inference-api-request]]
==== {api-request-title}

`POST /_inference/<inference_id>/_unified`

`POST /_inference/chat_completion/<inference_id>/_unified`


[discrete]
[[chat-completion-inference-api-prereqs]]
==== {api-prereq-title}

* Requires the `monitor_inference` <<privileges-list-cluster,cluster privilege>>
(the built-in `inference_admin` and `inference_user` roles grant this privilege)
* You must use a client that supports streaming.


[discrete]
[[chat-completion-inference-api-desc]]
==== {api-description-title}

The chat completion {infer} API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation.
It only works with the `chat_completion` task type for `openai` and `elastic` {infer} services.

[NOTE]
====
The `chat_completion` task type is only available within the _unified API and only supports streaming.
====

[discrete]
[[chat-completion-inference-api-path-params]]
==== {api-path-parms-title}

`<inference_id>`::
(Required, string)
The unique identifier of the {infer} endpoint.

`<task_type>`::
(Optional, string)
The type of {infer} task that the model performs. If included, this must be set to the value `chat_completion`.


[discrete]
[[chat-completion-inference-api-request-body]]
==== {api-request-body-title}

`messages`::
(Required, array of objects) A list of objects representing the conversation.
Requests should generally only add new messages from the user (role `user`). The other message roles (`assistant`, `system`, or `tool`) should generally only be copied from the response to a previous completion request, such that the messages array is built up throughout a conversation.
+
.Assistant message
[%collapsible%closed]
=====
`content`::
(Required unless `tool_calls` is specified, string or array of objects)
The contents of the message.
+
include::inference-shared.asciidoc[tag=chat-completion-schema-content-with-examples]
+
`role`::
(Required, string)
The role of the message author. This should be set to `assistant` for this type of message.
+
`tool_calls`::
(Optional, array of objects)
The tool calls generated by the model.
+
.Examples
[%collapsible%closed]
======
[source,js]
------------------------------------------------------------
{
    "tool_calls": [
        {
            "id": "call_KcAjWtAww20AihPHphUh46Gd",
            "type": "function",
            "function": {
                "name": "get_current_weather",
                "arguments": "{\"location\":\"Boston, MA\"}"
            }
        }
    ]
}
------------------------------------------------------------
// NOTCONSOLE
======
+
`id`:::
(Required, string)
The identifier of the tool call.
+
`type`:::
(Required, string)
The type of tool call. This must be set to the value `function`.
+
`function`:::
(Required, object)
The function that the model called.
+
`name`::::
(Required, string)
The name of the function to call.
+
`arguments`::::
(Required, string)
The arguments to call the function with in JSON format.
=====
+
.System message
[%collapsible%closed]
=====
`content`:::
(Required, string or array of objects)
The contents of the message.
+
include::inference-shared.asciidoc[tag=chat-completion-schema-content-with-examples]
+
`role`:::
(Required, string)
The role of the message author. This should be set to `system` for this type of message.
=====
+
.Tool message
[%collapsible%closed]
=====
`content`::
(Required, string or array of objects)
The contents of the message.
+
include::inference-shared.asciidoc[tag=chat-completion-schema-content-with-examples]
+
`role`::
(Required, string)
The role of the message author. This should be set to `tool` for this type of message.
+
`tool_call_id`::
(Required, string)
The tool call that this message is responding to.
=====
+
.User message
[%collapsible%closed]
=====
`content`::
(Required, string or array of objects)
The contents of the message.
+
include::inference-shared.asciidoc[tag=chat-completion-schema-content-with-examples]
+
`role`::
(Required, string)
The role of the message author. This should be set to `user` for this type of message.
=====
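+
.Conversation example
[%collapsible%closed]
=====
A rough sketch of how the `messages` array might be built up over one tool-use turn; the conversation, tool call ID, and function name are hypothetical, not part of any real endpoint.
[source,js]
------------------------------------------------------------
{
    "messages": [
        { "role": "user", "content": "What is the weather like in Boston, MA?" },
        { "role": "assistant", "content": "", "tool_calls": [
            { "id": "call_example123", "type": "function", "function": {
                "name": "get_current_weather", "arguments": "{\"location\":\"Boston, MA\"}" } }
        ] },
        { "role": "tool", "content": "The weather is cold", "tool_call_id": "call_example123" },
        { "role": "user", "content": "Should I wear a coat?" }
    ]
}
------------------------------------------------------------
// NOTCONSOLE
=====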

`model`::
(Optional, string)
The ID of the model to use. By default, the model ID is set to the value included when creating the inference endpoint.

`max_completion_tokens`::
(Optional, integer)
The upper bound limit for the number of tokens that can be generated for a completion request.

`stop`::
(Optional, array of strings)
A sequence of strings to control when the model should stop generating additional tokens.

`temperature`::
(Optional, float)
The sampling temperature to use.

`tools`::
(Optional, array of objects)
A list of tools that the model can call.
+
.Structure
[%collapsible%closed]
=====
`type`::
(Required, string)
The type of tool. This must be set to the value `function`.
+
`function`::
(Required, object)
The function definition.
+
`description`:::
(Optional, string)
A description of what the function does. This is used by the model to choose when and how to call the function.
+
`name`:::
(Required, string)
The name of the function.
+
`parameters`:::
(Optional, object)
The parameters the function accepts. This should be formatted as a JSON object.
+
`strict`:::
(Optional, boolean)
Whether to enable schema adherence when generating the function call.
=====
+
.Examples
[%collapsible%closed]
======
[source,js]
------------------------------------------------------------
{
    "tools": [
        {
            "type": "function",
            "function": {
                "name": "get_price_of_item",
                "description": "Get the current price of an item",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "item": {
                            "id": "12345"
                        },
                        "unit": {
                            "type": "currency"
                        }
                    }
                }
            }
        }
    ]
}
------------------------------------------------------------
// NOTCONSOLE
======

`tool_choice`::
(Optional, string or object)
Controls which tool is called by the model.
+
String representation:::
One of `auto`, `none`, or `required`. `auto` allows the model to choose between calling tools and generating a message. `none` causes the model to not call any tools. `required` forces the model to call one or more tools.
+
Object representation:::
+
.Structure
[%collapsible%closed]
=====
`type`::
(Required, string)
The type of the tool. This must be set to the value `function`.
+
`function`::
(Required, object)
+
`name`:::
(Required, string)
The name of the function to call.
=====
+
.Examples
[%collapsible%closed]
=====
[source,js]
------------------------------------------------------------
{
    "tool_choice": {
        "type": "function",
        "function": {
            "name": "get_current_weather"
        }
    }
}
------------------------------------------------------------
// NOTCONSOLE
=====

`top_p`::
(Optional, float)
Nucleus sampling, an alternative to sampling with temperature.

[discrete]
[[chat-completion-inference-api-example]]
==== {api-examples-title}

The following example performs a chat completion on the example question with streaming.

[source,console]
------------------------------------------------------------
POST _inference/chat_completion/openai-completion/_stream
{
    "model": "gpt-4o",
    "messages": [
        {
            "role": "user",
            "content": "What is Elastic?"
        }
    ]
}
------------------------------------------------------------
// TEST[skip:TBD]

The following example performs a chat completion using an Assistant message with `tool_calls`.

[source,console]
------------------------------------------------------------
POST _inference/chat_completion/openai-completion/_stream
{
    "messages": [
        {
            "role": "assistant",
            "content": "Let's find out what the weather is",
            "tool_calls": [ <1>
                {
                    "id": "call_KcAjWtAww20AihPHphUh46Gd",
                    "type": "function",
                    "function": {
                        "name": "get_current_weather",
                        "arguments": "{\"location\":\"Boston, MA\"}"
                    }
                }
            ]
        },
        { <2>
            "role": "tool",
            "content": "The weather is cold",
            "tool_call_id": "call_KcAjWtAww20AihPHphUh46Gd"
        }
    ]
}
------------------------------------------------------------
// TEST[skip:TBD]

<1> Each tool call needs a corresponding Tool message.
<2> The corresponding Tool message.

The following example performs a chat completion using a User message with `tools` and `tool_choice`.

[source,console]
------------------------------------------------------------
POST _inference/chat_completion/openai-completion/_stream
{
    "messages": [
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "What's the price of a scarf?"
                }
            ]
        }
    ],
    "tools": [
        {
            "type": "function",
            "function": {
                "name": "get_current_price",
                "description": "Get the current price of an item",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "item": {
                            "id": "123"
                        }
                    }
                }
            }
        }
    ],
    "tool_choice": {
        "type": "function",
        "function": {
            "name": "get_current_price"
        }
    }
}
------------------------------------------------------------
// TEST[skip:TBD]

The API returns the following response when a request is made to the OpenAI service:

[source,txt]
------------------------------------------------------------
event: message
data: {"chat_completion":{"id":"chatcmpl-Ae0TWsy2VPnSfBbv5UztnSdYUMFP3","choices":[{"delta":{"content":"","role":"assistant"},"index":0}],"model":"gpt-4o-2024-08-06","object":"chat.completion.chunk"}}

event: message
data: {"chat_completion":{"id":"chatcmpl-Ae0TWsy2VPnSfBbv5UztnSdYUMFP3","choices":[{"delta":{"content":"Elastic"},"index":0}],"model":"gpt-4o-2024-08-06","object":"chat.completion.chunk"}}

event: message
data: {"chat_completion":{"id":"chatcmpl-Ae0TWsy2VPnSfBbv5UztnSdYUMFP3","choices":[{"delta":{"content":" is"},"index":0}],"model":"gpt-4o-2024-08-06","object":"chat.completion.chunk"}}

(...)

event: message
data: {"chat_completion":{"id":"chatcmpl-Ae0TWsy2VPnSfBbv5UztnSdYUMFP3","choices":[],"model":"gpt-4o-2024-08-06","object":"chat.completion.chunk","usage":{"completion_tokens":28,"prompt_tokens":16,"total_tokens":44}}} <1>

event: message
data: [DONE]
------------------------------------------------------------
// NOTCONSOLE

<1> The last object message of the stream contains the token usage information.
@ -26,6 +26,7 @@ the following APIs to manage {infer} models and perform {infer}:
* <<post-inference-api>>
* <<put-inference-api>>
* <<stream-inference-api>>
* <<chat-completion-inference-api>>
* <<update-inference-api>>

[[inference-landscape]]
@ -135,6 +136,7 @@ PUT _inference/sparse_embedding/small_chunk_size
include::delete-inference.asciidoc[]
include::get-inference.asciidoc[]
include::post-inference.asciidoc[]
include::chat-completion-inference.asciidoc[]
include::put-inference.asciidoc[]
include::stream-inference.asciidoc[]
include::update-inference.asciidoc[]
@ -63,4 +63,48 @@ Specifies the chunking strategy.
It could be either `sentence` or `word`.
end::chunking-settings-strategy[]

tag::chat-completion-schema-content-with-examples[]
.Examples
[%collapsible%closed]
======
String example
[source,js]
------------------------------------------------------------
{
    "content": "Some string"
}
------------------------------------------------------------
// NOTCONSOLE

Object example
[source,js]
------------------------------------------------------------
{
    "content": [
        {
            "text": "Some text",
            "type": "text"
        }
    ]
}
------------------------------------------------------------
// NOTCONSOLE
======

String representation:::
(Required, string)
The text content.
+
Object representation:::
`text`::::
(Required, string)
The text content.
+
`type`::::
(Required, string)
This must be set to the value `text`.
end::chat-completion-schema-content-with-examples[]

tag::chat-completion-docs[]
For more information on how to use the `chat_completion` task type, please refer to the <<chat-completion-inference-api, chat completion documentation>>.
end::chat-completion-docs[]
@ -76,7 +76,7 @@ Click the links to review the configuration details of the services:
* <<infer-service-google-vertex-ai,Google Vertex AI>> (`rerank`, `text_embedding`)
* <<infer-service-hugging-face,Hugging Face>> (`text_embedding`)
* <<infer-service-mistral,Mistral>> (`text_embedding`)
* <<infer-service-openai,OpenAI>> (`completion`, `text_embedding`)
* <<infer-service-openai,OpenAI>> (`chat_completion`, `completion`, `text_embedding`)
* <<infer-service-watsonx-ai>> (`text_embedding`)
* <<infer-service-jinaai,JinaAI>> (`text_embedding`, `rerank`)
@ -31,10 +31,18 @@ include::inference-shared.asciidoc[tag=task-type]
--
Available task types:

* `chat_completion`,
* `completion`,
* `text_embedding`.
--

[NOTE]
====
The `chat_completion` task type only supports streaming and only through the `_unified` API.

include::inference-shared.asciidoc[tag=chat-completion-docs]
====

[discrete]
[[infer-service-openai-api-request-body]]
==== {api-request-body-title}
@ -38,8 +38,12 @@ However, if you do not plan to use the {infer} APIs to use these models or if yo
==== {api-description-title}

The stream {infer} API enables real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation.
It only works with the `completion` task type.
It only works with the `completion` and `chat_completion` task types.

[NOTE]
====
include::inference-shared.asciidoc[tag=chat-completion-docs]
====
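As a quick illustration, a streaming request against a hypothetical `completion` endpoint named `openai-completion` might look like the following sketch (the endpoint ID is an assumption; `input` carries the prompt text):

[source,console]
------------------------------------------------------------
POST _inference/completion/openai-completion/_stream
{
  "input": "What is Elastic?"
}
------------------------------------------------------------
// TEST[skip:uses a hypothetical endpoint]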

[discrete]
[[stream-inference-api-path-params]]
@ -19,9 +19,9 @@ However, if you do not plan to use the {infer} APIs to use these models or if yo
[[update-inference-api-request]]
==== {api-request-title}

`POST _inference/<inference_id>/_update`
`PUT _inference/<inference_id>/_update`

`POST _inference/<task_type>/<inference_id>/_update`
`PUT _inference/<task_type>/<inference_id>/_update`

[discrete]
@ -81,7 +81,7 @@ The following example shows how to update an API key of an {infer} endpoint call
[source,console]
------------------------------------------------------------
POST _inference/my-inference-endpoint/_update
PUT _inference/my-inference-endpoint/_update
{
  "service_settings": {
    "api_key": "<API_KEY>"
@ -384,6 +384,7 @@ A collection of model size stats fields.
`model_size_bytes`:::
(integer)
The size of the model in bytes.
This parameter applies only to PyTorch models.

`required_native_memory_bytes`:::
(integer)
@ -131,6 +131,7 @@ The free-text description of the trained model.
`model_size_bytes`:::
(integer)
The estimated model size in bytes to keep the trained model in memory.
This parameter applies only to {dfanalytics} trained models.

`estimated_operations`:::
(integer)
@ -13,21 +13,24 @@ occurrence types are:
|=======================================================================
|Occur |Description
|`must` |The clause (query) must appear in matching documents and will
contribute to the score.
contribute to the score. Each query defined under a `must` acts as a logical "AND", returning only documents that match _all_ the specified queries.

|`should` |The clause (query) should appear in the matching document. Each query defined under a `should` acts as a logical "OR", returning documents that match _any_ of the specified queries.

|`filter` |The clause (query) must appear in matching documents. However, unlike
`must`, the score of the query will be ignored. Filter clauses are executed
in <<query-filter-context,filter context>>, meaning that scoring is ignored
and clauses are considered for caching.
and clauses are considered for caching. Each query defined under a `filter` acts as a logical "AND", returning only documents that match _all_ the specified queries.

|`should` |The clause (query) should appear in the matching document.

|`must_not` |The clause (query) must not appear in the matching
documents. Clauses are executed in <<query-filter-context,filter context>> meaning
that scoring is ignored and clauses are considered for caching. Because scoring is
ignored, a score of `0` for all documents is returned.
ignored, a score of `0` for all documents is returned. Each query defined under a `must_not` acts as a logical "NOT", returning only documents that do not match any of the specified queries.

|=======================================================================

The `must` and `should` clauses function as logical AND, OR operators, contributing to the scoring of results. However, these results will not be cached for faster retrieval. In contrast, the `filter` and `must_not` clauses are used to include or exclude results without impacting the score, unless used within a `constant_score` query.

The `bool` query takes a _more-matches-is-better_ approach, so the score from
each matching `must` or `should` clause will be added together to provide the
final `_score` for each document.
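To make these semantics concrete, here is a sketch of a `bool` query that combines all four occurrence types; the field names and values are hypothetical:

[source,console]
----
GET /_search
{
  "query": {
    "bool": {
      "must":     [ { "match": { "title": "search" } } ],
      "filter":   [ { "term": { "status": "published" } } ],
      "should":   [ { "match": { "content": "performance" } } ],
      "must_not": [ { "range": { "age": { "lt": 10 } } } ]
    }
  }
}
----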
@ -156,10 +156,16 @@ See <<restore-entire-cluster>>.
The freeze index API was removed in 8.0.
// tag::frozen-removal-explanation[]
Frozen indices are no longer useful due to
https://www.elastic.co/blog/significantly-decrease-your-elasticsearch-heap-memory-usage[recent
https://www.elastic.co/blog/significantly-decrease-your-elasticsearch-heap-memory-usage[improvements
improvements in heap memory usage].
in heap memory usage].
// end::frozen-removal-explanation[]

[role="exclude",id="unfreeze-index-api"]
=== Unfreeze index API

The unfreeze index API was removed in 9.0.
include::redirects.asciidoc[tag=frozen-removal-explanation]

[role="exclude",id="ilm-freeze"]
=== Freeze {ilm-init} action
@ -326,7 +332,7 @@ See <<configuring-stack-security,Configuring security for the Elastic Stack>>.
See <<security-api-suggest-user-profile,Suggest user profile API>>.
// [END] Security redirects

[roles="exclude",id="modules-scripting-stored-scripts"]
[role="exclude",id="modules-scripting-stored-scripts"]
=== Stored scripts

See <<script-stored-scripts,Store and retrieve scripts>>
@ -1749,8 +1755,10 @@ See <<search-terms-enum>>.
=== Frozen indices

// tag::frozen-index-redirect[]
For API documentation, see <<unfreeze-index-api>>.
Older versions of {es} provided the option to reduce the amount of data kept in memory for an index, at the expense of
increasing search latency. This was known as 'freezing' the index.
include::redirects.asciidoc[tag=frozen-removal-explanation]
The freeze index API was removed in 8.0, and the unfreeze index API was removed in 9.0.
// end::frozen-index-redirect[]

[role="exclude",id="best_practices"]
@ -22,7 +22,7 @@ The following APIs support {ccs}:
* experimental:[] <<eql-search-api,EQL search>>
* experimental:[] <<sql-search-api,SQL search>>
* experimental:[] <<search-vector-tile-api,Vector tile search>>
* experimental:[] <<esql,ES|QL>>
* experimental:[] <<esql-cross-clusters,ES|QL>>

[discrete]
=== Prerequisites
@ -100,7 +100,7 @@ requires the keyword `LIKE` for SQL `LIKE` pattern.
[[sql-index-frozen]]
=== Frozen Indices

By default, {es-sql} doesn't search <<unfreeze-index-api,frozen indices>>. To
By default, {es-sql} doesn't search <<frozen-indices,frozen indices>>. To
search frozen indices, use one of the following features:

dedicated configuration parameter::
@ -78,35 +78,31 @@ A shard can become unassigned for several reasons. The following tips outline th
most common causes and their solutions.

[discrete]
[[fix-cluster-status-reenable-allocation]]
[[fix-cluster-status-only-one-node]]
===== Re-enable shard allocation
===== Single node cluster

You typically disable allocation during a <<restart-cluster,restart>> or other
cluster maintenance. If you forgot to re-enable allocation afterward, {es} will
be unable to assign shards. To re-enable allocation, reset the
`cluster.routing.allocation.enable` cluster setting.

[source,console]
----
PUT _cluster/settings
{
  "persistent" : {
    "cluster.routing.allocation.enable" : null
  }
}
----

See https://www.youtube.com/watch?v=MiKKUdZvwnI[this video] for a walkthrough of troubleshooting "no allocations are allowed".

{es} will never assign a replica to the same node as the primary shard. A single-node cluster will always have yellow status. To change to green, set <<dynamic-index-number-of-replicas,number_of_replicas>> to 0 for all indices.

Therefore, if the number of replicas equals or exceeds the number of nodes, some shards won't be allocated.
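For example, a minimal sketch that drops replicas on all existing indices of a single-node cluster (newly created indices would need the same setting, for example via an index template):

[source,console]
----
PUT _settings
{
  "index.number_of_replicas": 0
}
----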

[discrete]
[[fix-cluster-status-recover-nodes]]
===== Recover lost nodes

Shards often become unassigned when a data node leaves the cluster. This can
occur for several reasons, ranging from connectivity issues to hardware failure.
occur for several reasons:

* A manual node restart will cause a temporary unhealthy cluster state until the node recovers.

* When a node becomes overloaded or fails, it can temporarily disrupt the cluster’s health, leading to an unhealthy state. Prolonged garbage collection (GC) pauses, caused by out-of-memory errors or high memory usage during intensive searches, can trigger this state. See <<fix-cluster-status-jvm,Reduce JVM memory pressure>> for more JVM-related issues.

* Network issues can prevent reliable node communication, causing shards to become out of sync. Check the logs for repeated messages about nodes leaving and rejoining the cluster.

After you resolve the issue and recover the node, it will rejoin the cluster.
{es} will then automatically allocate any unassigned shards.

You can monitor this process by <<cluster-health,checking your cluster health>>. The number of unallocated shards should progressively decrease until green status is reached.
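For example, a quick way to watch just the relevant fields while shards recover (`filter_path` only trims the response body):

[source,console]
----
GET _cluster/health?filter_path=status,unassigned_shards
----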

To avoid wasting resources on temporary issues, {es} <<delayed-allocation,delays
allocation>> by one minute by default. If you've recovered a node and don’t want
to wait for the delay period, you can call the <<cluster-reroute,cluster reroute

@ -155,7 +151,7 @@ replica, it remains unassigned. To fix this, you can:

* Change the `index.number_of_replicas` index setting to reduce the number of
replicas for each primary shard. We recommend keeping at least one replica per
primary.
primary for high availability.

[source,console]
----

@ -166,7 +162,6 @@ PUT _settings
----
// TEST[s/^/PUT my-index\n/]


[discrete]
[[fix-cluster-status-disk-space]]
===== Free up or increase disk space

@ -187,6 +182,8 @@ If your nodes are running low on disk space, you have a few options:

* Upgrade your nodes to increase disk space.

* Add more nodes to the cluster.

* Delete unneeded indices to free up space. If you use {ilm-init}, you can
update your lifecycle policy to use <<ilm-searchable-snapshot,searchable
snapshots>> or add a delete phase. If you no longer need to search the data, you
@ -219,11 +216,39 @@ watermark or set it to an explicit byte value.
|
||||||
PUT _cluster/settings
|
PUT _cluster/settings
|
||||||
{
|
{
|
||||||
"persistent": {
|
"persistent": {
|
||||||
"cluster.routing.allocation.disk.watermark.low": "30gb"
|
"cluster.routing.allocation.disk.watermark.low": "90%",
|
||||||
|
"cluster.routing.allocation.disk.watermark.high": "95%"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
----
|
----
|
||||||
// TEST[s/"30gb"/null/]
|
// TEST[s/"90%"/null/]
|
||||||
|
// TEST[s/"95%"/null/]
|
||||||
|
|
||||||
|
[IMPORTANT]
|
||||||
|
====
|
||||||
|
This is usually a temporary solution and may cause instability if disk space is not freed up.
|
||||||
|
====
|
||||||
|
|
||||||
|
[discrete]
|
||||||
|
[[fix-cluster-status-reenable-allocation]]
|
||||||
|
===== Re-enable shard allocation
|
||||||
|
|
||||||
|
You typically disable allocation during a <<restart-cluster,restart>> or other
|
||||||
|
cluster maintenance. If you forgot to re-enable allocation afterward, {es} will
|
||||||
|
be unable to assign shards. To re-enable allocation, reset the
|
||||||
|
`cluster.routing.allocation.enable` cluster setting.
|
||||||
|
|
||||||
|
[source,console]
|
||||||
|
----
|
||||||
|
PUT _cluster/settings
|
||||||
|
{
|
||||||
|
"persistent" : {
|
||||||
|
"cluster.routing.allocation.enable" : null
|
||||||
|
}
|
||||||
|
}
|
||||||
|
----
|
||||||
|
|
||||||
|
See https://www.youtube.com/watch?v=MiKKUdZvwnI[this video] for walkthrough of troubleshooting "no allocations are allowed".
|
||||||
|
|
||||||
[discrete]
|
[discrete]
|
||||||
[[fix-cluster-status-jvm]]
|
[[fix-cluster-status-jvm]]
|
||||||
|
@ -13,10 +13,22 @@ import java.io.InputStream;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.net.ContentHandlerFactory;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.DatagramSocketImplFactory;
import java.net.FileNameMap;
import java.net.InetAddress;
import java.net.MulticastSocket;
import java.net.NetworkInterface;
import java.net.Proxy;
import java.net.ProxySelector;
import java.net.ResponseCache;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketAddress;
import java.net.SocketImplFactory;
import java.net.URL;
import java.net.URLStreamHandler;
import java.net.URLStreamHandlerFactory;
import java.util.List;
@ -167,4 +179,79 @@ public interface EntitlementChecker {

    void check$java_net_URLConnection$$setContentHandlerFactory(Class<?> callerClass, ContentHandlerFactory fac);

    ////////////////////
    //
    // Network access
    //
    void check$java_net_ProxySelector$$setDefault(Class<?> callerClass, ProxySelector ps);

    void check$java_net_ResponseCache$$setDefault(Class<?> callerClass, ResponseCache rc);

    void check$java_net_spi_InetAddressResolverProvider$(Class<?> callerClass);

    void check$java_net_spi_URLStreamHandlerProvider$(Class<?> callerClass);

    void check$java_net_URL$(Class<?> callerClass, String protocol, String host, int port, String file, URLStreamHandler handler);

    void check$java_net_URL$(Class<?> callerClass, URL context, String spec, URLStreamHandler handler);

    void check$java_net_DatagramSocket$bind(Class<?> callerClass, DatagramSocket that, SocketAddress addr);

    void check$java_net_DatagramSocket$connect(Class<?> callerClass, DatagramSocket that, InetAddress addr);

    void check$java_net_DatagramSocket$connect(Class<?> callerClass, DatagramSocket that, SocketAddress addr);

    void check$java_net_DatagramSocket$send(Class<?> callerClass, DatagramSocket that, DatagramPacket p);

    void check$java_net_DatagramSocket$receive(Class<?> callerClass, DatagramSocket that, DatagramPacket p);

    void check$java_net_DatagramSocket$joinGroup(Class<?> callerClass, DatagramSocket that, SocketAddress addr, NetworkInterface ni);

    void check$java_net_DatagramSocket$leaveGroup(Class<?> callerClass, DatagramSocket that, SocketAddress addr, NetworkInterface ni);

    void check$java_net_MulticastSocket$joinGroup(Class<?> callerClass, MulticastSocket that, InetAddress addr);

    void check$java_net_MulticastSocket$joinGroup(Class<?> callerClass, MulticastSocket that, SocketAddress addr, NetworkInterface ni);

    void check$java_net_MulticastSocket$leaveGroup(Class<?> callerClass, MulticastSocket that, InetAddress addr);

    void check$java_net_MulticastSocket$leaveGroup(Class<?> callerClass, MulticastSocket that, SocketAddress addr, NetworkInterface ni);

    void check$java_net_MulticastSocket$send(Class<?> callerClass, MulticastSocket that, DatagramPacket p, byte ttl);

    // Binding/connecting ctor
    void check$java_net_ServerSocket$(Class<?> callerClass, int port);

    void check$java_net_ServerSocket$(Class<?> callerClass, int port, int backlog);

    void check$java_net_ServerSocket$(Class<?> callerClass, int port, int backlog, InetAddress bindAddr);

    void check$java_net_ServerSocket$accept(Class<?> callerClass, ServerSocket that);

    void check$java_net_ServerSocket$implAccept(Class<?> callerClass, ServerSocket that, Socket s);

    void check$java_net_ServerSocket$bind(Class<?> callerClass, ServerSocket that, SocketAddress endpoint);

    void check$java_net_ServerSocket$bind(Class<?> callerClass, ServerSocket that, SocketAddress endpoint, int backlog);

    // Binding/connecting ctors
    void check$java_net_Socket$(Class<?> callerClass, Proxy proxy);

    void check$java_net_Socket$(Class<?> callerClass, String host, int port);

    void check$java_net_Socket$(Class<?> callerClass, InetAddress address, int port);

    void check$java_net_Socket$(Class<?> callerClass, String host, int port, InetAddress localAddr, int localPort);

    void check$java_net_Socket$(Class<?> callerClass, InetAddress address, int port, InetAddress localAddr, int localPort);

    void check$java_net_Socket$(Class<?> callerClass, String host, int port, boolean stream);

    void check$java_net_Socket$(Class<?> callerClass, InetAddress host, int port, boolean stream);

    void check$java_net_Socket$bind(Class<?> callerClass, Socket that, SocketAddress endpoint);

    void check$java_net_Socket$connect(Class<?> callerClass, Socket that, SocketAddress endpoint);

    // Socket.connect(SocketAddress, int) takes a timeout, not a backlog
    void check$java_net_Socket$connect(Class<?> callerClass, Socket that, SocketAddress endpoint, int timeout);
}
@ -9,8 +9,19 @@

package org.elasticsearch.entitlement.qa.common;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.DatagramSocketImpl;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketAddress;
import java.net.SocketException;
import java.net.SocketImpl;
import java.security.cert.Certificate;
import java.text.BreakIterator;
import java.text.Collator;
@ -290,6 +301,81 @@ class DummyImplementations {
        }
    }

    private static class DummySocketImpl extends SocketImpl {
        @Override
        protected void create(boolean stream) {}

        @Override
        protected void connect(String host, int port) {}

        @Override
        protected void connect(InetAddress address, int port) {}

        @Override
        protected void connect(SocketAddress address, int timeout) {}

        @Override
        protected void bind(InetAddress host, int port) {}

        @Override
        protected void listen(int backlog) {}

        @Override
        protected void accept(SocketImpl s) {}

        @Override
        protected InputStream getInputStream() {
            return null;
        }

        @Override
        protected OutputStream getOutputStream() {
            return null;
        }

        @Override
        protected int available() {
            return 0;
        }

        @Override
        protected void close() {}

        @Override
        protected void sendUrgentData(int data) {}

        @Override
        public void setOption(int optID, Object value) {}

        @Override
        public Object getOption(int optID) {
            return null;
        }
    }

    static class DummySocket extends Socket {
        DummySocket() throws SocketException {
            super(new DummySocketImpl());
        }
    }

    static class DummyServerSocket extends ServerSocket {
        DummyServerSocket() {
            super(new DummySocketImpl());
        }
    }

    static class DummyBoundServerSocket extends ServerSocket {
        DummyBoundServerSocket() {
            super(new DummySocketImpl());
        }

        @Override
        public boolean isBound() {
            return true;
        }
    }

    static class DummySSLSocketFactory extends SSLSocketFactory {
        @Override
        public Socket createSocket(String host, int port) {
@ -327,8 +413,77 @@ class DummyImplementations {
        }
    }

    static class DummyDatagramSocket extends DatagramSocket {
        DummyDatagramSocket() throws SocketException {
            super(new DatagramSocketImpl() {
                @Override
                protected void create() throws SocketException {}

                @Override
                protected void bind(int lport, InetAddress laddr) throws SocketException {}

                @Override
                protected void send(DatagramPacket p) throws IOException {}

                @Override
                protected int peek(InetAddress i) throws IOException {
                    return 0;
                }

                @Override
                protected int peekData(DatagramPacket p) throws IOException {
                    return 0;
                }

                @Override
                protected void receive(DatagramPacket p) throws IOException {}

                @Override
                protected void setTTL(byte ttl) throws IOException {}

                @Override
                protected byte getTTL() throws IOException {
                    return 0;
                }

                @Override
                protected void setTimeToLive(int ttl) throws IOException {}

                @Override
                protected int getTimeToLive() throws IOException {
                    return 0;
                }

                @Override
                protected void join(InetAddress inetaddr) throws IOException {}

                @Override
                protected void leave(InetAddress inetaddr) throws IOException {}

                @Override
                protected void joinGroup(SocketAddress mcastaddr, NetworkInterface netIf) throws IOException {}

                @Override
                protected void leaveGroup(SocketAddress mcastaddr, NetworkInterface netIf) throws IOException {}

                @Override
                protected void close() {}

                @Override
                public void setOption(int optID, Object value) throws SocketException {}

                @Override
                public Object getOption(int optID) throws SocketException {
                    return null;
                }

                @Override
                protected void connect(InetAddress address, int port) throws SocketException {}
            });
        }
    }

    private static RuntimeException unexpected() {
        return new IllegalStateException("This method isn't supposed to be called");
    }

}
@ -0,0 +1,62 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the "Elastic License
 * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
 * Public License v 1"; you may not use this file except in compliance with, at
 * your election, the "Elastic License 2.0", the "GNU Affero General Public
 * License v3.0 only", or the "Server Side Public License, v 1".
 */

package org.elasticsearch.entitlement.qa.common;

import org.elasticsearch.core.SuppressForbidden;

import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.Proxy;
import java.net.ServerSocket;
import java.net.Socket;

class NetworkAccessCheckActions {

    static void serverSocketAccept() throws IOException {
        try (ServerSocket socket = new DummyImplementations.DummyBoundServerSocket()) {
            try {
                socket.accept();
            } catch (IOException e) {
                // Our dummy socket cannot accept connections unless we tell the JDK how to create a socket for it.
                // But Socket.setSocketImplFactory() is one of the methods we always forbid, so we cannot use it.
                // Still, we can check that accept is called (allowed/denied); we don't care if it fails later for this
                // known reason.
                assert e.getMessage().contains("client socket implementation factory not set");
            }
        }
    }

    static void serverSocketBind() throws IOException {
        try (ServerSocket socket = new DummyImplementations.DummyServerSocket()) {
            socket.bind(null);
        }
    }

    @SuppressForbidden(reason = "Testing entitlement check on forbidden action")
    static void createSocketWithProxy() throws IOException {
        try (Socket socket = new Socket(new Proxy(Proxy.Type.HTTP, new InetSocketAddress(0)))) {
            assert socket.isBound() == false;
        }
    }

    static void socketBind() throws IOException {
        try (Socket socket = new DummyImplementations.DummySocket()) {
            socket.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0));
        }
    }

    @SuppressForbidden(reason = "Testing entitlement check on forbidden action")
    static void socketConnect() throws IOException {
        try (Socket socket = new DummyImplementations.DummySocket()) {
            socket.connect(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0));
        }
    }
}
@ -11,6 +11,7 @@ package org.elasticsearch.entitlement.qa.common;

import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.core.CheckedRunnable;
import org.elasticsearch.core.SuppressForbidden;
import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyBreakIteratorProvider;
import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyCalendarDataProvider;
@ -32,16 +33,25 @@ import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
import java.io.UncheckedIOException;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.DatagramSocketImpl;
import java.net.DatagramSocketImplFactory;
import java.net.HttpURLConnection;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.NetworkInterface;
import java.net.ProxySelector;
import java.net.ResponseCache;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketException;
import java.net.URL;
import java.net.URLClassLoader;
import java.net.URLConnection;
import java.net.URLStreamHandler;
import java.net.spi.InetAddressResolver;
import java.net.spi.InetAddressResolverProvider;
import java.net.spi.URLStreamHandlerProvider;
import java.security.NoSuchAlgorithmException;
import java.util.List;
import java.util.Map;
@ -57,25 +67,26 @@ import static org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckActio
import static org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction.CheckAction.forPlugins;
import static org.elasticsearch.rest.RestRequest.Method.GET;

@SuppressWarnings("unused")
public class RestEntitlementsCheckAction extends BaseRestHandler {
    private static final Logger logger = LogManager.getLogger(RestEntitlementsCheckAction.class);
    public static final Thread NO_OP_SHUTDOWN_HOOK = new Thread(() -> {}, "Shutdown hook for testing");
    private final String prefix;

    record CheckAction(Runnable action, boolean isAlwaysDeniedToPlugins) {
    record CheckAction(CheckedRunnable<Exception> action, boolean isAlwaysDeniedToPlugins) {
        /**
         * These cannot be granted to plugins, so our test plugins cannot test the "allowed" case.
         * Used both for always-denied entitlements as well as those granted only to the server itself.
         */
        static CheckAction deniedToPlugins(Runnable action) {
        static CheckAction deniedToPlugins(CheckedRunnable<Exception> action) {
            return new CheckAction(action, true);
        }

        static CheckAction forPlugins(Runnable action) {
        static CheckAction forPlugins(CheckedRunnable<Exception> action) {
            return new CheckAction(action, false);
        }

        static CheckAction alwaysDenied(Runnable action) {
        static CheckAction alwaysDenied(CheckedRunnable<Exception> action) {
            return new CheckAction(action, true);
        }
    }
@ -125,15 +136,81 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
        entry("socket_setSocketImplFactory", alwaysDenied(RestEntitlementsCheckAction::socket$$setSocketImplFactory)),
        entry("url_setURLStreamHandlerFactory", alwaysDenied(RestEntitlementsCheckAction::url$$setURLStreamHandlerFactory)),
        entry("urlConnection_setFileNameMap", alwaysDenied(RestEntitlementsCheckAction::urlConnection$$setFileNameMap)),
        entry("urlConnection_setContentHandlerFactory", alwaysDenied(RestEntitlementsCheckAction::urlConnection$$setContentHandlerFactory))
        entry("urlConnection_setContentHandlerFactory", alwaysDenied(RestEntitlementsCheckAction::urlConnection$$setContentHandlerFactory)),

        entry("proxySelector_setDefault", alwaysDenied(RestEntitlementsCheckAction::setDefaultProxySelector)),
        entry("responseCache_setDefault", alwaysDenied(RestEntitlementsCheckAction::setDefaultResponseCache)),
        entry("createInetAddressResolverProvider", alwaysDenied(RestEntitlementsCheckAction::createInetAddressResolverProvider)),
        entry("createURLStreamHandlerProvider", alwaysDenied(RestEntitlementsCheckAction::createURLStreamHandlerProvider)),
        entry("createURLWithURLStreamHandler", alwaysDenied(RestEntitlementsCheckAction::createURLWithURLStreamHandler)),
        entry("createURLWithURLStreamHandler2", alwaysDenied(RestEntitlementsCheckAction::createURLWithURLStreamHandler2)),
        entry("datagram_socket_bind", forPlugins(RestEntitlementsCheckAction::bindDatagramSocket)),
        entry("datagram_socket_connect", forPlugins(RestEntitlementsCheckAction::connectDatagramSocket)),
        entry("datagram_socket_send", forPlugins(RestEntitlementsCheckAction::sendDatagramSocket)),
        entry("datagram_socket_receive", forPlugins(RestEntitlementsCheckAction::receiveDatagramSocket)),
        entry("datagram_socket_join_group", forPlugins(RestEntitlementsCheckAction::joinGroupDatagramSocket)),
        entry("datagram_socket_leave_group", forPlugins(RestEntitlementsCheckAction::leaveGroupDatagramSocket)),

        entry("create_socket_with_proxy", forPlugins(NetworkAccessCheckActions::createSocketWithProxy)),
        entry("socket_bind", forPlugins(NetworkAccessCheckActions::socketBind)),
        entry("socket_connect", forPlugins(NetworkAccessCheckActions::socketConnect)),
        entry("server_socket_bind", forPlugins(NetworkAccessCheckActions::serverSocketBind)),
        entry("server_socket_accept", forPlugins(NetworkAccessCheckActions::serverSocketAccept))
    );

    private static void setDefaultSSLContext() {
        try {
            SSLContext.setDefault(SSLContext.getDefault());
        } catch (NoSuchAlgorithmException e) {
            throw new RuntimeException(e);
        }
    }

    private static void createURLStreamHandlerProvider() {
        var x = new URLStreamHandlerProvider() {
            @Override
            public URLStreamHandler createURLStreamHandler(String protocol) {
                return null;
            }
        };
    }

    @SuppressWarnings("deprecation")
    private static void createURLWithURLStreamHandler() throws MalformedURLException {
        var x = new URL("http", "host", 1234, "file", new URLStreamHandler() {
            @Override
            protected URLConnection openConnection(URL u) {
                return null;
            }
        });
    }

    @SuppressWarnings("deprecation")
    private static void createURLWithURLStreamHandler2() throws MalformedURLException {
        var x = new URL(null, "spec", new URLStreamHandler() {
            @Override
            protected URLConnection openConnection(URL u) {
                return null;
            }
        });
    }

    private static void createInetAddressResolverProvider() {
        var x = new InetAddressResolverProvider() {
            @Override
            public InetAddressResolver get(Configuration configuration) {
                return null;
            }

            @Override
            public String name() {
                return "TEST";
            }
        };
    }

    private static void setDefaultResponseCache() {
        ResponseCache.setDefault(null);
    }

    private static void setDefaultProxySelector() {
        ProxySelector.setDefault(null);
    }

    private static void setDefaultSSLContext() throws NoSuchAlgorithmException {
        SSLContext.setDefault(SSLContext.getDefault());
    }

    private static void setDefaultHostnameVerifier() {
||||||
|
@ -159,28 +236,18 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
|
||||||
System.exit(123);
|
System.exit(123);
|
||||||
}
|
}
|
||||||
|
|
||||||
private static void createClassLoader() {
|
private static void createClassLoader() throws IOException {
|
||||||
try (var classLoader = new URLClassLoader("test", new URL[0], RestEntitlementsCheckAction.class.getClassLoader())) {
|
try (var classLoader = new URLClassLoader("test", new URL[0], RestEntitlementsCheckAction.class.getClassLoader())) {
|
||||||
logger.info("Created URLClassLoader [{}]", classLoader.getName());
|
logger.info("Created URLClassLoader [{}]", classLoader.getName());
|
||||||
} catch (IOException e) {
|
|
||||||
throw new UncheckedIOException(e);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private static void processBuilder_start() {
|
private static void processBuilder_start() throws IOException {
|
||||||
try {
|
|
||||||
new ProcessBuilder("").start();
|
new ProcessBuilder("").start();
|
||||||
} catch (IOException e) {
|
|
||||||
throw new IllegalStateException(e);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private static void processBuilder_startPipeline() {
|
private static void processBuilder_startPipeline() throws IOException {
|
||||||
try {
|
|
||||||
ProcessBuilder.startPipeline(List.of());
|
ProcessBuilder.startPipeline(List.of());
|
||||||
} catch (IOException e) {
|
|
||||||
throw new IllegalStateException(e);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private static void setHttpsConnectionProperties() {
|
private static void setHttpsConnectionProperties() {
|
||||||
|
@ -268,17 +335,8 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
|
||||||
|
|
||||||
@SuppressWarnings("deprecation")
|
@SuppressWarnings("deprecation")
|
||||||
@SuppressForbidden(reason = "We're required to prevent calls to this forbidden API")
|
@SuppressForbidden(reason = "We're required to prevent calls to this forbidden API")
|
||||||
private static void datagramSocket$$setDatagramSocketImplFactory() {
|
private static void datagramSocket$$setDatagramSocketImplFactory() throws IOException {
|
||||||
try {
|
DatagramSocket.setDatagramSocketImplFactory(() -> { throw new IllegalStateException(); });
|
||||||
DatagramSocket.setDatagramSocketImplFactory(new DatagramSocketImplFactory() {
|
|
||||||
@Override
|
|
||||||
public DatagramSocketImpl createDatagramSocketImpl() {
|
|
||||||
throw new IllegalStateException();
|
|
||||||
}
|
|
||||||
});
|
|
||||||
} catch (IOException e) {
|
|
||||||
throw new IllegalStateException(e);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private static void httpURLConnection$$setFollowRedirects() {
|
private static void httpURLConnection$$setFollowRedirects() {
|
||||||
|
@ -287,22 +345,14 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
|
||||||
|
|
||||||
@SuppressWarnings("deprecation")
|
@SuppressWarnings("deprecation")
|
||||||
@SuppressForbidden(reason = "We're required to prevent calls to this forbidden API")
|
@SuppressForbidden(reason = "We're required to prevent calls to this forbidden API")
|
||||||
private static void serverSocket$$setSocketFactory() {
|
private static void serverSocket$$setSocketFactory() throws IOException {
|
||||||
try {
|
|
||||||
ServerSocket.setSocketFactory(() -> { throw new IllegalStateException(); });
|
ServerSocket.setSocketFactory(() -> { throw new IllegalStateException(); });
|
||||||
} catch (IOException e) {
|
|
||||||
throw new IllegalStateException(e);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@SuppressWarnings("deprecation")
|
@SuppressWarnings("deprecation")
|
||||||
@SuppressForbidden(reason = "We're required to prevent calls to this forbidden API")
|
@SuppressForbidden(reason = "We're required to prevent calls to this forbidden API")
|
||||||
private static void socket$$setSocketImplFactory() {
|
private static void socket$$setSocketImplFactory() throws IOException {
|
||||||
try {
|
|
||||||
Socket.setSocketImplFactory(() -> { throw new IllegalStateException(); });
|
Socket.setSocketImplFactory(() -> { throw new IllegalStateException(); });
|
||||||
} catch (IOException e) {
|
|
||||||
throw new IllegalStateException(e);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private static void url$$setURLStreamHandlerFactory() {
|
private static void url$$setURLStreamHandlerFactory() {
|
||||||
|
@ -317,6 +367,51 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
|
||||||
URLConnection.setContentHandlerFactory(__ -> { throw new IllegalStateException(); });
|
URLConnection.setContentHandlerFactory(__ -> { throw new IllegalStateException(); });
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private static void bindDatagramSocket() throws SocketException {
|
||||||
|
try (var socket = new DatagramSocket(null)) {
|
||||||
|
socket.bind(null);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@SuppressForbidden(reason = "testing entitlements")
|
||||||
|
private static void connectDatagramSocket() throws SocketException {
|
||||||
|
try (var socket = new DummyImplementations.DummyDatagramSocket()) {
|
||||||
|
socket.connect(new InetSocketAddress(1234));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void joinGroupDatagramSocket() throws IOException {
|
||||||
|
try (var socket = new DummyImplementations.DummyDatagramSocket()) {
|
||||||
|
socket.joinGroup(
|
||||||
|
new InetSocketAddress(InetAddress.getByAddress(new byte[] { (byte) 230, 0, 0, 1 }), 1234),
|
||||||
|
NetworkInterface.getByIndex(0)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void leaveGroupDatagramSocket() throws IOException {
|
||||||
|
try (var socket = new DummyImplementations.DummyDatagramSocket()) {
|
||||||
|
socket.leaveGroup(
|
||||||
|
new InetSocketAddress(InetAddress.getByAddress(new byte[] { (byte) 230, 0, 0, 1 }), 1234),
|
||||||
|
NetworkInterface.getByIndex(0)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@SuppressForbidden(reason = "testing entitlements")
|
||||||
|
private static void sendDatagramSocket() throws IOException {
|
||||||
|
try (var socket = new DummyImplementations.DummyDatagramSocket()) {
|
||||||
|
socket.send(new DatagramPacket(new byte[] { 0 }, 1, InetAddress.getLocalHost(), 1234));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@SuppressForbidden(reason = "testing entitlements")
|
||||||
|
private static void receiveDatagramSocket() throws IOException {
|
||||||
|
try (var socket = new DummyImplementations.DummyDatagramSocket()) {
|
||||||
|
socket.receive(new DatagramPacket(new byte[1], 1, InetAddress.getLocalHost(), 1234));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
public RestEntitlementsCheckAction(String prefix) {
|
public RestEntitlementsCheckAction(String prefix) {
|
||||||
this.prefix = prefix;
|
this.prefix = prefix;
|
||||||
}
|
}
|
||||||
|
|
|
@@ -1,3 +1,8 @@
 ALL-UNNAMED:
   - create_class_loader
   - set_https_connection_properties
+  - network:
+      actions:
+        - listen
+        - accept
+        - connect
@@ -1,3 +1,8 @@
 org.elasticsearch.entitlement.qa.common:
   - create_class_loader
   - set_https_connection_properties
+  - network:
+      actions:
+        - listen
+        - accept
+        - connect
@@ -22,6 +22,7 @@ import org.elasticsearch.entitlement.runtime.api.ElasticsearchEntitlementChecker
 import org.elasticsearch.entitlement.runtime.policy.CreateClassLoaderEntitlement;
 import org.elasticsearch.entitlement.runtime.policy.Entitlement;
 import org.elasticsearch.entitlement.runtime.policy.ExitVMEntitlement;
+import org.elasticsearch.entitlement.runtime.policy.NetworkEntitlement;
 import org.elasticsearch.entitlement.runtime.policy.Policy;
 import org.elasticsearch.entitlement.runtime.policy.PolicyManager;
 import org.elasticsearch.entitlement.runtime.policy.PolicyParser;
@@ -44,6 +45,9 @@ import java.util.Map;
 import java.util.Set;
 import java.util.stream.Collectors;
 
+import static org.elasticsearch.entitlement.runtime.policy.NetworkEntitlement.ACCEPT_ACTION;
+import static org.elasticsearch.entitlement.runtime.policy.NetworkEntitlement.CONNECT_ACTION;
+import static org.elasticsearch.entitlement.runtime.policy.NetworkEntitlement.LISTEN_ACTION;
 import static org.elasticsearch.entitlement.runtime.policy.PolicyManager.ALL_UNNAMED;
 
 /**
@@ -97,7 +101,15 @@ public class EntitlementInitialization {
             List.of(
                 new Scope("org.elasticsearch.base", List.of(new CreateClassLoaderEntitlement())),
                 new Scope("org.elasticsearch.xcontent", List.of(new CreateClassLoaderEntitlement())),
-                new Scope("org.elasticsearch.server", List.of(new ExitVMEntitlement(), new CreateClassLoaderEntitlement()))
+                new Scope(
+                    "org.elasticsearch.server",
+                    List.of(
+                        new ExitVMEntitlement(),
+                        new CreateClassLoaderEntitlement(),
+                        new NetworkEntitlement(LISTEN_ACTION | CONNECT_ACTION | ACCEPT_ACTION)
+                    )
+                ),
+                new Scope("org.apache.httpcomponents.httpclient", List.of(new NetworkEntitlement(CONNECT_ACTION)))
             )
         );
         // agents run without a module, so this is a special hack for the apm agent
@@ -10,16 +10,29 @@
 package org.elasticsearch.entitlement.runtime.api;
 
 import org.elasticsearch.entitlement.bridge.EntitlementChecker;
+import org.elasticsearch.entitlement.runtime.policy.NetworkEntitlement;
 import org.elasticsearch.entitlement.runtime.policy.PolicyManager;
 
 import java.io.InputStream;
 import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.net.ContentHandlerFactory;
+import java.net.DatagramPacket;
+import java.net.DatagramSocket;
 import java.net.DatagramSocketImplFactory;
 import java.net.FileNameMap;
+import java.net.InetAddress;
+import java.net.MulticastSocket;
+import java.net.NetworkInterface;
+import java.net.Proxy;
+import java.net.ProxySelector;
+import java.net.ResponseCache;
+import java.net.ServerSocket;
+import java.net.Socket;
+import java.net.SocketAddress;
 import java.net.SocketImplFactory;
 import java.net.URL;
+import java.net.URLStreamHandler;
 import java.net.URLStreamHandlerFactory;
 import java.util.List;
|
||||||
public void check$javax_net_ssl_SSLContext$$setDefault(Class<?> callerClass, SSLContext context) {
|
public void check$javax_net_ssl_SSLContext$$setDefault(Class<?> callerClass, SSLContext context) {
|
||||||
policyManager.checkChangeJVMGlobalState(callerClass);
|
policyManager.checkChangeJVMGlobalState(callerClass);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void check$java_net_ProxySelector$$setDefault(Class<?> callerClass, ProxySelector ps) {
|
||||||
|
policyManager.checkChangeNetworkHandling(callerClass);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void check$java_net_ResponseCache$$setDefault(Class<?> callerClass, ResponseCache rc) {
|
||||||
|
policyManager.checkChangeNetworkHandling(callerClass);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void check$java_net_spi_InetAddressResolverProvider$(Class<?> callerClass) {
|
||||||
|
policyManager.checkChangeNetworkHandling(callerClass);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void check$java_net_spi_URLStreamHandlerProvider$(Class<?> callerClass) {
|
||||||
|
policyManager.checkChangeNetworkHandling(callerClass);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void check$java_net_URL$(Class<?> callerClass, String protocol, String host, int port, String file, URLStreamHandler handler) {
|
||||||
|
policyManager.checkChangeNetworkHandling(callerClass);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void check$java_net_URL$(Class<?> callerClass, URL context, String spec, URLStreamHandler handler) {
|
||||||
|
policyManager.checkChangeNetworkHandling(callerClass);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void check$java_net_DatagramSocket$bind(Class<?> callerClass, DatagramSocket that, SocketAddress addr) {
|
||||||
|
policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void check$java_net_DatagramSocket$connect(Class<?> callerClass, DatagramSocket that, InetAddress addr) {
|
||||||
|
policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void check$java_net_DatagramSocket$connect(Class<?> callerClass, DatagramSocket that, SocketAddress addr) {
|
||||||
|
policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void check$java_net_DatagramSocket$send(Class<?> callerClass, DatagramSocket that, DatagramPacket p) {
|
||||||
|
var actions = NetworkEntitlement.CONNECT_ACTION;
|
||||||
|
if (p.getAddress().isMulticastAddress()) {
|
||||||
|
actions |= NetworkEntitlement.ACCEPT_ACTION;
|
||||||
|
}
|
||||||
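+        // For a multicast destination the required mask widens to CONNECT_ACTION | ACCEPT_ACTION;
+        // a unicast destination needs CONNECT_ACTION only. (Descriptive comment added here for
+        // clarity; it is not part of the upstream change.)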
+        policyManager.checkNetworkAccess(callerClass, actions);
+    }
+
+    @Override
+    public void check$java_net_DatagramSocket$receive(Class<?> callerClass, DatagramSocket that, DatagramPacket p) {
+        policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.ACCEPT_ACTION);
+    }
+
+    @Override
+    public void check$java_net_DatagramSocket$joinGroup(Class<?> caller, DatagramSocket that, SocketAddress addr, NetworkInterface ni) {
+        policyManager.checkNetworkAccess(caller, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+    }
+
+    @Override
+    public void check$java_net_DatagramSocket$leaveGroup(Class<?> caller, DatagramSocket that, SocketAddress addr, NetworkInterface ni) {
+        policyManager.checkNetworkAccess(caller, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+    }
+
+    @Override
+    public void check$java_net_MulticastSocket$joinGroup(Class<?> callerClass, MulticastSocket that, InetAddress addr) {
+        policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+    }
+
+    @Override
+    public void check$java_net_MulticastSocket$joinGroup(Class<?> caller, MulticastSocket that, SocketAddress addr, NetworkInterface ni) {
+        policyManager.checkNetworkAccess(caller, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+    }
+
+    @Override
+    public void check$java_net_MulticastSocket$leaveGroup(Class<?> caller, MulticastSocket that, InetAddress addr) {
+        policyManager.checkNetworkAccess(caller, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+    }
+
+    @Override
+    public void check$java_net_MulticastSocket$leaveGroup(Class<?> caller, MulticastSocket that, SocketAddress addr, NetworkInterface ni) {
+        policyManager.checkNetworkAccess(caller, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+    }
+
+    @Override
+    public void check$java_net_MulticastSocket$send(Class<?> callerClass, MulticastSocket that, DatagramPacket p, byte ttl) {
+        policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+    }
+
+    @Override
+    public void check$java_net_ServerSocket$(Class<?> callerClass, int port) {
+        policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION);
+    }
+
+    @Override
+    public void check$java_net_ServerSocket$(Class<?> callerClass, int port, int backlog) {
+        policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION);
+    }
+
+    @Override
+    public void check$java_net_ServerSocket$(Class<?> callerClass, int port, int backlog, InetAddress bindAddr) {
+        policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION);
+    }
+
+    @Override
+    public void check$java_net_ServerSocket$accept(Class<?> callerClass, ServerSocket that) {
+        policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.ACCEPT_ACTION);
+    }
+
+    @Override
+    public void check$java_net_ServerSocket$implAccept(Class<?> callerClass, ServerSocket that, Socket s) {
+        policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.ACCEPT_ACTION);
+    }
+
+    @Override
+    public void check$java_net_ServerSocket$bind(Class<?> callerClass, ServerSocket that, SocketAddress endpoint) {
+        policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION);
+    }
+
+    @Override
+    public void check$java_net_ServerSocket$bind(Class<?> callerClass, ServerSocket that, SocketAddress endpoint, int backlog) {
+        policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION);
+    }
+
+    @Override
+    public void check$java_net_Socket$(Class<?> callerClass, Proxy proxy) {
+        if (proxy.type() == Proxy.Type.SOCKS || proxy.type() == Proxy.Type.HTTP) {
+            policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION);
+        }
+    }
+
+    @Override
+    public void check$java_net_Socket$(Class<?> callerClass, String host, int port) {
+        policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION);
+    }
+
+    @Override
+    public void check$java_net_Socket$(Class<?> callerClass, InetAddress address, int port) {
+        policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION);
+    }
+
+    @Override
+    public void check$java_net_Socket$(Class<?> callerClass, String host, int port, InetAddress localAddr, int localPort) {
+        policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION);
+    }
+
+    @Override
+    public void check$java_net_Socket$(Class<?> callerClass, InetAddress address, int port, InetAddress localAddr, int localPort) {
+        policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION);
+    }
+
+    @Override
+    public void check$java_net_Socket$(Class<?> callerClass, String host, int port, boolean stream) {
+        policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION);
+    }
+
+    @Override
+    public void check$java_net_Socket$(Class<?> callerClass, InetAddress host, int port, boolean stream) {
+        policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION);
+    }
+
+    @Override
+    public void check$java_net_Socket$bind(Class<?> callerClass, Socket that, SocketAddress endpoint) {
+        policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION);
+    }
+
+    @Override
+    public void check$java_net_Socket$connect(Class<?> callerClass, Socket that, SocketAddress endpoint) {
+        policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION);
+    }
+
+    @Override
+    public void check$java_net_Socket$connect(Class<?> callerClass, Socket that, SocketAddress endpoint, int backlog) {
+        policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION);
+    }
 }
@@ -0,0 +1,111 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.entitlement.runtime.policy;
+
+import org.elasticsearch.core.Strings;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.StringJoiner;
+
+import static java.util.Map.entry;
+
+/**
+ * Describes a network entitlement (sockets) with actions.
+ */
+public class NetworkEntitlement implements Entitlement {
+
+    public static final int LISTEN_ACTION = 0x1;
+    public static final int CONNECT_ACTION = 0x2;
+    public static final int ACCEPT_ACTION = 0x4;
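+    // Single-bit flags, combinable with bitwise OR, e.g. LISTEN_ACTION | CONNECT_ACTION | ACCEPT_ACTION == 0x7.
+    // (Descriptive comment added for clarity; not in the upstream file.)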
+
+    static final String LISTEN = "listen";
+    static final String CONNECT = "connect";
+    static final String ACCEPT = "accept";
+
+    private static final Map<String, Integer> ACTION_MAP = Map.ofEntries(
+        entry(LISTEN, LISTEN_ACTION),
+        entry(CONNECT, CONNECT_ACTION),
+        entry(ACCEPT, ACCEPT_ACTION)
+    );
+
+    private final int actions;
+
+    @ExternalEntitlement(parameterNames = { "actions" }, esModulesOnly = false)
+    public NetworkEntitlement(List<String> actionsList) {
+
+        int actionsInt = 0;
+
+        for (String actionString : actionsList) {
+            var action = ACTION_MAP.get(actionString);
+            if (action == null) {
+                throw new IllegalArgumentException("unknown network action [" + actionString + "]");
+            }
+            if ((actionsInt & action) == action) {
+                throw new IllegalArgumentException(Strings.format("network action [%s] specified multiple times", actionString));
+            }
+            actionsInt |= action;
+        }
+
+        this.actions = actionsInt;
+    }
+
+    public NetworkEntitlement(int actions) {
+        this.actions = actions;
+    }
+
+    public static String printActions(int actions) {
+        var joiner = new StringJoiner(",");
+        for (var entry : ACTION_MAP.entrySet()) {
+            var action = entry.getValue();
+            if ((actions & action) == action) {
+                joiner.add(entry.getKey());
+            }
+        }
+        return joiner.toString();
+    }
+
+    /**
+     * For the actions to match, the actions present in this entitlement must be a superset
+     * of the actions required by a check.
+     * There is only one "negative" case (action required by the check but not present in the entitlement),
+     * and it can be expressed efficiently via this truth table:
+     * this.actions | requiredActions |
+     *      0       |       0         | 0
+     *      0       |       1         | 1 --> NOT this.action AND requiredActions
+     *      1       |       0         | 0
+     *      1       |       1         | 0
+     *
+     * @param requiredActions the actions required to be present for a check to pass
+     * @return true if requiredActions are present, false otherwise
+     */
+    public boolean matchActions(int requiredActions) {
+        return (~this.actions & requiredActions) == 0;
+    }
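+    // Worked example of the truth table above (descriptive comment added for clarity, not in
+    // the upstream file): with actions == LISTEN_ACTION | CONNECT_ACTION (0b011),
+    // matchActions(CONNECT_ACTION) computes ~0b011 & 0b010 == 0 and matches, while
+    // matchActions(ACCEPT_ACTION) computes ~0b011 & 0b100 == 0b100 and does not.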
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        NetworkEntitlement that = (NetworkEntitlement) o;
+        return actions == that.actions;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(actions);
+    }
+
+    @Override
+    public String toString() {
+        return "NetworkEntitlement{actions=" + actions + '}';
+    }
+}
@@ -52,7 +52,11 @@ public class PolicyManager {
         }
 
         public <E extends Entitlement> Stream<E> getEntitlements(Class<E> entitlementClass) {
-            return entitlementsByType.get(entitlementClass).stream().map(entitlementClass::cast);
+            var entitlements = entitlementsByType.get(entitlementClass);
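+            // A module may hold no entitlements of the requested type at all, in which case
+            // the lookup yields null; return an empty stream rather than throwing an NPE.
+            // (Descriptive comment added for clarity; not in the upstream change.)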
+            if (entitlements == null) {
+                return Stream.empty();
+            }
+            return entitlements.stream().map(entitlementClass::cast);
         }
     }
@@ -171,25 +175,67 @@ public class PolicyManager {
         });
     }
 
+    /**
+     * Check for operations that can modify the way network operations are handled
+     */
+    public void checkChangeNetworkHandling(Class<?> callerClass) {
+        checkChangeJVMGlobalState(callerClass);
+    }
+
+    /**
+     * Check for operations that can access sensitive network information, e.g. secrets, tokens or SSL sessions
+     */
+    public void checkReadSensitiveNetworkInformation(Class<?> callerClass) {
+        neverEntitled(callerClass, "access sensitive network information");
+    }
+
     private String operationDescription(String methodName) {
         // TODO: Use a more human-readable description. Perhaps share code with InstrumentationServiceImpl.parseCheckerMethodName
         return methodName.substring(methodName.indexOf('$'));
     }
 
+    public void checkNetworkAccess(Class<?> callerClass, int actions) {
+        var requestingClass = requestingClass(callerClass);
+        if (isTriviallyAllowed(requestingClass)) {
+            return;
+        }
+
+        ModuleEntitlements entitlements = getEntitlements(requestingClass, NetworkEntitlement.class);
+        if (entitlements.getEntitlements(NetworkEntitlement.class).anyMatch(n -> n.matchActions(actions))) {
+            logger.debug(
+                () -> Strings.format(
+                    "Entitled: class [%s], module [%s], entitlement [network], actions [%s]",
+                    requestingClass,
+                    requestingClass.getModule().getName(),
+                    NetworkEntitlement.printActions(actions)
+                )
+            );
+            return;
+        }
+        throw new NotEntitledException(
+            Strings.format(
+                "Missing entitlement: class [%s], module [%s], entitlement [network], actions [%s]",
+                requestingClass,
+                requestingClass.getModule().getName(),
+                NetworkEntitlement.printActions(actions)
+            )
+        );
+    }
+
     private void checkEntitlementPresent(Class<?> callerClass, Class<? extends Entitlement> entitlementClass) {
         var requestingClass = requestingClass(callerClass);
         if (isTriviallyAllowed(requestingClass)) {
             return;
         }
 
-        ModuleEntitlements entitlements = getEntitlements(requestingClass);
+        ModuleEntitlements entitlements = getEntitlements(requestingClass, entitlementClass);
         if (entitlements.hasEntitlement(entitlementClass)) {
             logger.debug(
                 () -> Strings.format(
                     "Entitled: class [%s], module [%s], entitlement [%s]",
                     requestingClass,
                     requestingClass.getModule().getName(),
-                    entitlementClass.getSimpleName()
+                    PolicyParser.getEntitlementTypeName(entitlementClass)
                 )
             );
             return;
@@ -199,19 +245,22 @@ public class PolicyManager {
                 "Missing entitlement: class [%s], module [%s], entitlement [%s]",
                 requestingClass,
                 requestingClass.getModule().getName(),
-                entitlementClass.getSimpleName()
+                PolicyParser.getEntitlementTypeName(entitlementClass)
             )
         );
     }
 
-    ModuleEntitlements getEntitlements(Class<?> requestingClass) {
-        return moduleEntitlementsMap.computeIfAbsent(requestingClass.getModule(), m -> computeEntitlements(requestingClass));
+    ModuleEntitlements getEntitlements(Class<?> requestingClass, Class<? extends Entitlement> entitlementClass) {
+        return moduleEntitlementsMap.computeIfAbsent(
+            requestingClass.getModule(),
+            m -> computeEntitlements(requestingClass, entitlementClass)
+        );
     }
 
-    private ModuleEntitlements computeEntitlements(Class<?> requestingClass) {
+    private ModuleEntitlements computeEntitlements(Class<?> requestingClass, Class<? extends Entitlement> entitlementClass) {
         Module requestingModule = requestingClass.getModule();
         if (isServerModule(requestingModule)) {
-            return getModuleScopeEntitlements(requestingClass, serverEntitlements, requestingModule.getName());
+            return getModuleScopeEntitlements(requestingClass, serverEntitlements, requestingModule.getName(), "server", entitlementClass);
         }
 
         // plugins
@@ -225,7 +274,7 @@ public class PolicyManager {
         } else {
             scopeName = requestingModule.getName();
         }
-        return getModuleScopeEntitlements(requestingClass, pluginEntitlements, scopeName);
+        return getModuleScopeEntitlements(requestingClass, pluginEntitlements, scopeName, pluginName, entitlementClass);
     }
 }
@@ -241,11 +290,19 @@ public class PolicyManager {
     private ModuleEntitlements getModuleScopeEntitlements(
         Class<?> callerClass,
         Map<String, List<Entitlement>> scopeEntitlements,
-        String moduleName
+        String moduleName,
+        String component,
+        Class<? extends Entitlement> entitlementClass
     ) {
         var entitlements = scopeEntitlements.get(moduleName);
         if (entitlements == null) {
-            logger.warn("No applicable entitlement policy for module [{}], class [{}]", moduleName, callerClass);
+            logger.warn(
+                "No applicable entitlement policy for entitlement [{}] in [{}], module [{}], class [{}]",
+                PolicyParser.getEntitlementTypeName(entitlementClass),
+                component,
+                moduleName,
+                callerClass
+            );
             return ModuleEntitlements.NONE;
         }
         return ModuleEntitlements.from(entitlements);
@@ -37,7 +37,8 @@ public class PolicyParser {
     private static final Map<String, Class<?>> EXTERNAL_ENTITLEMENTS = Stream.of(
         FileEntitlement.class,
         CreateClassLoaderEntitlement.class,
-        SetHttpsConnectionPropertiesEntitlement.class
+        SetHttpsConnectionPropertiesEntitlement.class,
+        NetworkEntitlement.class
     ).collect(Collectors.toUnmodifiableMap(PolicyParser::getEntitlementTypeName, Function.identity()));
 
     protected final XContentParser policyParser;
@@ -0,0 +1,49 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.entitlement.runtime.policy;
+
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.List;
+
+import static org.hamcrest.Matchers.is;
+
+public class NetworkEntitlementTests extends ESTestCase {
+
+    public void testMatchesActions() {
+        var listenEntitlement = new NetworkEntitlement(List.of(NetworkEntitlement.LISTEN));
+        var emptyEntitlement = new NetworkEntitlement(List.of());
+        var connectAcceptEntitlement = new NetworkEntitlement(List.of(NetworkEntitlement.CONNECT, NetworkEntitlement.ACCEPT));
+
+        assertThat(listenEntitlement.matchActions(0), is(true));
+        assertThat(listenEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION), is(true));
+        assertThat(listenEntitlement.matchActions(NetworkEntitlement.ACCEPT_ACTION), is(false));
+        assertThat(listenEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION), is(false));
+        assertThat(listenEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(false));
+        assertThat(listenEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION), is(false));
+        assertThat(listenEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(false));
+
+        assertThat(connectAcceptEntitlement.matchActions(0), is(true));
+        assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION), is(false));
+        assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.ACCEPT_ACTION), is(true));
+        assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION), is(true));
+        assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(false));
+        assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION), is(false));
+        assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(true));
+
+        assertThat(emptyEntitlement.matchActions(0), is(true));
+        assertThat(emptyEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION), is(false));
+        assertThat(emptyEntitlement.matchActions(NetworkEntitlement.ACCEPT_ACTION), is(false));
+        assertThat(emptyEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION), is(false));
+        assertThat(emptyEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(false));
+        assertThat(emptyEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION), is(false));
+        assertThat(emptyEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(false));
+    }
+}
@@ -38,7 +38,7 @@ import static org.hamcrest.Matchers.sameInstance;
 public class PolicyManagerTests extends ESTestCase {
     /**
      * A module you can use for test cases that don't actually care about the
-     * entitlements module.
+     * entitlement module.
     */
     private static Module NO_ENTITLEMENTS_MODULE;
 
@@ -66,7 +66,11 @@ public class PolicyManagerTests extends ESTestCase {
         var callerClass = this.getClass();
         var requestingModule = callerClass.getModule();
 
-        assertEquals("No policy for the unnamed module", ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass));
+        assertEquals(
+            "No policy for the unnamed module",
+            ModuleEntitlements.NONE,
+            policyManager.getEntitlements(callerClass, Entitlement.class)
+        );
 
         assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap);
     }
@@ -78,7 +82,7 @@ public class PolicyManagerTests extends ESTestCase {
         var callerClass = this.getClass();
         var requestingModule = callerClass.getModule();
 
-        assertEquals("No policy for this plugin", ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass));
+        assertEquals("No policy for this plugin", ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass, Entitlement.class));
 
         assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap);
     }
@@ -90,11 +94,11 @@ public class PolicyManagerTests extends ESTestCase {
         var callerClass = this.getClass();
         var requestingModule = callerClass.getModule();
 
-        assertEquals(ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass));
+        assertEquals(ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass, Entitlement.class));
         assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap);
 
         // A second time
-        assertEquals(ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass));
+        assertEquals(ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass, Entitlement.class));
 
         // Nothing new in the map
         assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap);
@@ -112,7 +116,7 @@ public class PolicyManagerTests extends ESTestCase {
         // Any class from the current module (unnamed) will do
         var callerClass = this.getClass();
 
-        var entitlements = policyManager.getEntitlements(callerClass);
+        var entitlements = policyManager.getEntitlements(callerClass, Entitlement.class);
         assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true));
     }
 
@@ -126,7 +130,11 @@ public class PolicyManagerTests extends ESTestCase {
         var mockServerClass = ModuleLayer.boot().findLoader("jdk.httpserver").loadClass("com.sun.net.httpserver.HttpServer");
         var requestingModule = mockServerClass.getModule();
 
-        assertEquals("No policy for this module in server", ModuleEntitlements.NONE, policyManager.getEntitlements(mockServerClass));
+        assertEquals(
+            "No policy for this module in server",
+            ModuleEntitlements.NONE,
+            policyManager.getEntitlements(mockServerClass, Entitlement.class)
+        );
 
         assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap);
     }
@@ -145,9 +153,8 @@ public class PolicyManagerTests extends ESTestCase {
         // So we use a random module in the boot layer, and a random class from that module (not java.base -- it is
         // loaded too early) to mimic a class that would be in the server module.
         var mockServerClass = ModuleLayer.boot().findLoader("jdk.httpserver").loadClass("com.sun.net.httpserver.HttpServer");
-        var requestingModule = mockServerClass.getModule();
 
-        var entitlements = policyManager.getEntitlements(mockServerClass);
+        var entitlements = policyManager.getEntitlements(mockServerClass, Entitlement.class);
         assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true));
         assertThat(entitlements.hasEntitlement(ExitVMEntitlement.class), is(true));
     }
@@ -167,9 +174,8 @@ public class PolicyManagerTests extends ESTestCase {
 
         var layer = createLayerForJar(jar, "org.example.plugin");
         var mockPluginClass = layer.findLoader("org.example.plugin").loadClass("q.B");
-        var requestingModule = mockPluginClass.getModule();
 
-        var entitlements = policyManager.getEntitlements(mockPluginClass);
+        var entitlements = policyManager.getEntitlements(mockPluginClass, Entitlement.class);
         assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true));
         assertThat(
             entitlements.getEntitlements(FileEntitlement.class).toList(),
@@ -189,11 +195,11 @@ public class PolicyManagerTests extends ESTestCase {
         // Any class from the current module (unnamed) will do
         var callerClass = this.getClass();
 
-        var entitlements = policyManager.getEntitlements(callerClass);
+        var entitlements = policyManager.getEntitlements(callerClass, Entitlement.class);
         assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true));
         assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1));
-        var cachedResult = policyManager.moduleEntitlementsMap.values().stream().findFirst().get();
-        var entitlementsAgain = policyManager.getEntitlements(callerClass);
+        var cachedResult = policyManager.moduleEntitlementsMap.values().stream().findFirst().orElseThrow();
+        var entitlementsAgain = policyManager.getEntitlements(callerClass, Entitlement.class);
 
         // Nothing new in the map
         assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1));
@@ -52,6 +52,22 @@ public class PolicyParserTests extends ESTestCase {
         assertEquals(expected, parsedPolicy);
     }
 
+    public void testParseNetwork() throws IOException {
+        Policy parsedPolicy = new PolicyParser(new ByteArrayInputStream("""
+            entitlement-module-name:
+              - network:
+                  actions:
+                    - listen
+                    - accept
+                    - connect
+            """.getBytes(StandardCharsets.UTF_8)), "test-policy.yaml", false).parsePolicy();
+        Policy expected = new Policy(
+            "test-policy.yaml",
+            List.of(new Scope("entitlement-module-name", List.of(new NetworkEntitlement(List.of("listen", "accept", "connect")))))
+        );
+        assertEquals(expected, parsedPolicy);
+    }
+
     public void testParseCreateClassloader() throws IOException {
         Policy parsedPolicy = new PolicyParser(new ByteArrayInputStream("""
             entitlement-module-name:
@@ -92,14 +92,7 @@ public class APM extends Plugin implements NetworkPlugin, TelemetryPlugin {
             APMAgentSettings.TELEMETRY_TRACING_ENABLED_SETTING,
             APMAgentSettings.TELEMETRY_TRACING_NAMES_INCLUDE_SETTING,
             APMAgentSettings.TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING,
-            APMAgentSettings.TELEMETRY_TRACING_SANITIZE_FIELD_NAMES,
-            // The settings below are deprecated and are currently kept as fallback.
-            APMAgentSettings.TRACING_APM_SECRET_TOKEN_SETTING,
-            APMAgentSettings.TRACING_APM_API_KEY_SETTING,
-            APMAgentSettings.TRACING_APM_ENABLED_SETTING,
-            APMAgentSettings.TRACING_APM_NAMES_INCLUDE_SETTING,
-            APMAgentSettings.TRACING_APM_NAMES_EXCLUDE_SETTING,
-            APMAgentSettings.TRACING_APM_SANITIZE_FIELD_NAMES
+            APMAgentSettings.TELEMETRY_TRACING_SANITIZE_FIELD_NAMES
         );
     }
 }
@ -25,9 +25,7 @@ import java.security.PrivilegedAction;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Objects;
|
import java.util.Objects;
|
||||||
import java.util.Set;
|
import java.util.Set;
|
||||||
import java.util.function.Function;
|
|
||||||
|
|
||||||
import static org.elasticsearch.common.settings.Setting.Property.Deprecated;
|
|
||||||
import static org.elasticsearch.common.settings.Setting.Property.NodeScope;
|
import static org.elasticsearch.common.settings.Setting.Property.NodeScope;
|
||||||
import static org.elasticsearch.common.settings.Setting.Property.OperatorDynamic;
|
import static org.elasticsearch.common.settings.Setting.Property.OperatorDynamic;
|
||||||
|
|
||||||
|
@ -101,9 +99,6 @@ public class APMAgentSettings {
|
||||||
|
|
||||||
     private static final String TELEMETRY_SETTING_PREFIX = "telemetry.";
 
-    // The old legacy prefix
-    private static final String LEGACY_TRACING_APM_SETTING_PREFIX = "tracing.apm.";
-
     /**
      * Allow-list of APM agent config keys users are permitted to configure.
      * @see <a href="https://www.elastic.co/guide/en/apm/agent/java/current/configuration.html">APM Java Agent Configuration</a>
@@ -248,56 +243,24 @@ public class APMAgentSettings {
 
     public static final Setting.AffixSetting<String> APM_AGENT_SETTINGS = Setting.prefixKeySetting(
         TELEMETRY_SETTING_PREFIX + "agent.",
-        LEGACY_TRACING_APM_SETTING_PREFIX + "agent.",
-        (namespace, qualifiedKey) -> qualifiedKey.startsWith(LEGACY_TRACING_APM_SETTING_PREFIX)
-            ? concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic, Deprecated)
-            : concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic)
+        null, // no fallback
+        (namespace, qualifiedKey) -> concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic)
     );
 
-    /**
-     * @deprecated in favor of TELEMETRY_TRACING_NAMES_INCLUDE_SETTING.
-     */
-    @Deprecated
-    public static final Setting<List<String>> TRACING_APM_NAMES_INCLUDE_SETTING = Setting.stringListSetting(
-        LEGACY_TRACING_APM_SETTING_PREFIX + "names.include",
-        OperatorDynamic,
-        NodeScope,
-        Deprecated
-    );
-
-    public static final Setting<List<String>> TELEMETRY_TRACING_NAMES_INCLUDE_SETTING = Setting.listSetting(
+    public static final Setting<List<String>> TELEMETRY_TRACING_NAMES_INCLUDE_SETTING = Setting.stringListSetting(
         TELEMETRY_SETTING_PREFIX + "tracing.names.include",
-        TRACING_APM_NAMES_INCLUDE_SETTING,
-        Function.identity(),
         OperatorDynamic,
         NodeScope
     );
 
-    /**
-     * @deprecated in favor of TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING.
-     */
-    @Deprecated
-    public static final Setting<List<String>> TRACING_APM_NAMES_EXCLUDE_SETTING = Setting.stringListSetting(
-        LEGACY_TRACING_APM_SETTING_PREFIX + "names.exclude",
-        OperatorDynamic,
-        NodeScope,
-        Deprecated
-    );
-
-    public static final Setting<List<String>> TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING = Setting.listSetting(
+    public static final Setting<List<String>> TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING = Setting.stringListSetting(
         TELEMETRY_SETTING_PREFIX + "tracing.names.exclude",
-        TRACING_APM_NAMES_EXCLUDE_SETTING,
-        Function.identity(),
         OperatorDynamic,
         NodeScope
     );
 
-    /**
-     * @deprecated in favor of TELEMETRY_TRACING_SANITIZE_FIELD_NAMES.
-     */
-    @Deprecated
-    public static final Setting<List<String>> TRACING_APM_SANITIZE_FIELD_NAMES = Setting.stringListSetting(
-        LEGACY_TRACING_APM_SETTING_PREFIX + "sanitize_field_names",
+    public static final Setting<List<String>> TELEMETRY_TRACING_SANITIZE_FIELD_NAMES = Setting.stringListSetting(
+        TELEMETRY_SETTING_PREFIX + "tracing.sanitize_field_names",
         List.of(
             "password",
             "passwd",
@@ -313,33 +276,12 @@ public class APMAgentSettings {
             "set-cookie"
         ),
         OperatorDynamic,
-        NodeScope,
-        Deprecated
-    );
-
-    public static final Setting<List<String>> TELEMETRY_TRACING_SANITIZE_FIELD_NAMES = Setting.listSetting(
-        TELEMETRY_SETTING_PREFIX + "tracing.sanitize_field_names",
-        TRACING_APM_SANITIZE_FIELD_NAMES,
-        Function.identity(),
-        OperatorDynamic,
         NodeScope
     );
 
-    /**
-     * @deprecated in favor of TELEMETRY_TRACING_ENABLED_SETTING.
-     */
-    @Deprecated
-    public static final Setting<Boolean> TRACING_APM_ENABLED_SETTING = Setting.boolSetting(
-        LEGACY_TRACING_APM_SETTING_PREFIX + "enabled",
-        false,
-        OperatorDynamic,
-        NodeScope,
-        Deprecated
-    );
-
     public static final Setting<Boolean> TELEMETRY_TRACING_ENABLED_SETTING = Setting.boolSetting(
         TELEMETRY_SETTING_PREFIX + "tracing.enabled",
-        TRACING_APM_ENABLED_SETTING,
+        false,
         OperatorDynamic,
         NodeScope
     );
@@ -351,33 +293,13 @@ public class APMAgentSettings {
         NodeScope
     );
 
-    /**
-     * @deprecated in favor of TELEMETRY_SECRET_TOKEN_SETTING.
-     */
-    @Deprecated
-    public static final Setting<SecureString> TRACING_APM_SECRET_TOKEN_SETTING = SecureSetting.secureString(
-        LEGACY_TRACING_APM_SETTING_PREFIX + "secret_token",
-        null,
-        Deprecated
-    );
-
     public static final Setting<SecureString> TELEMETRY_SECRET_TOKEN_SETTING = SecureSetting.secureString(
         TELEMETRY_SETTING_PREFIX + "secret_token",
-        TRACING_APM_SECRET_TOKEN_SETTING
-    );
-
-    /**
-     * @deprecated in favor of TELEMETRY_API_KEY_SETTING.
-     */
-    @Deprecated
-    public static final Setting<SecureString> TRACING_APM_API_KEY_SETTING = SecureSetting.secureString(
-        LEGACY_TRACING_APM_SETTING_PREFIX + "api_key",
-        null,
-        Deprecated
+        null
     );
 
     public static final Setting<SecureString> TELEMETRY_API_KEY_SETTING = SecureSetting.secureString(
         TELEMETRY_SETTING_PREFIX + "api_key",
-        TRACING_APM_API_KEY_SETTING
+        null
     );
 }
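
The net effect of the APMAgentSettings hunks above: every deprecated tracing.apm.* setting and its fallback wiring is removed, so only the telemetry.* keys remain valid. A minimal sketch of configuring and reading the consolidated settings (illustrative only, not part of this diff; the constants are the ones defined above, the keys and values are hypothetical):

    // Hypothetical usage sketch; the setting keys/constants are the ones this diff defines.
    Settings settings = Settings.builder()
        .put("telemetry.tracing.enabled", true)                      // formerly tracing.apm.enabled
        .putList("telemetry.tracing.names.include", "GET /users/*")  // formerly tracing.apm.names.include
        .build();
    boolean tracingEnabled = APMAgentSettings.TELEMETRY_TRACING_ENABLED_SETTING.get(settings);
    List<String> include = APMAgentSettings.TELEMETRY_TRACING_NAMES_INCLUDE_SETTING.get(settings);
    // A legacy key such as tracing.apm.enabled is now rejected as an unknown setting
    // instead of silently feeding the telemetry.* equivalent.
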
@@ -11,8 +11,6 @@ package org.elasticsearch.telemetry.apm.internal;
 
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.settings.ClusterSettings;
-import org.elasticsearch.common.settings.MockSecureSettings;
-import org.elasticsearch.common.settings.SecureString;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.test.ESTestCase;
 import org.mockito.Mockito;
@@ -21,21 +19,13 @@ import java.util.List;
 import java.util.Set;
 
 import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.APM_AGENT_SETTINGS;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_API_KEY_SETTING;
 import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_SECRET_TOKEN_SETTING;
 import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_ENABLED_SETTING;
 import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING;
 import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_NAMES_INCLUDE_SETTING;
 import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_SANITIZE_FIELD_NAMES;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_API_KEY_SETTING;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_ENABLED_SETTING;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_NAMES_EXCLUDE_SETTING;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_NAMES_INCLUDE_SETTING;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_SANITIZE_FIELD_NAMES;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_SECRET_TOKEN_SETTING;
-import static org.hamcrest.Matchers.containsInAnyOrder;
 import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.hasEntry;
 import static org.hamcrest.Matchers.hasItem;
 import static org.mockito.Mockito.clearInvocations;
 import static org.mockito.Mockito.mock;
@@ -70,14 +60,6 @@ public class APMAgentSettingsTests extends ESTestCase {
         }
     }
 
-    public void testEnableTracingUsingLegacySetting() {
-        Settings settings = Settings.builder().put(TRACING_APM_ENABLED_SETTING.getKey(), true).build();
-        apmAgentSettings.initAgentSystemProperties(settings);
-
-        verify(apmAgentSettings).setAgentSetting("recording", "true");
-        assertWarnings("[tracing.apm.enabled] setting was deprecated in Elasticsearch and will be removed in a future release.");
-    }
-
     public void testEnableMetrics() {
         for (boolean tracingEnabled : List.of(true, false)) {
             clearInvocations(apmAgentSettings, apmTelemetryProvider.getMeterService());
@@ -121,14 +103,6 @@ public class APMAgentSettingsTests extends ESTestCase {
         }
     }
 
-    public void testDisableTracingUsingLegacySetting() {
-        Settings settings = Settings.builder().put(TRACING_APM_ENABLED_SETTING.getKey(), false).build();
-        apmAgentSettings.initAgentSystemProperties(settings);
-
-        verify(apmAgentSettings).setAgentSetting("recording", "false");
-        assertWarnings("[tracing.apm.enabled] setting was deprecated in Elasticsearch and will be removed in a future release.");
-    }
-
     public void testDisableMetrics() {
         for (boolean tracingEnabled : List.of(true, false)) {
             clearInvocations(apmAgentSettings, apmTelemetryProvider.getMeterService());
@@ -181,70 +155,18 @@ public class APMAgentSettingsTests extends ESTestCase {
         verify(apmAgentSettings).setAgentSetting("span_compression_enabled", "true");
     }
 
-    public void testSetAgentsSettingsWithLegacyPrefix() {
-        Settings settings = Settings.builder()
-            .put(TELEMETRY_TRACING_ENABLED_SETTING.getKey(), true)
-            .put("tracing.apm.agent.span_compression_enabled", "true")
-            .build();
-        apmAgentSettings.initAgentSystemProperties(settings);
-
-        verify(apmAgentSettings).setAgentSetting("recording", "true");
-        verify(apmAgentSettings).setAgentSetting("span_compression_enabled", "true");
-        assertWarnings(
-            "[tracing.apm.agent.span_compression_enabled] setting was deprecated in Elasticsearch and will be removed in a future release."
-        );
-    }
-
     /**
      * Check that invalid or forbidden APM agent settings are rejected.
      */
     public void testRejectForbiddenOrUnknownAgentSettings() {
-        List<String> prefixes = List.of(APM_AGENT_SETTINGS.getKey(), "tracing.apm.agent.");
-        for (String prefix : prefixes) {
+        String prefix = APM_AGENT_SETTINGS.getKey();
         Settings settings = Settings.builder().put(prefix + "unknown", "true").build();
         Exception exception = expectThrows(IllegalArgumentException.class, () -> APM_AGENT_SETTINGS.getAsMap(settings));
         assertThat(exception.getMessage(), containsString("[" + prefix + "unknown]"));
-        }
         // though, accept / ignore nested global_labels
-        for (String prefix : prefixes) {
-            Settings settings = Settings.builder().put(prefix + "global_labels.abc", "123").build();
-            APMAgentSettings.APM_AGENT_SETTINGS.getAsMap(settings);
-
-            if (prefix.startsWith("tracing.apm.agent.")) {
-                assertWarnings(
-                    "[tracing.apm.agent.global_labels.abc] setting was deprecated in Elasticsearch and will be removed in a future release."
-                );
-            }
-        }
-    }
-
-    public void testTelemetryTracingNamesIncludeFallback() {
-        Settings settings = Settings.builder().put(TRACING_APM_NAMES_INCLUDE_SETTING.getKey(), "abc,xyz").build();
-
-        List<String> included = TELEMETRY_TRACING_NAMES_INCLUDE_SETTING.get(settings);
-
-        assertThat(included, containsInAnyOrder("abc", "xyz"));
-        assertWarnings("[tracing.apm.names.include] setting was deprecated in Elasticsearch and will be removed in a future release.");
-    }
-
-    public void testTelemetryTracingNamesExcludeFallback() {
-        Settings settings = Settings.builder().put(TRACING_APM_NAMES_EXCLUDE_SETTING.getKey(), "abc,xyz").build();
-
-        List<String> included = TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING.get(settings);
-
-        assertThat(included, containsInAnyOrder("abc", "xyz"));
-        assertWarnings("[tracing.apm.names.exclude] setting was deprecated in Elasticsearch and will be removed in a future release.");
-    }
-
-    public void testTelemetryTracingSanitizeFieldNamesFallback() {
-        Settings settings = Settings.builder().put(TRACING_APM_SANITIZE_FIELD_NAMES.getKey(), "abc,xyz").build();
-
-        List<String> included = TELEMETRY_TRACING_SANITIZE_FIELD_NAMES.get(settings);
-
-        assertThat(included, containsInAnyOrder("abc", "xyz"));
-        assertWarnings(
-            "[tracing.apm.sanitize_field_names] setting was deprecated in Elasticsearch and will be removed in a future release."
-        );
+        var map = APMAgentSettings.APM_AGENT_SETTINGS.getAsMap(Settings.builder().put(prefix + "global_labels.abc", "123").build());
+        assertThat(map, hasEntry("global_labels.abc", "123"));
     }
 
     public void testTelemetryTracingSanitizeFieldNamesFallbackDefault() {
@@ -252,28 +174,6 @@ public class APMAgentSettingsTests extends ESTestCase {
         assertThat(included, hasItem("password")); // and more defaults
     }
 
-    public void testTelemetrySecretTokenFallback() {
-        MockSecureSettings secureSettings = new MockSecureSettings();
-        secureSettings.setString(TRACING_APM_SECRET_TOKEN_SETTING.getKey(), "verysecret");
-        Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
-
-        try (SecureString secureString = TELEMETRY_SECRET_TOKEN_SETTING.get(settings)) {
-            assertEquals("verysecret", secureString.toString());
-        }
-        assertWarnings("[tracing.apm.secret_token] setting was deprecated in Elasticsearch and will be removed in a future release.");
-    }
-
-    public void testTelemetryApiKeyFallback() {
-        MockSecureSettings secureSettings = new MockSecureSettings();
-        secureSettings.setString(TRACING_APM_API_KEY_SETTING.getKey(), "abc");
-        Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
-
-        try (SecureString secureString = TELEMETRY_API_KEY_SETTING.get(settings)) {
-            assertEquals("abc", secureString.toString());
-        }
-        assertWarnings("[tracing.apm.api_key] setting was deprecated in Elasticsearch and will be removed in a future release.");
-    }
-
     /**
      * Check that invalid or forbidden APM agent settings are rejected if their last part resembles an allowed setting.
      */
@@ -182,7 +182,8 @@ public class DataStreamIT extends ESIntegTestCase {
 
         String backingIndex = barDataStream.getIndices().get(0).getName();
         backingIndices.add(backingIndex);
-        GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet();
+        GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex))
+            .actionGet();
         assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue());
         assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true));
         Map<?, ?> mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap();
@@ -190,7 +191,7 @@ public class DataStreamIT extends ESIntegTestCase {
 
         backingIndex = fooDataStream.getIndices().get(0).getName();
         backingIndices.add(backingIndex);
-        getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet();
+        getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex)).actionGet();
         assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue());
         assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true));
         mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap();
@@ -214,7 +215,7 @@ public class DataStreamIT extends ESIntegTestCase {
 
         backingIndex = fooRolloverResponse.getNewIndex();
         backingIndices.add(backingIndex);
-        getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet();
+        getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex)).actionGet();
         assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue());
         assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true));
         mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap();
@@ -222,7 +223,7 @@ public class DataStreamIT extends ESIntegTestCase {
 
         backingIndex = barRolloverResponse.getNewIndex();
         backingIndices.add(backingIndex);
-        getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet();
+        getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex)).actionGet();
         assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue());
         assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true));
         mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap();
@@ -245,7 +246,7 @@ public class DataStreamIT extends ESIntegTestCase {
             expectThrows(
                 IndexNotFoundException.class,
                 "Backing index '" + index + "' should have been deleted.",
-                () -> indicesAdmin().getIndex(new GetIndexRequest().indices(index)).actionGet()
+                () -> indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(index)).actionGet()
             );
         }
     }
@@ -479,7 +480,8 @@ public class DataStreamIT extends ESIntegTestCase {
         String backingIndex = getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().get(0).getName();
         assertThat(backingIndex, backingIndexEqualTo(dataStreamName, 1));
 
-        GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(dataStreamName)).actionGet();
+        GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(dataStreamName))
+            .actionGet();
         assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue());
         assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true));
         assertThat(
@@ -492,7 +494,7 @@ public class DataStreamIT extends ESIntegTestCase {
         assertThat(backingIndex, backingIndexEqualTo(dataStreamName, 2));
         assertTrue(rolloverResponse.isRolledOver());
 
-        getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet();
+        getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex)).actionGet();
         assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue());
         assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true));
         assertThat(
@@ -518,7 +520,7 @@ public class DataStreamIT extends ESIntegTestCase {
             expectThrows(
                 IndexNotFoundException.class,
                 "Backing index '" + index.getName() + "' should have been deleted.",
-                () -> indicesAdmin().getIndex(new GetIndexRequest().indices(index.getName())).actionGet()
+                () -> indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(index.getName())).actionGet()
             );
         }
     }
@@ -596,7 +598,7 @@ public class DataStreamIT extends ESIntegTestCase {
         verifyResolvability(dataStreamName, indicesAdmin().prepareGetFieldMappings(dataStreamName), false);
         verifyResolvability(dataStreamName, indicesAdmin().preparePutMapping(dataStreamName).setSource("""
             {"_doc":{"properties": {"my_field":{"type":"keyword"}}}}""", XContentType.JSON), false);
-        verifyResolvability(dataStreamName, indicesAdmin().prepareGetMappings(dataStreamName), false);
+        verifyResolvability(dataStreamName, indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, dataStreamName), false);
         verifyResolvability(
             dataStreamName,
             indicesAdmin().prepareUpdateSettings(dataStreamName).setSettings(Settings.builder().put("index.number_of_replicas", 0)),
@@ -606,7 +608,7 @@ public class DataStreamIT extends ESIntegTestCase {
         verifyResolvability(dataStreamName, clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, dataStreamName), false);
         verifyResolvability(dataStreamName, clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setIndices(dataStreamName), false);
         verifyResolvability(dataStreamName, client().prepareFieldCaps(dataStreamName).setFields("*"), false);
-        verifyResolvability(dataStreamName, indicesAdmin().prepareGetIndex().addIndices(dataStreamName), false);
+        verifyResolvability(dataStreamName, indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(dataStreamName), false);
         verifyResolvability(dataStreamName, indicesAdmin().prepareOpen(dataStreamName), false);
         verifyResolvability(dataStreamName, indicesAdmin().prepareClose(dataStreamName), true);
         verifyResolvability(aliasToDataStream, indicesAdmin().prepareClose(aliasToDataStream), true);
@@ -643,7 +645,7 @@ public class DataStreamIT extends ESIntegTestCase {
         verifyResolvability(wildcardExpression, indicesAdmin().prepareGetFieldMappings(wildcardExpression), false);
         verifyResolvability(wildcardExpression, indicesAdmin().preparePutMapping(wildcardExpression).setSource("""
             {"_doc":{"properties": {"my_field":{"type":"keyword"}}}}""", XContentType.JSON), false);
-        verifyResolvability(wildcardExpression, indicesAdmin().prepareGetMappings(wildcardExpression), false);
+        verifyResolvability(wildcardExpression, indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, wildcardExpression), false);
         verifyResolvability(wildcardExpression, indicesAdmin().prepareGetSettings(wildcardExpression), false);
         verifyResolvability(
             wildcardExpression,
@@ -653,7 +655,7 @@ public class DataStreamIT extends ESIntegTestCase {
         verifyResolvability(wildcardExpression, clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, wildcardExpression), false);
         verifyResolvability(wildcardExpression, clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setIndices(wildcardExpression), false);
         verifyResolvability(wildcardExpression, client().prepareFieldCaps(wildcardExpression).setFields("*"), false);
-        verifyResolvability(wildcardExpression, indicesAdmin().prepareGetIndex().addIndices(wildcardExpression), false);
+        verifyResolvability(wildcardExpression, indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(wildcardExpression), false);
         verifyResolvability(wildcardExpression, indicesAdmin().prepareOpen(wildcardExpression), false);
         verifyResolvability(wildcardExpression, indicesAdmin().prepareClose(wildcardExpression), false);
         verifyResolvability(
@@ -1180,7 +1182,7 @@ public class DataStreamIT extends ESIntegTestCase {
             DataStreamTimestampFieldMapper.NAME,
             Map.of("enabled", true)
         );
-        GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("logs-foobar").get();
+        GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "logs-foobar").get();
         assertThat(getMappingsResponse.getMappings().size(), equalTo(2));
         assertThat(getMappingsResponse.getMappings().get(backingIndex1).getSourceAsMap(), equalTo(expectedMapping));
         assertThat(getMappingsResponse.getMappings().get(backingIndex2).getSourceAsMap(), equalTo(expectedMapping));
@@ -1195,7 +1197,7 @@ public class DataStreamIT extends ESIntegTestCase {
             .setSource("{\"properties\":{\"my_field\":{\"type\":\"keyword\"}}}", XContentType.JSON)
             .get();
         // The mappings of all backing indices should be updated:
-        getMappingsResponse = indicesAdmin().prepareGetMappings("logs-foobar").get();
+        getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "logs-foobar").get();
         assertThat(getMappingsResponse.getMappings().size(), equalTo(2));
         assertThat(getMappingsResponse.getMappings().get(backingIndex1).getSourceAsMap(), equalTo(expectedMapping));
         assertThat(getMappingsResponse.getMappings().get(backingIndex2).getSourceAsMap(), equalTo(expectedMapping));
@@ -1401,7 +1403,8 @@ public class DataStreamIT extends ESIntegTestCase {
     }
 
     private static void assertBackingIndex(String backingIndex, String timestampFieldPathInMapping, Map<?, ?> expectedMapping) {
-        GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet();
+        GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex))
+            .actionGet();
         assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue());
         assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true));
         Map<?, ?> mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap();
@@ -1488,7 +1491,8 @@ public class DataStreamIT extends ESIntegTestCase {
         assertThat(getDataStreamsResponse.getDataStreams().get(2).getDataStream().getName(), equalTo("logs-foobaz2"));
         assertThat(getDataStreamsResponse.getDataStreams().get(3).getDataStream().getName(), equalTo("logs-foobaz3"));
 
-        GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices("logs-bar*")).actionGet();
+        GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices("logs-bar*"))
+            .actionGet();
         assertThat(getIndexResponse.getIndices(), arrayWithSize(4));
         assertThat(getIndexResponse.getIndices(), hasItemInArray("logs-barbaz"));
         assertThat(getIndexResponse.getIndices(), hasItemInArray("logs-barfoo"));
@@ -1521,7 +1525,8 @@ public class DataStreamIT extends ESIntegTestCase {
             .actionGet();
         assertThat(getDataStreamsResponse.getDataStreams(), hasSize(0));
 
-        GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices("logs-foobar")).actionGet();
+        GetIndexResponse getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices("logs-foobar"))
+            .actionGet();
         assertThat(getIndexResponse.getIndices(), arrayWithSize(1));
         assertThat(getIndexResponse.getIndices(), hasItemInArray("logs-foobar"));
         assertThat(getIndexResponse.getSettings().get("logs-foobar").get(IndexMetadata.SETTING_NUMBER_OF_REPLICAS), equalTo("0"));
@@ -1657,7 +1662,7 @@ public class DataStreamIT extends ESIntegTestCase {
                 .actionGet();
             String newBackingIndexName = getDataStreamResponse.getDataStreams().get(0).getDataStream().getWriteIndex().getName();
             assertThat(newBackingIndexName, backingIndexEqualTo("potato-biscuit", 2));
-            indicesAdmin().prepareGetIndex().addIndices(newBackingIndexName).get();
+            indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(newBackingIndexName).get();
         } catch (Exception e) {
             logger.info("--> expecting second index to be created but it has not yet been created");
             fail("expecting second index to exist");
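
The DataStreamIT changes above are one mechanical refactor: GetIndexRequest, prepareGetIndex and prepareGetMappings now take an explicit master-node timeout rather than relying on an implicit default, and the tests thread TEST_REQUEST_TIMEOUT through every call. A hedged sketch of the same call outside the test suite, where no TEST_REQUEST_TIMEOUT constant exists and the caller picks the TimeValue (this assumes only the constructor shape shown in this diff; index name and timeout are illustrative):

    // Hypothetical caller; "my-index" and the 30-second timeout are made up for illustration.
    TimeValue masterTimeout = TimeValue.timeValueSeconds(30);
    GetIndexResponse response = client.admin()
        .indices()
        .getIndex(new GetIndexRequest(masterTimeout).indices("my-index"))
        .actionGet();
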
@@ -1304,7 +1304,7 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {
         assertEquals(RestStatus.OK, restoreSnapshotResponse.status());
 
         assertThat(getDataStreamInfo("*"), hasSize(3));
-        assertNotNull(client.admin().indices().prepareGetIndex().setIndices(indexName).get());
+        assertNotNull(client.admin().indices().prepareGetIndex(TEST_REQUEST_TIMEOUT).setIndices(indexName).get());
     }
 
     public void testRestoreDataStreamAliasWithConflictingDataStream() throws Exception {
@@ -50,7 +50,7 @@ public class DataTierDataStreamIT extends ESIntegTestCase {
             .setWaitForActiveShards(0)
             .get()
             .getIndex();
-        var idxSettings = indicesAdmin().prepareGetIndex().addIndices(index).get().getSettings().get(dsIndexName);
+        var idxSettings = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(index).get().getSettings().get(dsIndexName);
         assertThat(DataTier.TIER_PREFERENCE_SETTING.get(idxSettings), equalTo(DataTier.DATA_HOT));
 
         logger.info("--> waiting for {} to be yellow", index);
@@ -62,7 +62,7 @@ public class DataTierDataStreamIT extends ESIntegTestCase {
         // new index name should have the rolled over name
         assertNotEquals(dsIndexName, rolledOverIndexName);
 
-        idxSettings = indicesAdmin().prepareGetIndex().addIndices(index).get().getSettings().get(rolledOverIndexName);
+        idxSettings = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices(index).get().getSettings().get(rolledOverIndexName);
         assertThat(DataTier.TIER_PREFERENCE_SETTING.get(idxSettings), equalTo(DataTier.DATA_HOT));
     }
 
@@ -341,7 +341,10 @@ public class ResolveClusterDataStreamIT extends AbstractMultiClustersTestCase {
         DataStream fooDataStream = getDataStreamResponse.getDataStreams().get(0).getDataStream();
         String backingIndex = fooDataStream.getIndices().get(0).getName();
         backingIndices.add(backingIndex);
-        GetIndexResponse getIndexResponse = client.admin().indices().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet();
+        GetIndexResponse getIndexResponse = client.admin()
+            .indices()
+            .getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex))
+            .actionGet();
         assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue());
         assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true));
         Map<?, ?> mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap();
@@ -377,7 +380,10 @@ public class ResolveClusterDataStreamIT extends AbstractMultiClustersTestCase {
         DataStream barDataStream = getDataStreamResponse.getDataStreams().get(0).getDataStream();
         String backingIndex = barDataStream.getIndices().get(0).getName();
         backingIndices.add(backingIndex);
-        GetIndexResponse getIndexResponse = client.admin().indices().getIndex(new GetIndexRequest().indices(backingIndex)).actionGet();
+        GetIndexResponse getIndexResponse = client.admin()
+            .indices()
+            .getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndex))
+            .actionGet();
         assertThat(getIndexResponse.getSettings().get(backingIndex), notNullValue());
         assertThat(getIndexResponse.getSettings().get(backingIndex).getAsBoolean("index.hidden", null), is(true));
         Map<?, ?> mappings = getIndexResponse.getMappings().get(backingIndex).getSourceAsMap();
@@ -104,7 +104,7 @@ public class SystemDataStreamSnapshotIT extends AbstractSnapshotIntegTestCase {
         }
 
         {
-            GetIndexResponse indicesRemaining = indicesAdmin().prepareGetIndex().addIndices("_all").get();
+            GetIndexResponse indicesRemaining = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("_all").get();
             assertThat(indicesRemaining.indices(), arrayWithSize(0));
             assertSystemDataStreamDoesNotExist();
         }
@@ -236,7 +236,7 @@ public class SystemDataStreamSnapshotIT extends AbstractSnapshotIntegTestCase {
         assertAcked(indicesAdmin().prepareDelete("my-index"));
 
         {
-            GetIndexResponse indicesRemaining = indicesAdmin().prepareGetIndex().addIndices("_all").get();
+            GetIndexResponse indicesRemaining = indicesAdmin().prepareGetIndex(TEST_REQUEST_TIMEOUT).addIndices("_all").get();
             assertThat(indicesRemaining.indices(), arrayWithSize(0));
         }
 
@@ -155,7 +155,7 @@ public class TSDBIndexingIT extends ESSingleNodeTestCase {
         }
 
         // fetch end time
-        var getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(backingIndexName)).actionGet();
+        var getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(backingIndexName)).actionGet();
         Instant endTime = IndexSettings.TIME_SERIES_END_TIME.get(getIndexResponse.getSettings().get(backingIndexName));
 
         // index another doc and verify index
@@ -194,7 +194,7 @@ public class TSDBIndexingIT extends ESSingleNodeTestCase {
         var newBackingIndexName = rolloverResponse.getNewIndex();
 
         // index and check target index is new
-        getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest().indices(newBackingIndexName)).actionGet();
+        getIndexResponse = indicesAdmin().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(newBackingIndexName)).actionGet();
         Instant newStartTime = IndexSettings.TIME_SERIES_START_TIME.get(getIndexResponse.getSettings().get(newBackingIndexName));
         Instant newEndTime = IndexSettings.TIME_SERIES_END_TIME.get(getIndexResponse.getSettings().get(newBackingIndexName));
 
@@ -183,7 +183,7 @@ public class TSDBPassthroughIndexingIT extends ESSingleNodeTestCase {
         }
 
         // validate index:
-        var getIndexResponse = client().admin().indices().getIndex(new GetIndexRequest().indices(index)).actionGet();
+        var getIndexResponse = client().admin().indices().getIndex(new GetIndexRequest(TEST_REQUEST_TIMEOUT).indices(index)).actionGet();
         assertThat(getIndexResponse.getSettings().get(index).get("index.routing_path"), equalTo("[attributes.*]"));
         // validate mapping
         var mapping = getIndexResponse.mappings().get(index).getSourceAsMap();
@@ -9,10 +9,6 @@
 
 package org.elasticsearch.datastreams;
 
-import org.elasticsearch.action.admin.indices.rollover.LazyRolloverAction;
-import org.elasticsearch.action.datastreams.autosharding.DataStreamAutoShardingService;
-import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention;
-import org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthInfoPublisher;
 import org.elasticsearch.features.FeatureSpecification;
 import org.elasticsearch.features.NodeFeature;
 
@@ -27,12 +23,7 @@ public class DataStreamFeatures implements FeatureSpecification {
 
     @Override
     public Set<NodeFeature> getFeatures() {
-        return Set.of(
-            DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE, // Added in 8.12
-            LazyRolloverAction.DATA_STREAM_LAZY_ROLLOVER, // Added in 8.13
-            DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE,
-            DataStreamGlobalRetention.GLOBAL_RETENTION // Added in 8.14
-        );
+        return Set.of();
     }
 
     @Override
@@ -197,8 +197,7 @@ public class DataStreamsPlugin extends Plugin implements ActionPlugin, HealthPlu
                 settings,
                 services.client(),
                 services.clusterService(),
-                errorStoreInitialisationService.get(),
-                services.featureService()
+                errorStoreInitialisationService.get()
             )
         );
         dataLifecycleInitialisationService.set(
@@ -19,8 +19,6 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore;
-import org.elasticsearch.features.FeatureService;
-import org.elasticsearch.features.NodeFeature;
 import org.elasticsearch.health.node.DataStreamLifecycleHealthInfo;
 import org.elasticsearch.health.node.DslErrorInfo;
 import org.elasticsearch.health.node.UpdateHealthInfoCacheAction;
@@ -45,12 +43,10 @@ public class DataStreamLifecycleHealthInfoPublisher {
         Setting.Property.Dynamic,
         Setting.Property.NodeScope
     );
-    public static final NodeFeature DSL_HEALTH_INFO_FEATURE = new NodeFeature("health.dsl.info", true);
 
     private final Client client;
     private final ClusterService clusterService;
     private final DataStreamLifecycleErrorStore errorStore;
-    private final FeatureService featureService;
     private volatile int signallingErrorRetryInterval;
     private volatile int maxNumberOfErrorsToPublish;
 
@@ -58,13 +54,11 @@ public class DataStreamLifecycleHealthInfoPublisher {
         Settings settings,
         Client client,
         ClusterService clusterService,
-        DataStreamLifecycleErrorStore errorStore,
-        FeatureService featureService
+        DataStreamLifecycleErrorStore errorStore
     ) {
         this.client = client;
         this.clusterService = clusterService;
         this.errorStore = errorStore;
-        this.featureService = featureService;
         this.signallingErrorRetryInterval = DATA_STREAM_SIGNALLING_ERROR_RETRY_INTERVAL_SETTING.get(settings);
         this.maxNumberOfErrorsToPublish = DATA_STREAM_LIFECYCLE_MAX_ERRORS_TO_PUBLISH_SETTING.get(settings);
     }
@@ -89,9 +83,6 @@ public class DataStreamLifecycleHealthInfoPublisher {
      * {@link org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService#DATA_STREAM_SIGNALLING_ERROR_RETRY_INTERVAL_SETTING}
      */
     public void publishDslErrorEntries(ActionListener<AcknowledgedResponse> actionListener) {
-        if (featureService.clusterHasFeature(clusterService.state(), DSL_HEALTH_INFO_FEATURE) == false) {
-            return;
-        }
         // fetching the entries that persist in the error store for more than the signalling retry interval
         // note that we're reporting this view into the error store on every publishing iteration
         List<DslErrorInfo> errorEntriesToSignal = errorStore.getErrorsInfo(
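
With the DSL_HEALTH_INFO_FEATURE gate deleted, DataStreamLifecycleHealthInfoPublisher publishes unconditionally and its constructor shrinks to four arguments. A sketch of the simplified wiring follows (an assumption-level illustration mirroring the constructor in this diff; client, clusterService and errorStore stand in for whatever the caller already holds):

    // Hypothetical wiring sketch; matches the 4-argument constructor above.
    var publisher = new DataStreamLifecycleHealthInfoPublisher(
        Settings.EMPTY,
        client,
        clusterService,
        errorStore
    );
    publisher.publishDslErrorEntries(ActionListener.noop()); // no feature-gate check anymore
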
@@ -67,9 +67,7 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.core.Tuple;
-import org.elasticsearch.datastreams.DataStreamFeatures;
 import org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthInfoPublisher;
-import org.elasticsearch.features.FeatureService;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexMode;
 import org.elasticsearch.index.IndexSettings;
@@ -183,13 +181,7 @@ public class DataStreamLifecycleServiceTests extends ESTestCase {
             () -> now,
             errorStore,
             allocationService,
-            new DataStreamLifecycleHealthInfoPublisher(
-                Settings.EMPTY,
-                client,
-                clusterService,
-                errorStore,
-                new FeatureService(List.of(new DataStreamFeatures()))
-            ),
+            new DataStreamLifecycleHealthInfoPublisher(Settings.EMPTY, client, clusterService, errorStore),
             globalRetentionSettings
         );
         clientDelegate = null;
@@ -1487,13 +1479,7 @@ public class DataStreamLifecycleServiceTests extends ESTestCase {
             () -> now.getAndAdd(delta),
             errorStore,
             mock(AllocationService.class),
-            new DataStreamLifecycleHealthInfoPublisher(
-                Settings.EMPTY,
-                getTransportRequestsRecordingClient(),
-                clusterService,
-                errorStore,
-                new FeatureService(List.of(new DataStreamFeatures()))
-            ),
+            new DataStreamLifecycleHealthInfoPublisher(Settings.EMPTY, getTransportRequestsRecordingClient(), clusterService, errorStore),
             globalRetentionSettings
         );
         assertThat(service.getLastRunDuration(), is(nullValue()));
@@ -24,10 +24,8 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.datastreams.DataStreamFeatures;
 import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore;
 import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService;
-import org.elasticsearch.features.FeatureService;
 import org.elasticsearch.health.node.DataStreamLifecycleHealthInfo;
 import org.elasticsearch.health.node.DslErrorInfo;
 import org.elasticsearch.health.node.UpdateHealthInfoCacheAction;
@@ -40,7 +38,6 @@ import org.junit.Before;
 
 import java.util.HashSet;
 import java.util.List;
-import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.CopyOnWriteArrayList;
 
@@ -83,13 +80,7 @@ public class DataStreamLifecycleHealthInfoPublisherTests extends ESTestCase {
 
         final Client client = getTransportRequestsRecordingClient();
         errorStore = new DataStreamLifecycleErrorStore(() -> now);
-        dslHealthInfoPublisher = new DataStreamLifecycleHealthInfoPublisher(
-            Settings.EMPTY,
-            client,
-            clusterService,
-            errorStore,
-            new FeatureService(List.of(new DataStreamFeatures()))
-        );
+        dslHealthInfoPublisher = new DataStreamLifecycleHealthInfoPublisher(Settings.EMPTY, client, clusterService, errorStore);
     }
 
     @After
@@ -105,16 +96,6 @@ public class DataStreamLifecycleHealthInfoPublisherTests extends ESTestCase {
         }
         errorStore.recordError("testIndex", new IllegalStateException("bad state"));
         ClusterState stateWithHealthNode = ClusterStateCreationUtils.state(node1, node1, node1, allNodes);
-        stateWithHealthNode = ClusterState.builder(stateWithHealthNode)
-            .nodeFeatures(
-                Map.of(
-                    node1.getId(),
-                    Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id()),
-                    node2.getId(),
-                    Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id())
-                )
-            )
-            .build();
         ClusterServiceUtils.setState(clusterService, stateWithHealthNode);
         dslHealthInfoPublisher.publishDslErrorEntries(new ActionListener<>() {
             @Override
@@ -143,16 +124,6 @@ public class DataStreamLifecycleHealthInfoPublisherTests extends ESTestCase {
         errorStore.recordError("testIndex", new IllegalStateException("bad state"));
 
         ClusterState stateNoHealthNode = ClusterStateCreationUtils.state(node1, node1, null, allNodes);
-        stateNoHealthNode = ClusterState.builder(stateNoHealthNode)
-            .nodeFeatures(
-                Map.of(
-                    node1.getId(),
-                    Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id()),
-                    node2.getId(),
-                    Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id())
-                )
-            )
-            .build();
         ClusterServiceUtils.setState(clusterService, stateNoHealthNode);
         dslHealthInfoPublisher.publishDslErrorEntries(new ActionListener<>() {
             @Override
@@ -170,16 +141,6 @@ public class DataStreamLifecycleHealthInfoPublisherTests extends ESTestCase {
     public void testPublishDslErrorEntriesEmptyErrorStore() {
         // publishes the empty error store (this is the "back to healthy" state where all errors have been fixed)
         ClusterState state = ClusterStateCreationUtils.state(node1, node1, node1, allNodes);
-        state = ClusterState.builder(state)
-            .nodeFeatures(
-                Map.of(
-                    node1.getId(),
-                    Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id()),
-                    node2.getId(),
-                    Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id())
-                )
-            )
-            .build();
         ClusterServiceUtils.setState(clusterService, state);
         dslHealthInfoPublisher.publishDslErrorEntries(new ActionListener<>() {
             @Override
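Note: the three hunks above delete the test scaffolding that advertised the DSL health-info feature on individual nodes before publishing. With the FeatureService gate gone from the publisher, seeding node features is no longer needed. For reference, a minimal sketch of the removed idiom, using only names that appear in these hunks:

    // Removed pattern: mark a feature as present on specific nodes so that a
    // FeatureService check against the cluster state would pass in tests.
    stateWithHealthNode = ClusterState.builder(stateWithHealthNode)
        .nodeFeatures(
            Map.of(node1.getId(), Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id()))
        )
        .build();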
@@ -300,9 +300,6 @@ index without timestamp with pipeline:
 
 ---
 dynamic templates:
-  - requires:
-      cluster_features: ["mapper.pass_through_priority"]
-      reason: support for priority in passthrough objects
   - do:
       allowed_warnings:
         - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
@@ -450,9 +447,6 @@ dynamic templates:
 
 ---
 dynamic templates - conflicting aliases:
-  - requires:
-      cluster_features: ["mapper.pass_through_priority"]
-      reason: support for priority in passthrough objects
   - do:
       allowed_warnings:
         - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
@@ -549,9 +543,6 @@ dynamic templates - conflicting aliases:
 
 ---
 dynamic templates - conflicting aliases with top-level field:
-  - requires:
-      cluster_features: ["mapper.pass_through_priority"]
-      reason: support for priority in passthrough objects
   - do:
       allowed_warnings:
         - "index template [my-dynamic-template] has index patterns [otel] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
@@ -632,9 +623,6 @@ dynamic templates - conflicting aliases with top-level field:
 
 ---
 dynamic templates with nesting:
-  - requires:
-      cluster_features: ["mapper.pass_through_priority"]
-      reason: support for priority in passthrough objects
   - do:
       allowed_warnings:
         - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
@@ -810,10 +798,6 @@ dynamic templates with nesting:
 
 ---
 dynamic templates with incremental indexing:
-  - requires:
-      cluster_features: ["mapper.pass_through_priority"]
-      reason: support for priority in passthrough objects
-
   - do:
       allowed_warnings:
         - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
@@ -1038,9 +1022,6 @@ dynamic templates with incremental indexing:
 
 ---
 subobject in passthrough object auto flatten:
-  - requires:
-      cluster_features: ["mapper.pass_through_priority"]
-      reason: support for priority in passthrough objects
   - do:
       allowed_warnings:
         - "index template [my-passthrough-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-passthrough-template] will take precedence during new index creation"
@@ -1108,9 +1089,6 @@ enable subobjects in passthrough object:
 
 ---
 passthrough objects with duplicate priority:
-  - requires:
-      cluster_features: ["mapper.pass_through_priority"]
-      reason: support for priority in passthrough objects
   - do:
       catch: /has a conflicting param/
       indices.put_index_template:
@@ -1135,9 +1113,6 @@ passthrough objects with duplicate priority:
 
 ---
 dimensions with ignore_malformed and ignore_above:
-  - requires:
-      cluster_features: ["mapper.keyword_dimension_ignore_above"]
-      reason: support for ignore_above on keyword dimensions
   - do:
       allowed_warnings:
         - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
@@ -1229,9 +1204,6 @@ dimensions with ignore_malformed and ignore_above:
 
 ---
 non string dimension fields:
-  - requires:
-      cluster_features: ["mapper.pass_through_priority", "routing.boolean_routing_path", "mapper.boolean_dimension"]
-      reason: support for priority in passthrough objects
   - do:
       allowed_warnings:
         - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
@@ -1339,10 +1311,6 @@ non string dimension fields:
 
 ---
 multi value dimensions:
-  - requires:
-      cluster_features: ["routing.multi_value_routing_path"]
-      reason: support for multi-value dimensions
-
   - do:
       allowed_warnings:
         - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
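Note: every hunk above removes a `requires` stanza that skipped the suite unless all nodes advertised the named cluster feature; after this change those features are taken for granted. On the Java side such a feature is just a named constant. A minimal sketch, assuming the NodeFeature record from org.elasticsearch.features (the holder class name here is hypothetical):

    import org.elasticsearch.features.NodeFeature;

    final class MapperFeaturesSketch {
        // The id matches the string the YAML tests listed under cluster_features.
        static final NodeFeature PASS_THROUGH_PRIORITY = new NodeFeature("mapper.pass_through_priority");
    }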
@@ -160,11 +160,6 @@ public record DatabaseConfiguration(String id, String name, Provider provider) i
         if (provider instanceof Maxmind maxmind) {
             out.writeString(maxmind.accountId);
         } else {
-            /*
-             * The existence of a non-Maxmind providers is gated on the feature get_database_configuration_action.multi_node, and
-             * get_database_configuration_action.multi_node is only available on or after
-             * TransportVersions.INGEST_GEO_DATABASE_PROVIDERS.
-             */
             assert false : "non-maxmind DatabaseConfiguration.Provider [" + provider.getWriteableName() + "]";
         }
     }
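Note: the comment deleted above explained why the assert is unreachable: non-Maxmind providers can only appear on streams that are on or after TransportVersions.INGEST_GEO_DATABASE_PROVIDERS. The usual shape of such a wire-compatibility gate, as a hedged sketch (the payload written inside the branch is not shown in this hunk):

    // Only emit the newer payload when the receiving node's transport version
    // can read it; older streams fall back to the Maxmind-only encoding above.
    if (out.getTransportVersion().onOrAfter(TransportVersions.INGEST_GEO_DATABASE_PROVIDERS)) {
        // write the provider-specific payload here (omitted in this hunk)
    }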
@@ -17,7 +17,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.regex.Regex;
-import org.elasticsearch.features.FeatureService;
 import org.elasticsearch.ingest.geoip.DatabaseNodeService;
 import org.elasticsearch.ingest.geoip.GeoIpTaskState;
 import org.elasticsearch.ingest.geoip.IngestGeoIpMetadata;
@@ -41,8 +40,6 @@ import java.util.Optional;
 import java.util.Set;
 import java.util.stream.Collectors;
 
-import static org.elasticsearch.ingest.IngestGeoIpFeatures.GET_DATABASE_CONFIGURATION_ACTION_MULTI_NODE;
-
 public class TransportGetDatabaseConfigurationAction extends TransportNodesAction<
     GetDatabaseConfigurationAction.Request,
     GetDatabaseConfigurationAction.Response,
@@ -50,7 +47,6 @@ public class TransportGetDatabaseConfigurationAction extends TransportNodesActio
     GetDatabaseConfigurationAction.NodeResponse,
     List<DatabaseConfigurationMetadata>> {
 
-    private final FeatureService featureService;
     private final DatabaseNodeService databaseNodeService;
 
     @Inject
@@ -59,7 +55,6 @@ public class TransportGetDatabaseConfigurationAction extends TransportNodesActio
         ClusterService clusterService,
         ThreadPool threadPool,
         ActionFilters actionFilters,
-        FeatureService featureService,
         DatabaseNodeService databaseNodeService
     ) {
         super(
@@ -70,39 +65,9 @@ public class TransportGetDatabaseConfigurationAction extends TransportNodesActio
             GetDatabaseConfigurationAction.NodeRequest::new,
             threadPool.executor(ThreadPool.Names.MANAGEMENT)
         );
-        this.featureService = featureService;
         this.databaseNodeService = databaseNodeService;
     }
 
-    @Override
-    protected void doExecute(
-        Task task,
-        GetDatabaseConfigurationAction.Request request,
-        ActionListener<GetDatabaseConfigurationAction.Response> listener
-    ) {
-        if (featureService.clusterHasFeature(clusterService.state(), GET_DATABASE_CONFIGURATION_ACTION_MULTI_NODE) == false) {
-            /*
-             * TransportGetDatabaseConfigurationAction used to be a TransportMasterNodeAction, and not all nodes in the cluster have been
-             * updated. So we don't want to send node requests to the other nodes because they will blow up. Instead, we just return
-             * the information that we used to return from the master node (it doesn't make any difference that this might not be the master
-             * node, because we're only reading the cluster state). Because older nodes only know about the Maxmind provider type, we filter
-             * out all others here to avoid causing problems on those nodes.
-             */
-            newResponseAsync(
-                task,
-                request,
-                createActionContext(task, request).stream()
-                    .filter(database -> database.database().provider() instanceof DatabaseConfiguration.Maxmind)
-                    .toList(),
-                List.of(),
-                List.of(),
-                listener
-            );
-        } else {
-            super.doExecute(task, request, listener);
-        }
-    }
-
     protected List<DatabaseConfigurationMetadata> createActionContext(Task task, GetDatabaseConfigurationAction.Request request) {
         final Set<String> ids;
         if (request.getDatabaseIds().length == 0) {
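Note: the deleted doExecute override was a mixed-cluster fallback: when any node lacked get_database_configuration_action.multi_node, the action answered from the local cluster state instead of fanning out node requests, and filtered the response down to provider types old nodes could deserialize. The core of that filtering idiom, as a sketch reusing only names visible in this hunk:

    // Keep only Maxmind-backed configurations: pre-feature nodes know no other
    // provider type, so anything else is dropped from the compatibility response.
    List<DatabaseConfigurationMetadata> compatible = createActionContext(task, request)
        .stream()
        .filter(database -> database.database().provider() instanceof DatabaseConfiguration.Maxmind)
        .toList();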
@@ -29,7 +29,6 @@ import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.Strings;
 import org.elasticsearch.core.Tuple;
-import org.elasticsearch.features.FeatureService;
 import org.elasticsearch.ingest.geoip.IngestGeoIpMetadata;
 import org.elasticsearch.ingest.geoip.direct.PutDatabaseConfigurationAction.Request;
 import org.elasticsearch.injection.guice.Inject;
@@ -42,8 +41,6 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Optional;
 
-import static org.elasticsearch.ingest.IngestGeoIpFeatures.PUT_DATABASE_CONFIGURATION_ACTION_IPINFO;
-
 public class TransportPutDatabaseConfigurationAction extends TransportMasterNodeAction<Request, AcknowledgedResponse> {
 
     private static final Logger logger = LogManager.getLogger(TransportPutDatabaseConfigurationAction.class);
@@ -61,7 +58,6 @@ public class TransportPutDatabaseConfigurationAction extends TransportMasterNode
         }
     };
 
-    private final FeatureService featureService;
     private final MasterServiceTaskQueue<UpdateDatabaseConfigurationTask> updateDatabaseConfigurationTaskQueue;
 
     @Inject
@@ -70,8 +66,7 @@ public class TransportPutDatabaseConfigurationAction extends TransportMasterNode
         ClusterService clusterService,
         ThreadPool threadPool,
         ActionFilters actionFilters,
-        IndexNameExpressionResolver indexNameExpressionResolver,
-        FeatureService featureService
+        IndexNameExpressionResolver indexNameExpressionResolver
     ) {
         super(
             PutDatabaseConfigurationAction.NAME,
@@ -84,7 +79,6 @@ public class TransportPutDatabaseConfigurationAction extends TransportMasterNode
             AcknowledgedResponse::readFrom,
             EsExecutors.DIRECT_EXECUTOR_SERVICE
         );
-        this.featureService = featureService;
         this.updateDatabaseConfigurationTaskQueue = clusterService.createTaskQueue(
             "update-geoip-database-configuration-state-update",
             Priority.NORMAL,
@@ -96,18 +90,6 @@ public class TransportPutDatabaseConfigurationAction extends TransportMasterNode
     protected void masterOperation(Task task, Request request, ClusterState state, ActionListener<AcknowledgedResponse> listener) {
         final String id = request.getDatabase().id();
 
-        // if this is an ipinfo configuration, then make sure the whole cluster supports that feature
-        if (request.getDatabase().provider() instanceof DatabaseConfiguration.Ipinfo
-            && featureService.clusterHasFeature(clusterService.state(), PUT_DATABASE_CONFIGURATION_ACTION_IPINFO) == false) {
-            listener.onFailure(
-                new IllegalArgumentException(
-                    "Unable to use ipinfo database configurations in mixed-clusters with nodes that do not support feature "
-                        + PUT_DATABASE_CONFIGURATION_ACTION_IPINFO.id()
-                )
-            );
-            return;
-        }
-
         updateDatabaseConfigurationTaskQueue.submitTask(
             Strings.format("update-geoip-database-configuration-[%s]", id),
             new UpdateDatabaseConfigurationTask(listener, request.getDatabase()),
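Note: the deleted masterOperation guard is the reject-early counterpart of the read-side fallback above: while any node still lacked put_database_configuration_action.ipinfo, an Ipinfo provider was refused before the cluster-state update was enqueued. The idiom, sketched with only the calls visible in this hunk (the error message is abridged):

    // Fail fast in a mixed cluster: an ipinfo provider is only safe to persist
    // once every node can understand it.
    if (request.getDatabase().provider() instanceof DatabaseConfiguration.Ipinfo
        && featureService.clusterHasFeature(clusterService.state(), PUT_DATABASE_CONFIGURATION_ACTION_IPINFO) == false) {
        listener.onFailure(new IllegalArgumentException("cluster does not yet support ipinfo database configurations"));
        return;
    }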
@@ -1,9 +1,3 @@
----
-setup:
-  - requires:
-      cluster_features: ["geoip.downloader.database.configuration", "get_database_configuration_action.multi_node"]
-      reason: "geoip downloader database configuration APIs added in 8.15, and updated in 8.16 to return more results"
-
 ---
 teardown:
   - do:
@@ -1,9 +1,3 @@
-setup:
-  - requires:
-      cluster_features:
-        - "put_database_configuration_action.ipinfo"
-      reason: "ipinfo support added in 8.16"
-
 ---
 "Test ip_location processor with defaults":
   - do:
@@ -1,10 +1,3 @@
----
-setup:
-  - requires:
-      cluster_features:
-        - "put_database_configuration_action.ipinfo"
-      reason: "ip location downloader database configuration APIs added in 8.16 to support more types"
-
 ---
 teardown:
   - do:
@@ -16,6 +16,8 @@ import org.elasticsearch.action.search.SearchPhaseExecutionException;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.support.WriteRequest;
 import org.elasticsearch.client.internal.Client;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
 import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor;
@@ -23,7 +25,6 @@ import org.elasticsearch.index.IndexingPressure;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.threadpool.ThreadPoolStats;
 
@@ -49,10 +50,6 @@ import static org.hamcrest.Matchers.startsWith;
  * threads that wait on a phaser. This lets us verify that operations on system indices
  * are being directed to other thread pools.</p>
  */
-@TestLogging(
-    reason = "investigate",
-    value = "org.elasticsearch.kibana.KibanaThreadPoolIT:DEBUG,org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor:TRACE"
-)
 public class KibanaThreadPoolIT extends ESIntegTestCase {
     private static final Logger logger = LogManager.getLogger(KibanaThreadPoolIT.class);
 
@@ -68,6 +65,8 @@ public class KibanaThreadPoolIT extends ESIntegTestCase {
             .put("thread_pool.write.queue_size", 1)
             .put("thread_pool.get.size", 1)
             .put("thread_pool.get.queue_size", 1)
+            // a rejected GET may retry on an INITIALIZING shard (the target of a relocation) and unexpectedly succeed, so block rebalancing
+            .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)
             .build();
     }
 
@@ -112,7 +111,12 @@ public class KibanaThreadPoolIT extends ESIntegTestCase {
     }
 
     public void testBlockedThreadPoolsRejectUserRequests() throws Exception {
-        assertAcked(client().admin().indices().prepareCreate(USER_INDEX));
+        assertAcked(
+            client().admin()
+                .indices()
+                .prepareCreate(USER_INDEX)
+                .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)) // avoid retrying rejected actions
+        );
 
         runWithBlockedThreadPools(this::assertThreadPoolsBlocked);
 
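Note: the last two hunks make the rejection test deterministic in two ways: rebalancing is disabled so a rejected GET cannot land on an INITIALIZING relocation target and accidentally succeed, and the test index gets zero replicas so a rejected write has no second copy to retry against. A minimal sketch of the zero-replica creation, assuming the ESIntegTestCase client helpers used in this file (the index name is illustrative):

    // One primary, no replicas: rejected operations surface immediately
    // instead of being retried on a replica copy.
    assertAcked(
        client().admin()
            .indices()
            .prepareCreate("user-index")
            .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0))
    );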
Some files were not shown because too many files have changed in this diff.