diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/LogsDbDocumentParsingBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/LogsDbDocumentParsingBenchmark.java deleted file mode 100644 index 8924f84fdc90..000000000000 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/index/mapper/LogsDbDocumentParsingBenchmark.java +++ /dev/null @@ -1,400 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.benchmark.index.mapper; - -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.logging.LogConfigurator; -import org.elasticsearch.index.mapper.LuceneDocument; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.SourceToParse; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentType; -import org.openjdk.jmh.annotations.Benchmark; -import org.openjdk.jmh.annotations.BenchmarkMode; -import org.openjdk.jmh.annotations.Fork; -import org.openjdk.jmh.annotations.Measurement; -import org.openjdk.jmh.annotations.Mode; -import org.openjdk.jmh.annotations.OutputTimeUnit; -import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.Setup; -import org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.Warmup; - -import java.io.IOException; -import java.util.List; -import java.util.Random; -import java.util.concurrent.TimeUnit; - -@Fork(value = 1) -@Warmup(iterations = 5) -@Measurement(iterations = 5) -@BenchmarkMode(Mode.Throughput) -@OutputTimeUnit(TimeUnit.SECONDS) -@State(Scope.Benchmark) -public class LogsDbDocumentParsingBenchmark { - private Random random; - private MapperService mapperServiceEnabled; - private MapperService mapperServiceEnabledWithStoreArrays; - private MapperService mapperServiceDisabled; - private SourceToParse[] documents; - - static { - LogConfigurator.configureESLogging(); // doc values implementations need logging - } - - private static String SAMPLE_LOGS_MAPPING_ENABLED = """ - { - "_source": { - "mode": "synthetic" - }, - "properties": { - "kafka": { - "properties": { - "log": { - "properties": { - "component": { - "ignore_above": 1024, - "type": "keyword" - }, - "trace": { - "properties": { - "message": { - "type": "text" - }, - "class": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "thread": { - "ignore_above": 1024, - "type": "keyword" - }, - "class": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "host": { - "properties": { - "hostname": { - "ignore_above": 1024, - "type": "keyword" - }, - "os": { - "properties": { - "build": { - "ignore_above": 1024, - "type": "keyword" - }, - "kernel": { - "ignore_above": 1024, - "type": "keyword" - }, - "codename": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "text" - } - } - }, - "family": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, 
- "platform": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "containerized": { - "type": "boolean" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "mac": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - } - """; - - private static String SAMPLE_LOGS_MAPPING_ENABLED_WITH_STORE_ARRAYS = """ - { - "_source": { - "mode": "synthetic" - }, - "properties": { - "kafka": { - "properties": { - "log": { - "properties": { - "component": { - "ignore_above": 1024, - "type": "keyword" - }, - "trace": { - "synthetic_source_keep": "arrays", - "properties": { - "message": { - "type": "text" - }, - "class": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "thread": { - "ignore_above": 1024, - "type": "keyword" - }, - "class": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "host": { - "properties": { - "hostname": { - "ignore_above": 1024, - "type": "keyword" - }, - "os": { - "properties": { - "build": { - "ignore_above": 1024, - "type": "keyword" - }, - "kernel": { - "ignore_above": 1024, - "type": "keyword" - }, - "codename": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "text" - } - } - }, - "family": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "platform": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "containerized": { - "type": "boolean" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "mac": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - } - """; - - private static String SAMPLE_LOGS_MAPPING_DISABLED = """ - { - "_source": { - "mode": "synthetic" - }, - "enabled": false - } - """; - - @Setup - public void setUp() throws IOException { - this.random = new Random(); - this.mapperServiceEnabled = MapperServiceFactory.create(SAMPLE_LOGS_MAPPING_ENABLED); - this.mapperServiceEnabledWithStoreArrays = MapperServiceFactory.create(SAMPLE_LOGS_MAPPING_ENABLED_WITH_STORE_ARRAYS); - this.mapperServiceDisabled = MapperServiceFactory.create(SAMPLE_LOGS_MAPPING_DISABLED); - this.documents = generateRandomDocuments(10_000); - } - - @Benchmark - public List benchmarkEnabledObject() { - return mapperServiceEnabled.documentMapper().parse(randomFrom(documents)).docs(); - } - - @Benchmark - public List benchmarkEnabledObjectWithStoreArrays() { - return mapperServiceEnabledWithStoreArrays.documentMapper().parse(randomFrom(documents)).docs(); - } - - @Benchmark - public List benchmarkDisabledObject() { - return mapperServiceDisabled.documentMapper().parse(randomFrom(documents)).docs(); - } - - @SafeVarargs - @SuppressWarnings("varargs") - private T randomFrom(T... 
items) { - return items[random.nextInt(items.length)]; - } - - private SourceToParse[] generateRandomDocuments(int count) throws IOException { - var docs = new SourceToParse[count]; - for (int i = 0; i < count; i++) { - docs[i] = generateRandomDocument(); - } - return docs; - } - - private SourceToParse generateRandomDocument() throws IOException { - var builder = XContentBuilder.builder(XContentType.JSON.xContent()); - - builder.startObject(); - - builder.startObject("kafka"); - { - builder.startObject("log"); - { - builder.field("component", randomString(10)); - builder.startArray("trace"); - { - builder.startObject(); - { - builder.field("message", randomString(50)); - builder.field("class", randomString(10)); - } - builder.endObject(); - builder.startObject(); - { - builder.field("message", randomString(50)); - builder.field("class", randomString(10)); - } - builder.endObject(); - } - builder.endArray(); - builder.field("thread", randomString(10)); - builder.field("class", randomString(10)); - - } - builder.endObject(); - } - builder.endObject(); - - builder.startObject("host"); - { - builder.field("hostname", randomString(10)); - builder.startObject("os"); - { - builder.field("name", randomString(10)); - } - builder.endObject(); - - builder.field("domain", randomString(10)); - builder.field("ip", randomIp()); - builder.field("name", randomString(10)); - } - - builder.endObject(); - - builder.endObject(); - - return new SourceToParse(UUIDs.randomBase64UUID(), BytesReference.bytes(builder), XContentType.JSON); - } - - private String randomIp() { - return "" + random.nextInt(255) + '.' + random.nextInt(255) + '.' + random.nextInt(255) + '.' + random.nextInt(255); - } - - private String randomString(int maxLength) { - var length = random.nextInt(maxLength); - var builder = new StringBuilder(length); - for (int i = 0; i < length; i++) { - builder.append((byte) (32 + random.nextInt(94))); - } - return builder.toString(); - } -} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java index 985c98bcd788..6dfb337a22ac 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java @@ -22,7 +22,7 @@ public enum DockerBase { // Chainguard based wolfi image with latest jdk // This is usually updated via renovatebot // spotless:off - WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:bfdeddb33330a281950c2a54adef991dbbe6a42832bc505d13b11beaf50ae73f", + WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:eef54b3a414aa53b98f0f8df2633aed83c3ba6230722769282925442968f0364", "-wolfi", "apk" ), diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java index f7e2f3d0d6c3..c2547b72e21f 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java @@ -134,14 +134,14 @@ public class InternalDistributionArchiveSetupPlugin implements Plugin { }); File pluginsDir = new File(project.getBuildDir(), "plugins-hack/plugins"); - project.getExtensions().add("pluginsDir", pluginsDir); + 
project.getExtensions().getExtraProperties().set("pluginsDir", pluginsDir); project.getTasks().register("createPluginsDir", EmptyDirTask.class, t -> { t.setDir(pluginsDir); t.setDirMode(0755); }); File jvmOptionsDir = new File(project.getBuildDir(), "jvm-options-hack/jvm.options.d"); - project.getExtensions().add("jvmOptionsDir", jvmOptionsDir); + project.getExtensions().getExtraProperties().set("jvmOptionsDir", jvmOptionsDir); project.getTasks().register("createJvmOptionsDir", EmptyDirTask.class, t -> { t.setDir(jvmOptionsDir); t.setDirMode(0750); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckstylePrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckstylePrecommitPlugin.java index dbbe35905d20..dc8ea4424ba8 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckstylePrecommitPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckstylePrecommitPlugin.java @@ -42,7 +42,8 @@ public class CheckstylePrecommitPlugin extends PrecommitPlugin { File checkstyleDir = new File(project.getBuildDir(), "checkstyle"); File checkstyleSuppressions = new File(checkstyleDir, "checkstyle_suppressions.xml"); File checkstyleConf = new File(checkstyleDir, "checkstyle.xml"); - TaskProvider<Task> copyCheckstyleConf = project.getTasks().register("copyCheckstyleConf"); + TaskProvider<CopyCheckStyleConfTask> copyCheckstyleConf = project.getTasks() + .register("copyCheckstyleConf", CopyCheckStyleConfTask.class); // configure inputs and outputs so up to date works properly copyCheckstyleConf.configure(t -> t.getOutputs().files(checkstyleSuppressions, checkstyleConf)); if ("jar".equals(checkstyleConfUrl.getProtocol())) { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CopyCheckStyleConfTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CopyCheckStyleConfTask.java new file mode 100644 index 000000000000..9e0f9c24bcef --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CopyCheckStyleConfTask.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1".
+ */ + +package org.elasticsearch.gradle.internal.precommit; + +import org.gradle.api.DefaultTask; +import org.gradle.api.file.FileSystemOperations; + +import javax.inject.Inject; + +public abstract class CopyCheckStyleConfTask extends DefaultTask { + + @Inject + public abstract FileSystemOperations getFs(); +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java index 211718c151ba..08e3c92307d7 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java @@ -31,6 +31,7 @@ import org.gradle.api.Task; import org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.dsl.DependencyHandler; import org.gradle.api.artifacts.type.ArtifactTypeDefinition; +import org.gradle.api.file.FileCollection; import org.gradle.api.plugins.JavaPluginExtension; import org.gradle.api.provider.Provider; import org.gradle.api.specs.Specs; @@ -88,8 +89,8 @@ public class DistroTestPlugin implements Plugin<Project> { Map<String, TaskProvider<Task>> versionTasks = versionTasks(project, "destructiveDistroUpgradeTest", buildParams.getBwcVersions()); TaskProvider<Task> destructiveDistroTest = project.getTasks().register("destructiveDistroTest"); - Configuration examplePlugin = configureExamplePlugin(project); - + Configuration examplePluginConfiguration = configureExamplePlugin(project); + FileCollection examplePluginFileCollection = examplePluginConfiguration; List<TaskProvider<Test>> windowsTestTasks = new ArrayList<>(); Map<ElasticsearchDistributionType, List<TaskProvider<Test>>> linuxTestTasks = new HashMap<>(); @@ -102,9 +103,9 @@ t2 -> distribution.isDocker() == false || dockerSupport.get().getDockerAvailability().isAvailable() ); addDistributionSysprop(t, DISTRIBUTION_SYSPROP, distribution::getFilepath); - addDistributionSysprop(t, EXAMPLE_PLUGIN_SYSPROP, () -> examplePlugin.getSingleFile().toString()); + addDistributionSysprop(t, EXAMPLE_PLUGIN_SYSPROP, () -> examplePluginFileCollection.getSingleFile().toString()); t.exclude("**/PackageUpgradeTests.class"); - }, distribution, examplePlugin.getDependencies()); + }, distribution, examplePluginConfiguration.getDependencies()); if (distribution.getPlatform() == Platform.WINDOWS) { windowsTestTasks.add(destructiveTask); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java index 8fbe40aa8239..bb26bfd16721 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java @@ -54,16 +54,16 @@ public abstract class OracleOpenJdkToolchainResolver extends AbstractCustomJavaT } } - record EarlyAccessJdkBuild(JavaLanguageVersion languageVersion, String version, String buildNumber) implements JdkBuild { - + record EarlyAccessJdkBuild(JavaLanguageVersion languageVersion) implements JdkBuild { @Override public String url(String os, String arch, String extension) { + String buildNumber = resolveBuildNumber(languageVersion.asInt()); return "https://download.java.net/java/early_access/jdk" - + version + + languageVersion.asInt() + "/" - + version + + buildNumber +
"/GPL/openjdk-" - + version + + languageVersion.asInt() + "-ea+" + buildNumber + "_" @@ -73,6 +73,29 @@ public abstract class OracleOpenJdkToolchainResolver extends AbstractCustomJavaT + "_bin." + extension; } + + private static String resolveBuildNumber(int version) { + String buildNumber = System.getProperty("runtime.java." + version + ".build"); + if (buildNumber != null) { + System.out.println("buildNumber = " + buildNumber); + return buildNumber; + } + buildNumber = System.getProperty("runtime.java.build"); + if (buildNumber != null) { + System.out.println("buildNumber2 = " + buildNumber); + return buildNumber; + } + + switch (version) { + case 24: + // latest explicitly found build number for 24 + return "29"; + case 25: + return "3"; + default: + throw new IllegalArgumentException("Unsupported version " + version); + } + } } private static final Pattern VERSION_PATTERN = Pattern.compile( @@ -88,8 +111,8 @@ public abstract class OracleOpenJdkToolchainResolver extends AbstractCustomJavaT // package private so it can be replaced by tests List<JdkBuild> builds = List.of( getBundledJdkBuild(), - // 23 early access - new EarlyAccessJdkBuild(JavaLanguageVersion.of(23), "23", "24") + new EarlyAccessJdkBuild(JavaLanguageVersion.of(24)), + new EarlyAccessJdkBuild(JavaLanguageVersion.of(25)) ); private JdkBuild getBundledJdkBuild() { diff --git a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/AbstractToolchainResolverSpec.groovy b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/AbstractToolchainResolverSpec.groovy index ad0db8b1b7de..cea96437129a 100644 --- a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/AbstractToolchainResolverSpec.groovy +++ b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/AbstractToolchainResolverSpec.groovy @@ -9,6 +9,8 @@ package org.elasticsearch.gradle.internal.toolchain +import spock.lang.Unroll + import org.gradle.api.provider.Property import org.gradle.jvm.toolchain.JavaLanguageVersion import org.gradle.jvm.toolchain.JavaToolchainDownload @@ -26,6 +28,7 @@ import static org.gradle.platform.OperatingSystem.MAC_OS abstract class AbstractToolchainResolverSpec extends Specification { + @Unroll def "resolves #os #arch #vendor jdk #langVersion"() { given: def resolver = resolverImplementation() diff --git a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolverSpec.groovy b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolverSpec.groovy index 9c55bbc4674e..4993bf00f2af 100644 --- a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolverSpec.groovy +++ b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolverSpec.groovy @@ -9,15 +9,20 @@ package org.elasticsearch.gradle.internal.toolchain +import spock.util.environment.RestoreSystemProperties import org.gradle.api.services.BuildServiceParameters import org.gradle.jvm.toolchain.JavaLanguageVersion +import org.gradle.jvm.toolchain.JavaToolchainDownload + import static org.gradle.jvm.toolchain.JvmVendorSpec.ORACLE -import static org.gradle.platform.Architecture.* +import static org.gradle.platform.Architecture.AARCH64 +import static org.gradle.platform.Architecture.X86_64 import static org.gradle.platform.OperatingSystem.* class OracleOpenJdkToolchainResolverSpec extends
AbstractToolchainResolverSpec { + OracleOpenJdkToolchainResolver resolverImplementation() { var toolChain = new OracleOpenJdkToolchainResolver() { @Override @@ -25,10 +30,13 @@ class OracleOpenJdkToolchainResolverSpec extends AbstractToolchainResolverSpec { return null } } - toolChain.builds = [ - new OracleOpenJdkToolchainResolver.ReleasedJdkBuild(JavaLanguageVersion.of(20), "20", "36", "bdc68b4b9cbc4ebcb30745c85038d91d"), - new OracleOpenJdkToolchainResolver.EarlyAccessJdkBuild(JavaLanguageVersion.of(21), "21", "6") - ] + toolChain.builds = toolChain.builds.findAll { it instanceof OracleOpenJdkToolchainResolver.EarlyAccessJdkBuild } + [ + new OracleOpenJdkToolchainResolver.ReleasedJdkBuild( + JavaLanguageVersion.of(20), + "20", + "36", + "bdc68b4b9cbc4ebcb30745c85038d91d" + )] toolChain } @@ -44,23 +52,67 @@ class OracleOpenJdkToolchainResolverSpec extends AbstractToolchainResolverSpec { [20, anyVendor(), LINUX, AARCH64, "https://download.oracle.com/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_linux-aarch64_bin.tar.gz"], [20, anyVendor(), WINDOWS, X86_64, "https://download.oracle.com/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_windows-x64_bin.zip"], // https://download.java.net/java/early_access/jdk23/23/GPL/openjdk-23-ea+23_macos-aarch64_bin.tar.gz - [21, ORACLE, MAC_OS, X86_64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_macos-x64_bin.tar.gz"], - [21, ORACLE, MAC_OS, AARCH64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_macos-aarch64_bin.tar.gz"], - [21, ORACLE, LINUX, X86_64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_linux-x64_bin.tar.gz"], - [21, ORACLE, LINUX, AARCH64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_linux-aarch64_bin.tar.gz"], - [21, ORACLE, WINDOWS, X86_64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_windows-x64_bin.zip"], - [21, anyVendor(), MAC_OS, X86_64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_macos-x64_bin.tar.gz"], - [21, anyVendor(), MAC_OS, AARCH64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_macos-aarch64_bin.tar.gz"], - [21, anyVendor(), LINUX, X86_64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_linux-x64_bin.tar.gz"], - [21, anyVendor(), LINUX, AARCH64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_linux-aarch64_bin.tar.gz"], - [21, anyVendor(), WINDOWS, X86_64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_windows-x64_bin.zip"] - ] + [24, ORACLE, MAC_OS, X86_64, "https://download.java.net/java/early_access/jdk24/29/GPL/openjdk-24-ea+29_macos-x64_bin.tar.gz"], + [24, ORACLE, MAC_OS, AARCH64, "https://download.java.net/java/early_access/jdk24/29/GPL/openjdk-24-ea+29_macos-aarch64_bin.tar.gz"], + [24, ORACLE, LINUX, X86_64, "https://download.java.net/java/early_access/jdk24/29/GPL/openjdk-24-ea+29_linux-x64_bin.tar.gz"], + [24, ORACLE, LINUX, AARCH64, "https://download.java.net/java/early_access/jdk24/29/GPL/openjdk-24-ea+29_linux-aarch64_bin.tar.gz"], + [24, ORACLE, WINDOWS, X86_64, "https://download.java.net/java/early_access/jdk24/29/GPL/openjdk-24-ea+29_windows-x64_bin.zip"], + [24, anyVendor(), MAC_OS, X86_64, "https://download.java.net/java/early_access/jdk24/29/GPL/openjdk-24-ea+29_macos-x64_bin.tar.gz"], + [24, anyVendor(), MAC_OS, AARCH64, 
"https://download.java.net/java/early_access/jdk24/29/GPL/openjdk-24-ea+29_macos-aarch64_bin.tar.gz"], + [24, anyVendor(), LINUX, X86_64, "https://download.java.net/java/early_access/jdk24/29/GPL/openjdk-24-ea+29_linux-x64_bin.tar.gz"], + [24, anyVendor(), LINUX, AARCH64, "https://download.java.net/java/early_access/jdk24/29/GPL/openjdk-24-ea+29_linux-aarch64_bin.tar.gz"], + [24, anyVendor(), WINDOWS, X86_64, "https://download.java.net/java/early_access/jdk24/29/GPL/openjdk-24-ea+29_windows-x64_bin.zip"]] + } + + @RestoreSystemProperties + def "can provide build number for ea versions"() { + given: + System.setProperty('runtime.java.build', "42") + System.setProperty('runtime.java.25.build', "13") + def resolver = resolverImplementation() + + when: + Optional download = resolver.resolve( + request( + JavaLanguageVersion.of(version), + vendor, + platform(os, arch) + ) + ) + + then: + download.get().uri == URI.create(expectedUrl) + + where: + version | vendor | os | arch | expectedUrl + 24 | ORACLE | MAC_OS | X86_64 | urlPrefix(24) + "42/GPL/openjdk-24-ea+42_macos-x64_bin.tar.gz" + 24 | ORACLE | MAC_OS | AARCH64 | urlPrefix(24) + "42/GPL/openjdk-24-ea+42_macos-aarch64_bin.tar.gz" + 24 | ORACLE | LINUX | X86_64 | urlPrefix(24) + "42/GPL/openjdk-24-ea+42_linux-x64_bin.tar.gz" + 24 | ORACLE | LINUX | AARCH64 | urlPrefix(24) + "42/GPL/openjdk-24-ea+42_linux-aarch64_bin.tar.gz" + 24 | ORACLE | WINDOWS | X86_64 | urlPrefix(24) + "42/GPL/openjdk-24-ea+42_windows-x64_bin.zip" + 24 | anyVendor() | MAC_OS | X86_64 | urlPrefix(24) + "42/GPL/openjdk-24-ea+42_macos-x64_bin.tar.gz" + 24 | anyVendor() | MAC_OS | AARCH64 | urlPrefix(24) + "42/GPL/openjdk-24-ea+42_macos-aarch64_bin.tar.gz" + 24 | anyVendor() | LINUX | X86_64 | urlPrefix(24) + "42/GPL/openjdk-24-ea+42_linux-x64_bin.tar.gz" + 24 | anyVendor() | LINUX | AARCH64 | urlPrefix(24) + "42/GPL/openjdk-24-ea+42_linux-aarch64_bin.tar.gz" + 24 | anyVendor() | WINDOWS | X86_64 | urlPrefix(24) + "42/GPL/openjdk-24-ea+42_windows-x64_bin.zip" + 25 | ORACLE | MAC_OS | X86_64 | urlPrefix(25) + "13/GPL/openjdk-25-ea+13_macos-x64_bin.tar.gz" + 25 | ORACLE | MAC_OS | AARCH64 | urlPrefix(25) + "13/GPL/openjdk-25-ea+13_macos-aarch64_bin.tar.gz" + 25 | ORACLE | LINUX | X86_64 | urlPrefix(25) + "13/GPL/openjdk-25-ea+13_linux-x64_bin.tar.gz" + 25 | ORACLE | LINUX | AARCH64 | urlPrefix(25) + "13/GPL/openjdk-25-ea+13_linux-aarch64_bin.tar.gz" + 25 | ORACLE | WINDOWS | X86_64 | urlPrefix(25) + "13/GPL/openjdk-25-ea+13_windows-x64_bin.zip" + 25 | anyVendor() | MAC_OS | X86_64 | urlPrefix(25) + "13/GPL/openjdk-25-ea+13_macos-x64_bin.tar.gz" + 25 | anyVendor() | MAC_OS | AARCH64 | urlPrefix(25) + "13/GPL/openjdk-25-ea+13_macos-aarch64_bin.tar.gz" + 25 | anyVendor() | LINUX | X86_64 | urlPrefix(25) + "13/GPL/openjdk-25-ea+13_linux-x64_bin.tar.gz" + 25 | anyVendor() | LINUX | AARCH64 | urlPrefix(25) + "13/GPL/openjdk-25-ea+13_linux-aarch64_bin.tar.gz" + 25 | anyVendor() | WINDOWS | X86_64 | urlPrefix(25) + "13/GPL/openjdk-25-ea+13_windows-x64_bin.zip" + } + + private static String urlPrefix(int i) { + return "https://download.java.net/java/early_access/jdk" + i + "/" } def unsupportedRequests() { - [ - [20, ORACLE, WINDOWS, AARCH64] - ] + [[20, ORACLE, WINDOWS, AARCH64]] } } diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index db7797ca23ec..e9327efa50c3 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -127,7 +127,7 @@ ext.expansions = { Architecture architecture, DockerBase base -> 'bin_dir' : base == 
DockerBase.IRON_BANK ? 'scripts' : 'bin', 'build_date' : buildDate, 'config_dir' : base == DockerBase.IRON_BANK ? 'scripts' : 'config', - 'git_revision' : buildParams.gitRevision, + 'git_revision' : buildParams.gitRevision.get(), 'license' : base == DockerBase.IRON_BANK ? 'Elastic License 2.0' : 'Elastic-License-2.0', 'package_manager' : base.packageManager, 'docker_base' : base.name().toLowerCase(), @@ -551,6 +551,7 @@ subprojects { Project subProject -> inputs.file("${parent.projectDir}/build/markers/${buildTaskName}.marker") executable = 'docker' outputs.file(tarFile) + outputs.doNotCacheIf("Build cache is disabled for export tasks") { true } args "save", "-o", tarFile, diff --git a/docs/changelog/118599.yaml b/docs/changelog/118599.yaml new file mode 100644 index 000000000000..b410ddf5c5d1 --- /dev/null +++ b/docs/changelog/118599.yaml @@ -0,0 +1,5 @@ +pr: 118599 +summary: Archive-Index upgrade compatibility +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/118959.yaml b/docs/changelog/118959.yaml new file mode 100644 index 000000000000..95a9c146ae67 --- /dev/null +++ b/docs/changelog/118959.yaml @@ -0,0 +1,5 @@ +pr: 118959 +summary: Allow kibana_system user to manage .reindexed-v8-internal.alerts indices +area: Authorization +type: enhancement +issues: [] diff --git a/docs/changelog/119054.yaml b/docs/changelog/119054.yaml new file mode 100644 index 000000000000..720f2e0ab02e --- /dev/null +++ b/docs/changelog/119054.yaml @@ -0,0 +1,6 @@ +pr: 119054 +summary: "[Security Solution] allows `kibana_system` user to manage .reindexed-v8-*\ + \ Security Solution indices" +area: Authorization +type: enhancement +issues: [] diff --git a/docs/changelog/119233.yaml b/docs/changelog/119233.yaml new file mode 100644 index 000000000000..ef89c011ce4f --- /dev/null +++ b/docs/changelog/119233.yaml @@ -0,0 +1,5 @@ +pr: 119233 +summary: Fixing `GetDatabaseConfigurationAction` response serialization +area: Ingest Node +type: bug +issues: [] diff --git a/docs/changelog/119474.yaml b/docs/changelog/119474.yaml new file mode 100644 index 000000000000..e37561277d22 --- /dev/null +++ b/docs/changelog/119474.yaml @@ -0,0 +1,5 @@ +pr: 119474 +summary: "Add ES|QL cross-cluster query telemetry collection" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/119476.yaml b/docs/changelog/119476.yaml new file mode 100644 index 000000000000..c275e6965d4a --- /dev/null +++ b/docs/changelog/119476.yaml @@ -0,0 +1,6 @@ +pr: 119476 +summary: Fix TopN row size estimate +area: ES|QL +type: bug +issues: + - 106956 diff --git a/docs/changelog/119495.yaml b/docs/changelog/119495.yaml new file mode 100644 index 000000000000..b3e8f7e79d98 --- /dev/null +++ b/docs/changelog/119495.yaml @@ -0,0 +1,5 @@ +pr: 119495 +summary: Add mapping for `event_name` for OTel logs +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/119516.yaml b/docs/changelog/119516.yaml new file mode 100644 index 000000000000..06dd5168a082 --- /dev/null +++ b/docs/changelog/119516.yaml @@ -0,0 +1,5 @@ +pr: 119516 +summary: "Fix: do not let `_resolve/cluster` hang if remote is unresponsive" +area: Search +type: bug +issues: [] diff --git a/docs/reference/cat/alias.asciidoc b/docs/reference/cat/alias.asciidoc index 8be7ae60ff7e..0a7c1828d487 100644 --- a/docs/reference/cat/alias.asciidoc +++ b/docs/reference/cat/alias.asciidoc @@ -4,7 +4,7 @@ cat aliases ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact 
and aligned text (CAT) APIs].. diff --git a/docs/reference/cat/allocation.asciidoc b/docs/reference/cat/allocation.asciidoc index 07735335c91a..34b8069b91e2 100644 --- a/docs/reference/cat/allocation.asciidoc +++ b/docs/reference/cat/allocation.asciidoc @@ -4,7 +4,7 @@ cat allocation ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. diff --git a/docs/reference/cat/anomaly-detectors.asciidoc b/docs/reference/cat/anomaly-detectors.asciidoc index 589ff6487226..03cd824092cf 100644 --- a/docs/reference/cat/anomaly-detectors.asciidoc +++ b/docs/reference/cat/anomaly-detectors.asciidoc @@ -5,7 +5,7 @@ cat anomaly detectors ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. diff --git a/docs/reference/cat/component-templates.asciidoc b/docs/reference/cat/component-templates.asciidoc index c111fd8c2fe0..8be1096a215f 100644 --- a/docs/reference/cat/component-templates.asciidoc +++ b/docs/reference/cat/component-templates.asciidoc @@ -4,7 +4,7 @@ cat component templates ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. diff --git a/docs/reference/cat/count.asciidoc b/docs/reference/cat/count.asciidoc index 39c5b5b798d8..7adcd1464dab 100644 --- a/docs/reference/cat/count.asciidoc +++ b/docs/reference/cat/count.asciidoc @@ -4,7 +4,7 @@ cat count ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. diff --git a/docs/reference/cat/datafeeds.asciidoc b/docs/reference/cat/datafeeds.asciidoc index fa40d3aff4af..29f5bc8150af 100644 --- a/docs/reference/cat/datafeeds.asciidoc +++ b/docs/reference/cat/datafeeds.asciidoc @@ -5,7 +5,7 @@ cat {dfeeds} ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. diff --git a/docs/reference/cat/dataframeanalytics.asciidoc b/docs/reference/cat/dataframeanalytics.asciidoc index a164ac7c460e..f00a9826ee5b 100644 --- a/docs/reference/cat/dataframeanalytics.asciidoc +++ b/docs/reference/cat/dataframeanalytics.asciidoc @@ -5,7 +5,7 @@ cat {dfanalytics} ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. diff --git a/docs/reference/cat/fielddata.asciidoc b/docs/reference/cat/fielddata.asciidoc index 22c0d532f212..f11e40263ec2 100644 --- a/docs/reference/cat/fielddata.asciidoc +++ b/docs/reference/cat/fielddata.asciidoc @@ -4,7 +4,7 @@ cat fielddata ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. diff --git a/docs/reference/cat/health.asciidoc b/docs/reference/cat/health.asciidoc index 70e7960b0357..7ffc170ec851 100644 --- a/docs/reference/cat/health.asciidoc +++ b/docs/reference/cat/health.asciidoc @@ -4,7 +4,7 @@ cat health ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. 
diff --git a/docs/reference/cat/indices.asciidoc b/docs/reference/cat/indices.asciidoc index 22d240e8d674..3397c05f4973 100644 --- a/docs/reference/cat/indices.asciidoc +++ b/docs/reference/cat/indices.asciidoc @@ -4,7 +4,7 @@ cat indices ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. diff --git a/docs/reference/cat/master.asciidoc b/docs/reference/cat/master.asciidoc index d2ce0c4fdbdc..4ac40ff50be6 100644 --- a/docs/reference/cat/master.asciidoc +++ b/docs/reference/cat/master.asciidoc @@ -4,7 +4,7 @@ cat master ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. diff --git a/docs/reference/cat/nodeattrs.asciidoc b/docs/reference/cat/nodeattrs.asciidoc index e8b1128e0c21..0d354ab570e9 100644 --- a/docs/reference/cat/nodeattrs.asciidoc +++ b/docs/reference/cat/nodeattrs.asciidoc @@ -4,7 +4,7 @@ cat nodeattrs ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. diff --git a/docs/reference/cat/nodes.asciidoc b/docs/reference/cat/nodes.asciidoc index c099449ceae6..c52315423f87 100644 --- a/docs/reference/cat/nodes.asciidoc +++ b/docs/reference/cat/nodes.asciidoc @@ -5,7 +5,7 @@ cat nodes ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. diff --git a/docs/reference/cat/pending_tasks.asciidoc b/docs/reference/cat/pending_tasks.asciidoc index 634fec4ca44d..081a74da0755 100644 --- a/docs/reference/cat/pending_tasks.asciidoc +++ b/docs/reference/cat/pending_tasks.asciidoc @@ -4,7 +4,7 @@ cat pending tasks ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. diff --git a/docs/reference/cat/plugins.asciidoc b/docs/reference/cat/plugins.asciidoc index ed06a8df2a55..c4d830ee52a7 100644 --- a/docs/reference/cat/plugins.asciidoc +++ b/docs/reference/cat/plugins.asciidoc @@ -4,7 +4,7 @@ cat plugins ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. diff --git a/docs/reference/cat/recovery.asciidoc b/docs/reference/cat/recovery.asciidoc index 9ac84e4528fb..9df46f6fe93f 100644 --- a/docs/reference/cat/recovery.asciidoc +++ b/docs/reference/cat/recovery.asciidoc @@ -4,7 +4,7 @@ cat recovery ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. diff --git a/docs/reference/cat/repositories.asciidoc b/docs/reference/cat/repositories.asciidoc index 140bcf8ee702..7e6283336e17 100644 --- a/docs/reference/cat/repositories.asciidoc +++ b/docs/reference/cat/repositories.asciidoc @@ -4,7 +4,7 @@ cat repositories ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. 
diff --git a/docs/reference/cat/segments.asciidoc b/docs/reference/cat/segments.asciidoc index 0dc74ea84989..70b5e597eb95 100644 --- a/docs/reference/cat/segments.asciidoc +++ b/docs/reference/cat/segments.asciidoc @@ -4,7 +4,7 @@ cat segments ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. diff --git a/docs/reference/cat/shards.asciidoc b/docs/reference/cat/shards.asciidoc index 4c20b6a241dd..f73ac6e263cd 100644 --- a/docs/reference/cat/shards.asciidoc +++ b/docs/reference/cat/shards.asciidoc @@ -5,7 +5,7 @@ cat shards ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. diff --git a/docs/reference/cat/snapshots.asciidoc b/docs/reference/cat/snapshots.asciidoc index 27fdc6eb486d..1da739b20272 100644 --- a/docs/reference/cat/snapshots.asciidoc +++ b/docs/reference/cat/snapshots.asciidoc @@ -4,7 +4,7 @@ cat snapshots ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. diff --git a/docs/reference/cat/tasks.asciidoc b/docs/reference/cat/tasks.asciidoc index 6745381f6bab..ff654b30de99 100644 --- a/docs/reference/cat/tasks.asciidoc +++ b/docs/reference/cat/tasks.asciidoc @@ -6,7 +6,7 @@ beta::["The cat task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible.",{es-issue}51628] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. diff --git a/docs/reference/cat/templates.asciidoc b/docs/reference/cat/templates.asciidoc index 624ae244d6f0..78ff60c663d2 100644 --- a/docs/reference/cat/templates.asciidoc +++ b/docs/reference/cat/templates.asciidoc @@ -4,7 +4,7 @@ cat templates ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. diff --git a/docs/reference/cat/thread_pool.asciidoc b/docs/reference/cat/thread_pool.asciidoc index 7e7d20b77fc7..1d8517f170ae 100644 --- a/docs/reference/cat/thread_pool.asciidoc +++ b/docs/reference/cat/thread_pool.asciidoc @@ -4,7 +4,7 @@ cat thread pool ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. diff --git a/docs/reference/cat/trainedmodel.asciidoc b/docs/reference/cat/trainedmodel.asciidoc index ec571af4b9eb..378238323f50 100644 --- a/docs/reference/cat/trainedmodel.asciidoc +++ b/docs/reference/cat/trainedmodel.asciidoc @@ -5,7 +5,7 @@ cat trained model ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. diff --git a/docs/reference/cat/transforms.asciidoc b/docs/reference/cat/transforms.asciidoc index 475c09bcf984..8e5becc5fa76 100644 --- a/docs/reference/cat/transforms.asciidoc +++ b/docs/reference/cat/transforms.asciidoc @@ -5,7 +5,7 @@ cat transforms ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].. 
diff --git a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc index 18cf723901b5..2ad50d68b923 100644 --- a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc @@ -5,7 +5,7 @@ Delete auto-follow pattern ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. diff --git a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc index b4fd82008836..951185d14e92 100644 --- a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc @@ -5,7 +5,7 @@ Get auto-follow pattern ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. diff --git a/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc index 2d70b8da92b4..462ee213ed4e 100644 --- a/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc @@ -5,7 +5,7 @@ Pause auto-follow pattern ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. diff --git a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc index 9b25bf174659..672a11302fdd 100644 --- a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc @@ -5,7 +5,7 @@ Create auto-follow pattern ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. diff --git a/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc index 345b5e764547..d97c41d67c1e 100644 --- a/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc @@ -5,7 +5,7 @@ Resume auto-follow pattern ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. diff --git a/docs/reference/ccr/apis/ccr-apis.asciidoc b/docs/reference/ccr/apis/ccr-apis.asciidoc index 21e859a0bff9..0decb98197d3 100644 --- a/docs/reference/ccr/apis/ccr-apis.asciidoc +++ b/docs/reference/ccr/apis/ccr-apis.asciidoc @@ -2,7 +2,7 @@ [[ccr-apis]] == {ccr-cap} APIs -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. 
diff --git a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc index a913e2e2bd15..4c05faa0a7db 100644 --- a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc @@ -5,7 +5,7 @@ Get follower info ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. diff --git a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc index ff0a72a8b234..29000a98f64b 100644 --- a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc @@ -5,7 +5,7 @@ Get follower stats ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. diff --git a/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc b/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc index b6f397647817..8a7887072f6a 100644 --- a/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc +++ b/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc @@ -5,7 +5,7 @@ Forget follower ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. diff --git a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc index f5ce00cbfccf..c49e9a49b56c 100644 --- a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc @@ -5,7 +5,7 @@ Pause follower ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. diff --git a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc index 7bca84827be1..f6da0110d5c2 100644 --- a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc @@ -5,7 +5,7 @@ Resume follower ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. diff --git a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc index 933e3c567446..56b3195e8a13 100644 --- a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc @@ -5,7 +5,7 @@ Unfollow ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. diff --git a/docs/reference/ccr/apis/follow/put-follow.asciidoc b/docs/reference/ccr/apis/follow/put-follow.asciidoc index 97a4948c43a7..d9538fc43656 100644 --- a/docs/reference/ccr/apis/follow/put-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/put-follow.asciidoc @@ -5,7 +5,7 @@ Create follower ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. 
diff --git a/docs/reference/ccr/apis/get-ccr-stats.asciidoc b/docs/reference/ccr/apis/get-ccr-stats.asciidoc index 69dae2ca6966..e92ad17e1043 100644 --- a/docs/reference/ccr/apis/get-ccr-stats.asciidoc +++ b/docs/reference/ccr/apis/get-ccr-stats.asciidoc @@ -6,7 +6,7 @@ Get {ccr-init} stats ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. diff --git a/docs/reference/cluster/allocation-explain.asciidoc b/docs/reference/cluster/allocation-explain.asciidoc index aa7f091c8670..7099163cc98e 100644 --- a/docs/reference/cluster/allocation-explain.asciidoc +++ b/docs/reference/cluster/allocation-explain.asciidoc @@ -4,7 +4,7 @@ Cluster allocation explain ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. diff --git a/docs/reference/cluster/cluster-info.asciidoc b/docs/reference/cluster/cluster-info.asciidoc index 2106fcfc6975..52ae637d8f46 100644 --- a/docs/reference/cluster/cluster-info.asciidoc +++ b/docs/reference/cluster/cluster-info.asciidoc @@ -7,7 +7,7 @@ experimental::[] Cluster Info ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. diff --git a/docs/reference/cluster/delete-desired-balance.asciidoc b/docs/reference/cluster/delete-desired-balance.asciidoc index 49b555fd5005..45fa147258b7 100644 --- a/docs/reference/cluster/delete-desired-balance.asciidoc +++ b/docs/reference/cluster/delete-desired-balance.asciidoc @@ -6,7 +6,7 @@ NOTE: {cloud-only} -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. diff --git a/docs/reference/cluster/delete-desired-nodes.asciidoc b/docs/reference/cluster/delete-desired-nodes.asciidoc index 93e8f6484895..883bc22f6d96 100644 --- a/docs/reference/cluster/delete-desired-nodes.asciidoc +++ b/docs/reference/cluster/delete-desired-nodes.asciidoc @@ -6,7 +6,7 @@ NOTE: {cloud-only} -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. diff --git a/docs/reference/cluster/get-desired-balance.asciidoc b/docs/reference/cluster/get-desired-balance.asciidoc index 381f263a5ee7..3c6b1dc48719 100644 --- a/docs/reference/cluster/get-desired-balance.asciidoc +++ b/docs/reference/cluster/get-desired-balance.asciidoc @@ -6,7 +6,7 @@ NOTE: {cloud-only} -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. diff --git a/docs/reference/cluster/get-desired-nodes.asciidoc b/docs/reference/cluster/get-desired-nodes.asciidoc index c4182a37b2d0..56af6913e34a 100644 --- a/docs/reference/cluster/get-desired-nodes.asciidoc +++ b/docs/reference/cluster/get-desired-nodes.asciidoc @@ -6,7 +6,7 @@ NOTE: {cloud-only} -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. 
diff --git a/docs/reference/cluster/get-settings.asciidoc b/docs/reference/cluster/get-settings.asciidoc index 0b577356aa76..5c0fe7a2026c 100644 --- a/docs/reference/cluster/get-settings.asciidoc +++ b/docs/reference/cluster/get-settings.asciidoc @@ -4,7 +4,7 @@ Cluster get settings ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. diff --git a/docs/reference/cluster/health.asciidoc b/docs/reference/cluster/health.asciidoc index b24b408964d3..374dd5d4a6f8 100644 --- a/docs/reference/cluster/health.asciidoc +++ b/docs/reference/cluster/health.asciidoc @@ -4,7 +4,7 @@ Cluster health ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. diff --git a/docs/reference/cluster/nodes-hot-threads.asciidoc b/docs/reference/cluster/nodes-hot-threads.asciidoc index f78719772d5a..f8b414453ae6 100644 --- a/docs/reference/cluster/nodes-hot-threads.asciidoc +++ b/docs/reference/cluster/nodes-hot-threads.asciidoc @@ -4,7 +4,7 @@ Nodes hot threads ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. diff --git a/docs/reference/cluster/nodes-info.asciidoc b/docs/reference/cluster/nodes-info.asciidoc index 32959eda3176..7ae6db7aa9a5 100644 --- a/docs/reference/cluster/nodes-info.asciidoc +++ b/docs/reference/cluster/nodes-info.asciidoc @@ -4,7 +4,7 @@ Nodes info ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. diff --git a/docs/reference/cluster/nodes-reload-secure-settings.asciidoc b/docs/reference/cluster/nodes-reload-secure-settings.asciidoc index 0db189a4a396..842ca30c335f 100644 --- a/docs/reference/cluster/nodes-reload-secure-settings.asciidoc +++ b/docs/reference/cluster/nodes-reload-secure-settings.asciidoc @@ -4,7 +4,7 @@ Nodes reload secure settings ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index b1c6d3ab466a..522983035079 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -5,7 +5,7 @@ Nodes stats ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. diff --git a/docs/reference/cluster/nodes-usage.asciidoc b/docs/reference/cluster/nodes-usage.asciidoc index 58815b86a881..c7994e32204a 100644 --- a/docs/reference/cluster/nodes-usage.asciidoc +++ b/docs/reference/cluster/nodes-usage.asciidoc @@ -4,7 +4,7 @@ Nodes feature usage ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. diff --git a/docs/reference/cluster/pending.asciidoc b/docs/reference/cluster/pending.asciidoc index dd5146d0f04d..f5d42a6df76a 100644 --- a/docs/reference/cluster/pending.asciidoc +++ b/docs/reference/cluster/pending.asciidoc @@ -4,7 +4,7 @@ Pending cluster tasks ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. 
diff --git a/docs/reference/cluster/prevalidate-node-removal.asciidoc b/docs/reference/cluster/prevalidate-node-removal.asciidoc index d2c2fe562d56..0a09f1adda77 100644 --- a/docs/reference/cluster/prevalidate-node-removal.asciidoc +++ b/docs/reference/cluster/prevalidate-node-removal.asciidoc @@ -6,7 +6,7 @@ NOTE: {cloud-only} -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. diff --git a/docs/reference/cluster/remote-info.asciidoc b/docs/reference/cluster/remote-info.asciidoc index bbfc44b6b555..691acafd8ddb 100644 --- a/docs/reference/cluster/remote-info.asciidoc +++ b/docs/reference/cluster/remote-info.asciidoc @@ -4,7 +4,7 @@ Remote cluster info ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. diff --git a/docs/reference/cluster/reroute.asciidoc b/docs/reference/cluster/reroute.asciidoc index 65ecb47dc7cb..b393a9a68d2b 100644 --- a/docs/reference/cluster/reroute.asciidoc +++ b/docs/reference/cluster/reroute.asciidoc @@ -4,7 +4,7 @@ Cluster reroute ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. diff --git a/docs/reference/cluster/state.asciidoc b/docs/reference/cluster/state.asciidoc index cda375c514c3..bf2863018893 100644 --- a/docs/reference/cluster/state.asciidoc +++ b/docs/reference/cluster/state.asciidoc @@ -4,7 +4,7 @@ Cluster state ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc index 278ac8ec7821..f078fd2b7f2e 100644 --- a/docs/reference/cluster/stats.asciidoc +++ b/docs/reference/cluster/stats.asciidoc @@ -5,7 +5,7 @@ Cluster stats ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. @@ -25,7 +25,6 @@ Returns cluster statistics. * If the {es} {security-features} are enabled, you must have the `monitor` or `manage` <> to use this API. - [[cluster-stats-api-desc]] ==== {api-description-title} @@ -1397,7 +1396,7 @@ as a human-readable string. `_search`::: -(object) Contains the information about the <> usage in the cluster. +(object) Contains information about <> usage. + .Properties of `_search` [%collapsible%open] @@ -1528,7 +1527,11 @@ This may include requests where partial results were returned, but not requests ======= + ====== +`_esql`::: +(object) Contains information about <> usage. +The structure of the object is the same as the `_search` object above. ===== diff --git a/docs/reference/cluster/tasks.asciidoc b/docs/reference/cluster/tasks.asciidoc index 4d8703900337..79727d9a3078 100644 --- a/docs/reference/cluster/tasks.asciidoc +++ b/docs/reference/cluster/tasks.asciidoc @@ -6,7 +6,7 @@ beta::["The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible.",{es-issue}51628] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-tasks[task management APIs]. 
diff --git a/docs/reference/cluster/update-desired-nodes.asciidoc b/docs/reference/cluster/update-desired-nodes.asciidoc index edcaeb745e7a..f83f55139513 100644 --- a/docs/reference/cluster/update-desired-nodes.asciidoc +++ b/docs/reference/cluster/update-desired-nodes.asciidoc @@ -6,7 +6,7 @@ NOTE: {cloud-only} -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. diff --git a/docs/reference/cluster/update-settings.asciidoc b/docs/reference/cluster/update-settings.asciidoc index 20dfef1bea76..9a718ee413e6 100644 --- a/docs/reference/cluster/update-settings.asciidoc +++ b/docs/reference/cluster/update-settings.asciidoc @@ -4,7 +4,7 @@ Cluster update settings ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. diff --git a/docs/reference/cluster/voting-exclusions.asciidoc b/docs/reference/cluster/voting-exclusions.asciidoc index f1aa03c6550b..e60b3be26508 100644 --- a/docs/reference/cluster/voting-exclusions.asciidoc +++ b/docs/reference/cluster/voting-exclusions.asciidoc @@ -4,7 +4,7 @@ Voting configuration exclusions ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. diff --git a/docs/reference/connector/apis/cancel-connector-sync-job-api.asciidoc b/docs/reference/connector/apis/cancel-connector-sync-job-api.asciidoc index 0c3b7350d0aa..d4db8ce62bc4 100644 --- a/docs/reference/connector/apis/cancel-connector-sync-job-api.asciidoc +++ b/docs/reference/connector/apis/cancel-connector-sync-job-api.asciidoc @@ -6,7 +6,7 @@ beta::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/connector/apis/check-in-connector-api.asciidoc b/docs/reference/connector/apis/check-in-connector-api.asciidoc index 808f835a7484..be7521e93731 100644 --- a/docs/reference/connector/apis/check-in-connector-api.asciidoc +++ b/docs/reference/connector/apis/check-in-connector-api.asciidoc @@ -6,7 +6,7 @@ preview::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/connector/apis/check-in-connector-sync-job-api.asciidoc b/docs/reference/connector/apis/check-in-connector-sync-job-api.asciidoc index 1aa5762e8e6f..5f9b584621c2 100644 --- a/docs/reference/connector/apis/check-in-connector-sync-job-api.asciidoc +++ b/docs/reference/connector/apis/check-in-connector-sync-job-api.asciidoc @@ -6,7 +6,7 @@ preview::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/connector/apis/claim-connector-sync-job-api.asciidoc b/docs/reference/connector/apis/claim-connector-sync-job-api.asciidoc index c8de1efa07f2..565a39c2083a 100644 --- a/docs/reference/connector/apis/claim-connector-sync-job-api.asciidoc +++ b/docs/reference/connector/apis/claim-connector-sync-job-api.asciidoc @@ -6,7 +6,7 @@ preview::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. 
diff --git a/docs/reference/connector/apis/connector-apis.asciidoc b/docs/reference/connector/apis/connector-apis.asciidoc index 33b58abd193b..719db5a31571 100644 --- a/docs/reference/connector/apis/connector-apis.asciidoc +++ b/docs/reference/connector/apis/connector-apis.asciidoc @@ -1,7 +1,7 @@ [[connector-apis]] == Connector APIs -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/connector/apis/create-connector-api.asciidoc b/docs/reference/connector/apis/create-connector-api.asciidoc index 3b7ffd4b4255..3300ce270c34 100644 --- a/docs/reference/connector/apis/create-connector-api.asciidoc +++ b/docs/reference/connector/apis/create-connector-api.asciidoc @@ -6,7 +6,7 @@ beta::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/connector/apis/create-connector-sync-job-api.asciidoc b/docs/reference/connector/apis/create-connector-sync-job-api.asciidoc index 58608bfd7fbd..1a66d47578a8 100644 --- a/docs/reference/connector/apis/create-connector-sync-job-api.asciidoc +++ b/docs/reference/connector/apis/create-connector-sync-job-api.asciidoc @@ -6,7 +6,7 @@ beta::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/connector/apis/delete-connector-api.asciidoc b/docs/reference/connector/apis/delete-connector-api.asciidoc index ae2c2ac3b424..b36a99bc2d8c 100644 --- a/docs/reference/connector/apis/delete-connector-api.asciidoc +++ b/docs/reference/connector/apis/delete-connector-api.asciidoc @@ -6,7 +6,7 @@ beta::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/connector/apis/delete-connector-sync-job-api.asciidoc b/docs/reference/connector/apis/delete-connector-sync-job-api.asciidoc index 67666392ad19..bc906d12cae4 100644 --- a/docs/reference/connector/apis/delete-connector-sync-job-api.asciidoc +++ b/docs/reference/connector/apis/delete-connector-sync-job-api.asciidoc @@ -6,7 +6,7 @@ beta::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/connector/apis/get-connector-api.asciidoc b/docs/reference/connector/apis/get-connector-api.asciidoc index 50ec84617b9d..cff13539f80c 100644 --- a/docs/reference/connector/apis/get-connector-api.asciidoc +++ b/docs/reference/connector/apis/get-connector-api.asciidoc @@ -6,7 +6,7 @@ beta::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/connector/apis/get-connector-sync-job-api.asciidoc b/docs/reference/connector/apis/get-connector-sync-job-api.asciidoc index e30b0e4ec98d..f4ccc59e0315 100644 --- a/docs/reference/connector/apis/get-connector-sync-job-api.asciidoc +++ b/docs/reference/connector/apis/get-connector-sync-job-api.asciidoc @@ -6,7 +6,7 @@ beta::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. 
diff --git a/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc b/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc index 9bcf9fbb4b8e..b5f52e31ac29 100644 --- a/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc +++ b/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc @@ -7,7 +7,7 @@ beta::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/connector/apis/list-connectors-api.asciidoc b/docs/reference/connector/apis/list-connectors-api.asciidoc index 9e7ea17bfaa3..5cc099a6b67e 100644 --- a/docs/reference/connector/apis/list-connectors-api.asciidoc +++ b/docs/reference/connector/apis/list-connectors-api.asciidoc @@ -7,7 +7,7 @@ beta::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/connector/apis/set-connector-sync-job-error-api.asciidoc b/docs/reference/connector/apis/set-connector-sync-job-error-api.asciidoc index 2a29cd9ba54f..b5a0cd667f3e 100644 --- a/docs/reference/connector/apis/set-connector-sync-job-error-api.asciidoc +++ b/docs/reference/connector/apis/set-connector-sync-job-error-api.asciidoc @@ -6,7 +6,7 @@ preview::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/connector/apis/set-connector-sync-job-stats-api.asciidoc b/docs/reference/connector/apis/set-connector-sync-job-stats-api.asciidoc index 63c4a0ee152b..5691280a30dd 100644 --- a/docs/reference/connector/apis/set-connector-sync-job-stats-api.asciidoc +++ b/docs/reference/connector/apis/set-connector-sync-job-stats-api.asciidoc @@ -6,7 +6,7 @@ preview::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/connector/apis/update-connector-api-key-id-api.asciidoc b/docs/reference/connector/apis/update-connector-api-key-id-api.asciidoc index 440b8025138f..8df49c43c128 100644 --- a/docs/reference/connector/apis/update-connector-api-key-id-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-api-key-id-api.asciidoc @@ -6,7 +6,7 @@ beta::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/connector/apis/update-connector-configuration-api.asciidoc b/docs/reference/connector/apis/update-connector-configuration-api.asciidoc index a987b03c7674..d02c332d7f34 100644 --- a/docs/reference/connector/apis/update-connector-configuration-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-configuration-api.asciidoc @@ -6,7 +6,7 @@ beta::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. 
diff --git a/docs/reference/connector/apis/update-connector-error-api.asciidoc b/docs/reference/connector/apis/update-connector-error-api.asciidoc index 278f9377b91d..859a48c31c0c 100644 --- a/docs/reference/connector/apis/update-connector-error-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-error-api.asciidoc @@ -6,7 +6,7 @@ preview::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/connector/apis/update-connector-features-api.asciidoc b/docs/reference/connector/apis/update-connector-features-api.asciidoc index d9da7b767e47..74c512e42cd1 100644 --- a/docs/reference/connector/apis/update-connector-features-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-features-api.asciidoc @@ -6,7 +6,7 @@ beta::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/connector/apis/update-connector-filtering-api.asciidoc b/docs/reference/connector/apis/update-connector-filtering-api.asciidoc index 3cebee37b533..7ba0080cde28 100644 --- a/docs/reference/connector/apis/update-connector-filtering-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-filtering-api.asciidoc @@ -6,7 +6,7 @@ beta::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/connector/apis/update-connector-index-name-api.asciidoc b/docs/reference/connector/apis/update-connector-index-name-api.asciidoc index 7a7e9fc535ef..fbbef6e66ca4 100644 --- a/docs/reference/connector/apis/update-connector-index-name-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-index-name-api.asciidoc @@ -6,7 +6,7 @@ beta::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/connector/apis/update-connector-last-sync-api.asciidoc b/docs/reference/connector/apis/update-connector-last-sync-api.asciidoc index 9fa3aeaa3a56..d827d25c12b4 100644 --- a/docs/reference/connector/apis/update-connector-last-sync-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-last-sync-api.asciidoc @@ -6,7 +6,7 @@ preview::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/connector/apis/update-connector-name-description-api.asciidoc b/docs/reference/connector/apis/update-connector-name-description-api.asciidoc index 0a0a63567706..c0d0568baef3 100644 --- a/docs/reference/connector/apis/update-connector-name-description-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-name-description-api.asciidoc @@ -6,7 +6,7 @@ beta::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. 
diff --git a/docs/reference/connector/apis/update-connector-pipeline-api.asciidoc b/docs/reference/connector/apis/update-connector-pipeline-api.asciidoc index 666897a5f9a8..a886fe6f20da 100644 --- a/docs/reference/connector/apis/update-connector-pipeline-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-pipeline-api.asciidoc @@ -6,7 +6,7 @@ beta::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/connector/apis/update-connector-scheduling-api.asciidoc b/docs/reference/connector/apis/update-connector-scheduling-api.asciidoc index af3b353eaf94..eed3d14ea1d9 100644 --- a/docs/reference/connector/apis/update-connector-scheduling-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-scheduling-api.asciidoc @@ -6,7 +6,7 @@ beta::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/connector/apis/update-connector-service-type-api.asciidoc b/docs/reference/connector/apis/update-connector-service-type-api.asciidoc index e92f8cd96b4c..28b4c72b682d 100644 --- a/docs/reference/connector/apis/update-connector-service-type-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-service-type-api.asciidoc @@ -6,7 +6,7 @@ beta::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/connector/apis/update-connector-status-api.asciidoc b/docs/reference/connector/apis/update-connector-status-api.asciidoc index 910cc32605db..7812cbff89d1 100644 --- a/docs/reference/connector/apis/update-connector-status-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-status-api.asciidoc @@ -6,7 +6,7 @@ preview::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. diff --git a/docs/reference/data-management.asciidoc b/docs/reference/data-management.asciidoc index 4245227a1524..7ef021dc6370 100644 --- a/docs/reference/data-management.asciidoc +++ b/docs/reference/data-management.asciidoc @@ -6,29 +6,26 @@ -- The data you store in {es} generally falls into one of two categories: -* Content: a collection of items you want to search, such as a catalog of products -* Time series data: a stream of continuously-generated timestamped data, such as log entries - -Content might be frequently updated, +* *Content*: a collection of items you want to search, such as a catalog of products +* *Time series data*: a stream of continuously-generated timestamped data, such as log entries +*Content* might be frequently updated, but the value of the content remains relatively constant over time. You want to be able to retrieve items quickly regardless of how old they are. -Time series data keeps accumulating over time, so you need strategies for +*Time series data* keeps accumulating over time, so you need strategies for balancing the value of the data against the cost of storing it. As it ages, it tends to become less important and less-frequently accessed, so you can move it to less expensive, less performant hardware. For your oldest data, what matters is that you have access to the data. It's ok if queries take longer to complete. 
-To help you manage your data, {es} offers you: +To help you manage your data, {es} offers you the following options: -* <> ({ilm-init}) to manage both indices and data streams and it is fully customisable, and -* <> which is the built-in lifecycle of data streams and addresses the most -common lifecycle management needs. +* <> +* <> +* {curator-ref-current}/about.html[Elastic Curator] -preview::["The built-in data stream lifecycle is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but this feature is not subject to the support SLA of official GA features."] - -**{ilm-init}** can be used to manage both indices and data streams and it allows you to: +**{ilm-init}** can be used to manage both indices and data streams. It allows you to do the following: * Define the retention period of your data. The retention period is the minimum time your data will be stored in {es}. Data older than this period can be deleted by {es}. @@ -38,12 +35,24 @@ Data older than this period can be deleted by {es}. for your older indices while reducing operating costs and maintaining search performance. * Perform <> of data stored on less-performant hardware. -**Data stream lifecycle** is less feature rich but is focused on simplicity, so it allows you to easily: +**Data stream lifecycle** is less feature rich but is focused on simplicity. It allows you to do the following: * Define the retention period of your data. The retention period is the minimum time your data will be stored in {es}. Data older than this period can be deleted by {es} at a later time. -* Improve the performance of your data stream by performing background operations that will optimise the way your data -stream is stored. +* Improve the performance of your data stream by performing background operations that will optimise the way your data stream is stored. + +**Elastic Curator** is a tool that allows you to manage your indices and snapshots using user-defined filters and predefined actions. If ILM provides the functionality to manage your index lifecycle, and you have at least a Basic license, consider using ILM in place of Curator. Many stack components make use of ILM by default. {curator-ref-current}/ilm.html[Learn more]. + +NOTE: <> is a deprecated Elasticsearch feature that allows you to manage the amount of data that is stored in your cluster, similar to the downsampling functionality of {ilm-init} and data stream lifecycle. This feature should not be used for new deployments. + +[TIP] +==== +{ilm-init} is not available on {es-serverless}. + +In an {ecloud} or self-managed environment, ILM lets you automatically transition indices through data tiers according to your performance needs and retention requirements. This allows you to balance hardware costs with performance. {es-serverless} eliminates this complexity by optimizing your cluster performance for you. + +Data stream lifecycle is an optimized lifecycle tool that lets you focus on the most common lifecycle management needs, without unnecessary hardware-centric concepts like data tiers. +==== -- include::ilm/index.asciidoc[] diff --git a/docs/reference/data-store-architecture.asciidoc b/docs/reference/data-store-architecture.asciidoc new file mode 100644 index 000000000000..4ee75c15562e --- /dev/null +++ b/docs/reference/data-store-architecture.asciidoc @@ -0,0 +1,18 @@ += Data store architecture + +[partintro] +-- + +{es} is a distributed document store. 
Instead of storing information as rows of columnar data, {es} stores complex data structures that have been serialized as JSON documents. When you have multiple {es} nodes in a cluster, stored documents are distributed across the cluster and can be accessed immediately +from any node. + +The topics in this section provide information about the architecture of {es} and how it stores and retrieves data: + +* <>: Learn about the basic building blocks of an {es} cluster, including nodes, shards, primaries, and replicas. +* <>: Learn how {es} replicates read and write operations across shards and shard copies. +* <>: Learn how {es} allocates and balances shards across nodes. +-- + +include::nodes-shards.asciidoc[] +include::docs/data-replication.asciidoc[leveloffset=-1] +include::modules/shard-ops.asciidoc[] \ No newline at end of file diff --git a/docs/reference/data-streams/data-stream-apis.asciidoc b/docs/reference/data-streams/data-stream-apis.asciidoc index 66662ef4a218..8b952fad59f8 100644 --- a/docs/reference/data-streams/data-stream-apis.asciidoc +++ b/docs/reference/data-streams/data-stream-apis.asciidoc @@ -2,7 +2,7 @@ [[data-stream-apis]] == Data stream APIs -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. diff --git a/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc index 70e901bb43b9..5222d33b5870 100644 --- a/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc @@ -4,7 +4,7 @@ Delete Data Stream Lifecycle ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. diff --git a/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc index 0a76114abfa0..8289fb54d51b 100644 --- a/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc @@ -4,7 +4,7 @@ Explain Data Stream Lifecycle ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. diff --git a/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc b/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc index ba33574b4977..0fbe7de287f7 100644 --- a/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc @@ -4,7 +4,7 @@ Get Data Stream Lifecycle ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. diff --git a/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc index 32e68c0c3c31..57d63fee2ddc 100644 --- a/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc @@ -4,7 +4,7 @@ Get Data Stream Lifecycle ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. 
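To make the nodes, shards, primaries, and replicas introduced in the new architecture overview concrete, here is a minimal sketch, assuming a hypothetical index named `my-index`, of creating an index with an explicit shard layout:

[source,console]
----
PUT /my-index
{
  "settings": {
    "index.number_of_shards": 3,   <1>
    "index.number_of_replicas": 1  <2>
  }
}
----
<1> Three primary shards, distributed across the cluster's data nodes.
<2> One replica of each primary, allocated to a different node than its primary so the index survives a single-node failure.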
diff --git a/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc index e2eb52959c95..c5002cf4882e 100644 --- a/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc @@ -4,7 +4,7 @@ Put Data Stream Lifecycle ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. diff --git a/docs/reference/data-streams/modify-data-streams-api.asciidoc b/docs/reference/data-streams/modify-data-streams-api.asciidoc index 082034213467..2f717f9ec3b4 100644 --- a/docs/reference/data-streams/modify-data-streams-api.asciidoc +++ b/docs/reference/data-streams/modify-data-streams-api.asciidoc @@ -4,7 +4,7 @@ Modify data streams ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. diff --git a/docs/reference/data-streams/promote-data-stream-api.asciidoc b/docs/reference/data-streams/promote-data-stream-api.asciidoc index c8bb575e399f..33005e80e940 100644 --- a/docs/reference/data-streams/promote-data-stream-api.asciidoc +++ b/docs/reference/data-streams/promote-data-stream-api.asciidoc @@ -5,7 +5,7 @@ Promote data stream ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. diff --git a/docs/reference/docs.asciidoc b/docs/reference/docs.asciidoc index 34662401842f..ccdbaaffb2b7 100644 --- a/docs/reference/docs.asciidoc +++ b/docs/reference/docs.asciidoc @@ -7,9 +7,7 @@ For the most up-to-date API details, refer to {api-es}/group/endpoint-document[Document APIs]. -- -This section starts with a short introduction to {es}'s <>, followed by a detailed description of the following CRUD -APIs: +This section describes the following CRUD APIs: .Single document APIs * <> @@ -24,8 +22,6 @@ APIs: * <> * <> -include::docs/data-replication.asciidoc[] - include::docs/index_.asciidoc[] include::docs/get.asciidoc[] diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc index e2093dbf4ff3..78169e841dab 100644 --- a/docs/reference/docs/bulk.asciidoc +++ b/docs/reference/docs/bulk.asciidoc @@ -4,7 +4,7 @@ Bulk ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-document[Document APIs]. diff --git a/docs/reference/docs/data-replication.asciidoc b/docs/reference/docs/data-replication.asciidoc index 2c1a16c81d01..6ee266070e72 100644 --- a/docs/reference/docs/data-replication.asciidoc +++ b/docs/reference/docs/data-replication.asciidoc @@ -1,6 +1,6 @@ [[docs-replication]] -=== Reading and Writing documents +=== Reading and writing documents [discrete] ==== Introduction diff --git a/docs/reference/docs/delete.asciidoc b/docs/reference/docs/delete.asciidoc index 966561909663..046a20abdaff 100644 --- a/docs/reference/docs/delete.asciidoc +++ b/docs/reference/docs/delete.asciidoc @@ -4,7 +4,7 @@ Delete ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-document[Document APIs]. 
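As a usage sketch for the data stream lifecycle retention described in the data management overview earlier in this diff (the data stream name is hypothetical):

[source,console]
----
PUT _data_stream/my-data-stream/_lifecycle
{
  "data_retention": "7d" <1>
}
----
<1> Documents are retained for at least seven days; data older than this becomes eligible for deletion by {es} at a later time.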
diff --git a/docs/reference/docs/get.asciidoc b/docs/reference/docs/get.asciidoc index bdbcb08e186c..a3ff70fb95f6 100644 --- a/docs/reference/docs/get.asciidoc +++ b/docs/reference/docs/get.asciidoc @@ -4,7 +4,7 @@ Get ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-document[Document APIs]. diff --git a/docs/reference/docs/multi-get.asciidoc b/docs/reference/docs/multi-get.asciidoc index b27bd6a97d53..293bd2568a34 100644 --- a/docs/reference/docs/multi-get.asciidoc +++ b/docs/reference/docs/multi-get.asciidoc @@ -4,7 +4,7 @@ Multi get ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-document[Document APIs]. diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index 26e4d1ab67c5..2f6ddd344eaa 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -4,7 +4,7 @@ Reindex ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-document[Document APIs]. diff --git a/docs/reference/docs/termvectors.asciidoc b/docs/reference/docs/termvectors.asciidoc index 285db46fd4d2..d40452fb4875 100644 --- a/docs/reference/docs/termvectors.asciidoc +++ b/docs/reference/docs/termvectors.asciidoc @@ -4,7 +4,7 @@ Term vectors ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-document[Document APIs]. diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index c5fccc86a1cc..c8d68082c8ea 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -4,7 +4,7 @@ Update by query ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-document[Document APIs]. diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index 619cc921c4d6..ae9ae8fe73fc 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -4,7 +4,7 @@ Update ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-document[Document APIs]. diff --git a/docs/reference/eql/delete-async-eql-search-api.asciidoc b/docs/reference/eql/delete-async-eql-search-api.asciidoc index f3c516188c25..2783c9ac0b87 100644 --- a/docs/reference/eql/delete-async-eql-search-api.asciidoc +++ b/docs/reference/eql/delete-async-eql-search-api.asciidoc @@ -6,7 +6,7 @@ Delete async EQL search ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-eql[EQL APIs]. diff --git a/docs/reference/eql/eql-apis.asciidoc b/docs/reference/eql/eql-apis.asciidoc index 0bf7462d8f4d..4d6aafd2039d 100644 --- a/docs/reference/eql/eql-apis.asciidoc +++ b/docs/reference/eql/eql-apis.asciidoc @@ -1,7 +1,7 @@ [[eql-apis]] == EQL APIs -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-eql[EQL APIs]. 
diff --git a/docs/reference/eql/eql-search-api.asciidoc b/docs/reference/eql/eql-search-api.asciidoc index a4928ce82f56..544e4d7325c5 100644 --- a/docs/reference/eql/eql-search-api.asciidoc +++ b/docs/reference/eql/eql-search-api.asciidoc @@ -6,7 +6,7 @@ EQL search ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-eql[EQL APIs]. diff --git a/docs/reference/eql/get-async-eql-search-api.asciidoc b/docs/reference/eql/get-async-eql-search-api.asciidoc index 36f18a7061bc..ff4a07811b73 100644 --- a/docs/reference/eql/get-async-eql-search-api.asciidoc +++ b/docs/reference/eql/get-async-eql-search-api.asciidoc @@ -6,7 +6,7 @@ Get async EQL search ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-eql[EQL APIs]. diff --git a/docs/reference/eql/get-async-eql-status-api.asciidoc b/docs/reference/eql/get-async-eql-status-api.asciidoc index fe188ec9112e..cb3904a64492 100644 --- a/docs/reference/eql/get-async-eql-status-api.asciidoc +++ b/docs/reference/eql/get-async-eql-status-api.asciidoc @@ -6,7 +6,7 @@ Get async EQL search status ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-eql[EQL APIs]. diff --git a/docs/reference/esql/esql-apis.asciidoc b/docs/reference/esql/esql-apis.asciidoc index 57747d3e357e..157f4e4357e7 100644 --- a/docs/reference/esql/esql-apis.asciidoc +++ b/docs/reference/esql/esql-apis.asciidoc @@ -1,7 +1,7 @@ [[esql-apis]] == {esql} APIs -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-esql[ES|QL APIs]. diff --git a/docs/reference/esql/esql-async-query-api.asciidoc b/docs/reference/esql/esql-async-query-api.asciidoc index 76597822a3fa..8cb974cf6773 100644 --- a/docs/reference/esql/esql-async-query-api.asciidoc +++ b/docs/reference/esql/esql-async-query-api.asciidoc @@ -4,7 +4,7 @@ {esql} async query API ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-esql[ES|QL APIs]. diff --git a/docs/reference/esql/esql-async-query-delete-api.asciidoc b/docs/reference/esql/esql-async-query-delete-api.asciidoc index a3f21ad81e44..421c59191f3b 100644 --- a/docs/reference/esql/esql-async-query-delete-api.asciidoc +++ b/docs/reference/esql/esql-async-query-delete-api.asciidoc @@ -4,7 +4,7 @@ {esql} async query delete API ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-esql[ES|QL APIs]. diff --git a/docs/reference/esql/esql-async-query-get-api.asciidoc b/docs/reference/esql/esql-async-query-get-api.asciidoc index 54d62ede608e..693e96861adb 100644 --- a/docs/reference/esql/esql-async-query-get-api.asciidoc +++ b/docs/reference/esql/esql-async-query-get-api.asciidoc @@ -4,7 +4,7 @@ {esql} async query get API ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-esql[ES|QL APIs]. 
diff --git a/docs/reference/esql/esql-query-api.asciidoc b/docs/reference/esql/esql-query-api.asciidoc index 689a534facab..eac66ecfde2d 100644 --- a/docs/reference/esql/esql-query-api.asciidoc +++ b/docs/reference/esql/esql-query-api.asciidoc @@ -4,7 +4,7 @@ {esql} query API ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-esql[ES|QL APIs]. diff --git a/docs/reference/features/apis/features-apis.asciidoc b/docs/reference/features/apis/features-apis.asciidoc index 94055a0a41d7..2582446340f1 100644 --- a/docs/reference/features/apis/features-apis.asciidoc +++ b/docs/reference/features/apis/features-apis.asciidoc @@ -1,7 +1,7 @@ [[features-apis]] == Features APIs -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-features[Features APIs]. diff --git a/docs/reference/features/apis/get-features-api.asciidoc b/docs/reference/features/apis/get-features-api.asciidoc index 3df7ccb53689..62986d7728ca 100644 --- a/docs/reference/features/apis/get-features-api.asciidoc +++ b/docs/reference/features/apis/get-features-api.asciidoc @@ -4,7 +4,7 @@ Get features ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-features[Features APIs]. diff --git a/docs/reference/features/apis/reset-features-api.asciidoc b/docs/reference/features/apis/reset-features-api.asciidoc index 5223315caa56..e2d3f249304b 100644 --- a/docs/reference/features/apis/reset-features-api.asciidoc +++ b/docs/reference/features/apis/reset-features-api.asciidoc @@ -6,7 +6,7 @@ experimental::[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-features[Features APIs]. diff --git a/docs/reference/fleet/fleet-multi-search.asciidoc b/docs/reference/fleet/fleet-multi-search.asciidoc index eaec76ebdd83..5673e1abbbd1 100644 --- a/docs/reference/fleet/fleet-multi-search.asciidoc +++ b/docs/reference/fleet/fleet-multi-search.asciidoc @@ -5,7 +5,7 @@ Fleet multi search ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-fleet[{fleet} APIs]. diff --git a/docs/reference/fleet/fleet-search.asciidoc b/docs/reference/fleet/fleet-search.asciidoc index 10e77d9e6652..81ad0c9a7aa9 100644 --- a/docs/reference/fleet/fleet-search.asciidoc +++ b/docs/reference/fleet/fleet-search.asciidoc @@ -5,7 +5,7 @@ Fleet search ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-fleet[{fleet} APIs]. diff --git a/docs/reference/fleet/index.asciidoc b/docs/reference/fleet/index.asciidoc index bdffee227029..8cd3cd59f21b 100644 --- a/docs/reference/fleet/index.asciidoc +++ b/docs/reference/fleet/index.asciidoc @@ -2,7 +2,7 @@ [[fleet-apis]] == Fleet APIs -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-fleet[{fleet} APIs]. diff --git a/docs/reference/graph/explore.asciidoc b/docs/reference/graph/explore.asciidoc index 1f5faeda76ef..60e9edb3b0f3 100644 --- a/docs/reference/graph/explore.asciidoc +++ b/docs/reference/graph/explore.asciidoc @@ -2,7 +2,7 @@ [[graph-explore-api]] == Graph explore API -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-graph[Graph explore APIs]. 
diff --git a/docs/reference/health/health.asciidoc b/docs/reference/health/health.asciidoc index 7077cb28f894..606804a83bbd 100644 --- a/docs/reference/health/health.asciidoc +++ b/docs/reference/health/health.asciidoc @@ -4,7 +4,7 @@ Health ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-health_report[Cluster health APIs]. diff --git a/docs/reference/high-availability.asciidoc b/docs/reference/high-availability.asciidoc index 2f34b6bc1bb2..37e2a38aa0f2 100644 --- a/docs/reference/high-availability.asciidoc +++ b/docs/reference/high-availability.asciidoc @@ -3,28 +3,28 @@ [partintro] -- -Your data is important to you. Keeping it safe and available is important -to {es}. Sometimes your cluster may experience hardware failure or a power -loss. To help you plan for this, {es} offers a number of features -to achieve high availability despite failures. +Your data is important to you. Keeping it safe and available is important to Elastic. Sometimes your cluster may experience hardware failure or a power loss. To help you plan for this, {es} offers a number of features to achieve high availability despite failures. Depending on your deployment type, you might need to provision servers in different zones or configure external repositories to meet your organization's availability needs. -* With proper planning, a cluster can be - <> to many of the - things that commonly go wrong, from the loss of a single node or network - connection right up to a zone-wide outage such as power loss. +* *<>* ++ +Distributed systems like Elasticsearch are designed to keep working even if some of their components have failed. An Elasticsearch cluster can continue operating normally if some of its nodes are unavailable or disconnected, as long as there are enough well-connected nodes to take over the unavailable node's responsibilities. ++ +If you're designing a smaller cluster, you might focus on making your cluster resilient to single-node failures. Designers of larger clusters must also consider cases where multiple nodes fail at the same time. +// need to improve connections to ECE, EC hosted, ECK pod/zone docs in the child topics -* You can use <> to replicate data to a remote _follower_ - cluster which may be in a different data centre or even on a different - continent from the leader cluster. The follower cluster acts as a hot - standby, ready for you to fail over in the event of a disaster so severe that - the leader cluster fails. The follower cluster can also act as a geo-replica - to serve searches from nearby clients. +* *<>* ++ +To effectively distribute read and write operations across nodes, the nodes in a cluster need good, reliable connections to each other. To provide better connections, you typically co-locate the nodes in the same data center or nearby data centers. ++ +Co-locating nodes in a single location exposes you to the risk of a single outage taking your entire cluster offline. To maintain high availability, you can prepare a second cluster that can take over in case of disaster by implementing {ccr} (CCR). ++ +CCR provides a way to automatically synchronize indices from a leader cluster to a follower cluster. This cluster could be in a different data center or even a different continent from the leader cluster. If the primary cluster fails, the secondary cluster can take over. ++ +TIP: You can also use CCR to create secondary clusters to serve read requests in geo-proximity to your users. 
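As an illustrative sketch of the CCR follow workflow described in the bullet above, assuming hypothetical index and cluster names and a remote cluster connection already registered on the follower side:

[source,console]
----
PUT /follower-index/_ccr/follow
{
  "remote_cluster": "leader-cluster", <1>
  "leader_index": "leader-index"      <2>
}
----
<1> The remote cluster connection, as configured in the follower cluster's settings.
<2> The index on the leader cluster to replicate into `follower-index`.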
-* The last line of defence against data loss is to take - <> of your cluster so that you can - restore a completely fresh copy of it elsewhere if needed. +* *<>* ++ +Take snapshots of your cluster that can be restored in case of failure. -- include::high-availability/cluster-design.asciidoc[] - -include::ccr/index.asciidoc[] diff --git a/docs/reference/ilm/apis/delete-lifecycle.asciidoc b/docs/reference/ilm/apis/delete-lifecycle.asciidoc index fc9a35e4ef57..64a421f091ef 100644 --- a/docs/reference/ilm/apis/delete-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/delete-lifecycle.asciidoc @@ -5,6 +5,12 @@ Delete policy ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. +-- + Deletes an index <> policy. [[ilm-delete-lifecycle-request]] diff --git a/docs/reference/ilm/apis/explain.asciidoc b/docs/reference/ilm/apis/explain.asciidoc index 31c6ae9e82ec..4cc266718644 100644 --- a/docs/reference/ilm/apis/explain.asciidoc +++ b/docs/reference/ilm/apis/explain.asciidoc @@ -5,6 +5,12 @@ Explain lifecycle ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. +-- + Retrieves the current <> status for one or more indices. For data streams, the API retrieves the current lifecycle status for the stream's backing indices. diff --git a/docs/reference/ilm/apis/get-lifecycle.asciidoc b/docs/reference/ilm/apis/get-lifecycle.asciidoc index b4e07389a9fb..b02d129ebe73 100644 --- a/docs/reference/ilm/apis/get-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/get-lifecycle.asciidoc @@ -5,6 +5,12 @@ Get policy ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. +-- + Retrieves a <> policy. [[ilm-get-lifecycle-request]] diff --git a/docs/reference/ilm/apis/get-status.asciidoc b/docs/reference/ilm/apis/get-status.asciidoc index f2ab8d65ec9a..648080f26b79 100644 --- a/docs/reference/ilm/apis/get-status.asciidoc +++ b/docs/reference/ilm/apis/get-status.asciidoc @@ -7,6 +7,12 @@ Get {ilm} status ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. +-- + Retrieves the current <> ({ilm-init}) status. You can start or stop {ilm-init} with the <> and diff --git a/docs/reference/ilm/apis/ilm-api.asciidoc b/docs/reference/ilm/apis/ilm-api.asciidoc index 3a6c8430bb4c..9cc594be1bea 100644 --- a/docs/reference/ilm/apis/ilm-api.asciidoc +++ b/docs/reference/ilm/apis/ilm-api.asciidoc @@ -1,13 +1,12 @@ [[index-lifecycle-management-api]] == {ilm-cap} APIs +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. -- -https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ilm - You use the following APIs to set up policies to automatically manage the index lifecycle. For more information about {ilm} ({ilm-init}), see <>. diff --git a/docs/reference/ilm/apis/migrate-to-data-tiers.asciidoc b/docs/reference/ilm/apis/migrate-to-data-tiers.asciidoc index 8ba57120a8a6..76810170daa1 100644 --- a/docs/reference/ilm/apis/migrate-to-data-tiers.asciidoc +++ b/docs/reference/ilm/apis/migrate-to-data-tiers.asciidoc @@ -5,6 +5,12 @@ Migrate indices, ILM policies, and legacy, composable and component templates to data tiers routing ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. 
+-- + Switches the indices, ILM policies, and legacy, composable and component templates from using custom node attributes and <> to using <>, and optionally deletes one legacy index template. diff --git a/docs/reference/ilm/apis/move-to-step.asciidoc b/docs/reference/ilm/apis/move-to-step.asciidoc index f3441fa997cf..a7a8cacf551d 100644 --- a/docs/reference/ilm/apis/move-to-step.asciidoc +++ b/docs/reference/ilm/apis/move-to-step.asciidoc @@ -5,6 +5,12 @@ Move to step ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. +-- + Triggers execution of a specific step in the <> policy. [[ilm-move-to-step-request]] diff --git a/docs/reference/ilm/apis/put-lifecycle.asciidoc b/docs/reference/ilm/apis/put-lifecycle.asciidoc index 390f6b1bb4d1..3aa691ab06da 100644 --- a/docs/reference/ilm/apis/put-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/put-lifecycle.asciidoc @@ -5,6 +5,12 @@ Create or update lifecycle policy ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. +-- + Creates or updates <> policy. See <> for definitions of policy components. diff --git a/docs/reference/ilm/apis/remove-policy-from-index.asciidoc b/docs/reference/ilm/apis/remove-policy-from-index.asciidoc index 107cab4d5aa1..5b5842f28619 100644 --- a/docs/reference/ilm/apis/remove-policy-from-index.asciidoc +++ b/docs/reference/ilm/apis/remove-policy-from-index.asciidoc @@ -5,6 +5,12 @@ Remove policy ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. +-- + Removes assigned <> policies from an index or a data stream's backing indices. diff --git a/docs/reference/ilm/apis/retry-policy.asciidoc b/docs/reference/ilm/apis/retry-policy.asciidoc index 8f01f15e0c3a..a41d064b7340 100644 --- a/docs/reference/ilm/apis/retry-policy.asciidoc +++ b/docs/reference/ilm/apis/retry-policy.asciidoc @@ -5,6 +5,12 @@ Retry policy ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. +-- + Retry executing the <> policy for an index that is in the ERROR step. [[ilm-retry-policy-request]] diff --git a/docs/reference/ilm/apis/start.asciidoc b/docs/reference/ilm/apis/start.asciidoc index c38b3d9ca883..ce9b64455be0 100644 --- a/docs/reference/ilm/apis/start.asciidoc +++ b/docs/reference/ilm/apis/start.asciidoc @@ -7,6 +7,12 @@ Start {ilm} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. +-- + Start the <> ({ilm-init}) plugin. [[ilm-start-request]] diff --git a/docs/reference/ilm/apis/stop.asciidoc b/docs/reference/ilm/apis/stop.asciidoc index a6100d794c2d..50d8aaf4e1fd 100644 --- a/docs/reference/ilm/apis/stop.asciidoc +++ b/docs/reference/ilm/apis/stop.asciidoc @@ -7,6 +7,12 @@ Stop {ilm} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. +-- + Stop the <> ({ilm-init}) plugin. 
[[ilm-stop-request]] diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 18052cfb64e8..8e1c211eb942 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -76,8 +76,12 @@ include::autoscaling/index.asciidoc[] include::snapshot-restore/index.asciidoc[] +include::ccr/index.asciidoc[leveloffset=-1] + // reference +include::data-store-architecture.asciidoc[] + include::rest-api/index.asciidoc[] include::commands/index.asciidoc[] diff --git a/docs/reference/indices/add-alias.asciidoc b/docs/reference/indices/add-alias.asciidoc index b8f6362f974c..13b49f5a47dd 100644 --- a/docs/reference/indices/add-alias.asciidoc +++ b/docs/reference/indices/add-alias.asciidoc @@ -4,7 +4,7 @@ Create or update alias ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/alias-exists.asciidoc b/docs/reference/indices/alias-exists.asciidoc index 1c40779323de..c086143ed3f1 100644 --- a/docs/reference/indices/alias-exists.asciidoc +++ b/docs/reference/indices/alias-exists.asciidoc @@ -4,7 +4,7 @@ Alias exists ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/aliases.asciidoc b/docs/reference/indices/aliases.asciidoc index cff331d88af9..243f436f55b5 100644 --- a/docs/reference/indices/aliases.asciidoc +++ b/docs/reference/indices/aliases.asciidoc @@ -4,7 +4,7 @@ Aliases ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/analyze.asciidoc b/docs/reference/indices/analyze.asciidoc index 49e1c9de749f..a211a44730a6 100644 --- a/docs/reference/indices/analyze.asciidoc +++ b/docs/reference/indices/analyze.asciidoc @@ -4,7 +4,7 @@ Analyze ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/apis/reload-analyzers.asciidoc b/docs/reference/indices/apis/reload-analyzers.asciidoc index ca5f540564f8..a27a3bb859cf 100644 --- a/docs/reference/indices/apis/reload-analyzers.asciidoc +++ b/docs/reference/indices/apis/reload-analyzers.asciidoc @@ -2,6 +2,12 @@ [[indices-reload-analyzers]] == Reload search analyzers API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/operation/operation-indices-reload-search-analyzers[Reload search analyzers]. +-- + Reloads an index's <> and their resources. For data streams, the API reloads search analyzers and resources for the stream's backing indices. diff --git a/docs/reference/indices/apis/unfreeze.asciidoc b/docs/reference/indices/apis/unfreeze.asciidoc index ece1f37c7784..5d04d44db744 100644 --- a/docs/reference/indices/apis/unfreeze.asciidoc +++ b/docs/reference/indices/apis/unfreeze.asciidoc @@ -17,7 +17,7 @@ You can use this API to unfreeze indices that were frozen in 7.x. Frozen indices are not related to the frozen data tier. ==== -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. 
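The reload search analyzers endpoint touched above takes no request body; a minimal sketch, assuming a hypothetical index `my-index` whose search analyzer references an updatable resource such as a synonyms file:

[source,console]
----
POST /my-index/_reload_search_analyzers
----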
diff --git a/docs/reference/indices/clearcache.asciidoc b/docs/reference/indices/clearcache.asciidoc index 19b2d204356a..f981a6d69dd6 100644 --- a/docs/reference/indices/clearcache.asciidoc +++ b/docs/reference/indices/clearcache.asciidoc @@ -4,7 +4,7 @@ Clear cache ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/clone-index.asciidoc b/docs/reference/indices/clone-index.asciidoc index 192cf7035b84..734ff33e4821 100644 --- a/docs/reference/indices/clone-index.asciidoc +++ b/docs/reference/indices/clone-index.asciidoc @@ -4,7 +4,7 @@ Clone index ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/close.asciidoc b/docs/reference/indices/close.asciidoc index a3014f811a1b..56d318aad2f7 100644 --- a/docs/reference/indices/close.asciidoc +++ b/docs/reference/indices/close.asciidoc @@ -4,7 +4,7 @@ Close index ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/create-data-stream.asciidoc b/docs/reference/indices/create-data-stream.asciidoc index 278670047d4c..dd2d47111794 100644 --- a/docs/reference/indices/create-data-stream.asciidoc +++ b/docs/reference/indices/create-data-stream.asciidoc @@ -5,7 +5,7 @@ Create data stream ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. diff --git a/docs/reference/indices/create-index.asciidoc b/docs/reference/indices/create-index.asciidoc index 9a3934efa718..a210b05b2bcf 100644 --- a/docs/reference/indices/create-index.asciidoc +++ b/docs/reference/indices/create-index.asciidoc @@ -4,7 +4,7 @@ Create index ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/dangling-index-delete.asciidoc b/docs/reference/indices/dangling-index-delete.asciidoc index 0afe9e0204bc..b1fd6790972c 100644 --- a/docs/reference/indices/dangling-index-delete.asciidoc +++ b/docs/reference/indices/dangling-index-delete.asciidoc @@ -4,7 +4,7 @@ Delete dangling index ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/dangling-index-import.asciidoc b/docs/reference/indices/dangling-index-import.asciidoc index 2b5922a2c0cf..e266ecd2c9c7 100644 --- a/docs/reference/indices/dangling-index-import.asciidoc +++ b/docs/reference/indices/dangling-index-import.asciidoc @@ -4,7 +4,7 @@ Import dangling index ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/dangling-indices-list.asciidoc b/docs/reference/indices/dangling-indices-list.asciidoc index b098cb696e5f..2b7dcdc497eb 100644 --- a/docs/reference/indices/dangling-indices-list.asciidoc +++ b/docs/reference/indices/dangling-indices-list.asciidoc @@ -4,7 +4,7 @@ List dangling indices ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. 
diff --git a/docs/reference/indices/data-stream-stats.asciidoc b/docs/reference/indices/data-stream-stats.asciidoc index 1d664e85628e..cce145b4dadc 100644 --- a/docs/reference/indices/data-stream-stats.asciidoc +++ b/docs/reference/indices/data-stream-stats.asciidoc @@ -5,7 +5,7 @@ Data stream stats ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. diff --git a/docs/reference/indices/delete-alias.asciidoc b/docs/reference/indices/delete-alias.asciidoc index 5a8baa262d99..c204c8957935 100644 --- a/docs/reference/indices/delete-alias.asciidoc +++ b/docs/reference/indices/delete-alias.asciidoc @@ -4,7 +4,7 @@ Delete alias ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/delete-component-template.asciidoc b/docs/reference/indices/delete-component-template.asciidoc index 0affd9e4d424..27c1e00a5d57 100644 --- a/docs/reference/indices/delete-component-template.asciidoc +++ b/docs/reference/indices/delete-component-template.asciidoc @@ -4,7 +4,7 @@ Delete component template ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/delete-data-stream.asciidoc b/docs/reference/indices/delete-data-stream.asciidoc index b8339624c837..1cb43a615ede 100644 --- a/docs/reference/indices/delete-data-stream.asciidoc +++ b/docs/reference/indices/delete-data-stream.asciidoc @@ -5,7 +5,7 @@ Delete data stream ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. diff --git a/docs/reference/indices/delete-index-template-v1.asciidoc b/docs/reference/indices/delete-index-template-v1.asciidoc index e8d5e2e355a5..e035c13a7bce 100644 --- a/docs/reference/indices/delete-index-template-v1.asciidoc +++ b/docs/reference/indices/delete-index-template-v1.asciidoc @@ -9,7 +9,7 @@ templates>>, which are deprecated and will be replaced by the composable templates introduced in {es} 7.8. For information about composable templates, see <>. -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/delete-index-template.asciidoc b/docs/reference/indices/delete-index-template.asciidoc index 430a18470da4..23713f62fa03 100644 --- a/docs/reference/indices/delete-index-template.asciidoc +++ b/docs/reference/indices/delete-index-template.asciidoc @@ -4,7 +4,7 @@ Delete index template ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/delete-index.asciidoc b/docs/reference/indices/delete-index.asciidoc index ec147fb10ff3..81ee7b502d37 100644 --- a/docs/reference/indices/delete-index.asciidoc +++ b/docs/reference/indices/delete-index.asciidoc @@ -4,7 +4,7 @@ Delete index ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. 
diff --git a/docs/reference/indices/diskusage.asciidoc b/docs/reference/indices/diskusage.asciidoc index 85d84ea9ad43..6db122ab7d83 100644 --- a/docs/reference/indices/diskusage.asciidoc +++ b/docs/reference/indices/diskusage.asciidoc @@ -6,7 +6,7 @@ experimental[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/downsample-data-stream.asciidoc b/docs/reference/indices/downsample-data-stream.asciidoc index ecff35033e2d..a99d7b246ba4 100644 --- a/docs/reference/indices/downsample-data-stream.asciidoc +++ b/docs/reference/indices/downsample-data-stream.asciidoc @@ -5,7 +5,7 @@ Downsample ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. diff --git a/docs/reference/indices/field-usage-stats.asciidoc b/docs/reference/indices/field-usage-stats.asciidoc index f50c97ceddf0..cbb65f8c2eff 100644 --- a/docs/reference/indices/field-usage-stats.asciidoc +++ b/docs/reference/indices/field-usage-stats.asciidoc @@ -6,7 +6,7 @@ experimental[] -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc index 03fd2c175ba5..a458e6906b62 100644 --- a/docs/reference/indices/flush.asciidoc +++ b/docs/reference/indices/flush.asciidoc @@ -4,7 +4,7 @@ Flush ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/forcemerge.asciidoc b/docs/reference/indices/forcemerge.asciidoc index 13579740c966..cddf368ce239 100644 --- a/docs/reference/indices/forcemerge.asciidoc +++ b/docs/reference/indices/forcemerge.asciidoc @@ -4,7 +4,7 @@ Force merge ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/get-alias.asciidoc b/docs/reference/indices/get-alias.asciidoc index 0877eb4aea7a..d305bf02e746 100644 --- a/docs/reference/indices/get-alias.asciidoc +++ b/docs/reference/indices/get-alias.asciidoc @@ -4,7 +4,7 @@ Get alias ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/get-component-template.asciidoc b/docs/reference/indices/get-component-template.asciidoc index a83871d927c9..33676c0cf2a7 100644 --- a/docs/reference/indices/get-component-template.asciidoc +++ b/docs/reference/indices/get-component-template.asciidoc @@ -4,7 +4,7 @@ Get component template ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/get-data-stream.asciidoc b/docs/reference/indices/get-data-stream.asciidoc index 88f136d0726f..ccab53f020e5 100644 --- a/docs/reference/indices/get-data-stream.asciidoc +++ b/docs/reference/indices/get-data-stream.asciidoc @@ -5,7 +5,7 @@ Get data stream ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. 
diff --git a/docs/reference/indices/get-field-mapping.asciidoc b/docs/reference/indices/get-field-mapping.asciidoc index aac0ae9801f4..4b25e0e92554 100644 --- a/docs/reference/indices/get-field-mapping.asciidoc +++ b/docs/reference/indices/get-field-mapping.asciidoc @@ -4,7 +4,7 @@ Get field mapping ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/get-index-template-v1.asciidoc b/docs/reference/indices/get-index-template-v1.asciidoc index 03f7e2873903..5072a194319e 100644 --- a/docs/reference/indices/get-index-template-v1.asciidoc +++ b/docs/reference/indices/get-index-template-v1.asciidoc @@ -8,7 +8,7 @@ IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in {es} 7.8. For information about composable templates, see <>. -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/get-index-template.asciidoc b/docs/reference/indices/get-index-template.asciidoc index a1fb974b2bab..9d27bf5b2b2a 100644 --- a/docs/reference/indices/get-index-template.asciidoc +++ b/docs/reference/indices/get-index-template.asciidoc @@ -4,7 +4,7 @@ Get index template ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/get-index.asciidoc b/docs/reference/indices/get-index.asciidoc index 691493e078c4..4e026f4ff07a 100644 --- a/docs/reference/indices/get-index.asciidoc +++ b/docs/reference/indices/get-index.asciidoc @@ -4,7 +4,7 @@ Get index ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/get-mapping.asciidoc b/docs/reference/indices/get-mapping.asciidoc index 9fd02e6fba89..543bda6cb324 100644 --- a/docs/reference/indices/get-mapping.asciidoc +++ b/docs/reference/indices/get-mapping.asciidoc @@ -4,7 +4,7 @@ Get mapping ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/get-settings.asciidoc b/docs/reference/indices/get-settings.asciidoc index 4b3929664bf1..d48514b9a349 100644 --- a/docs/reference/indices/get-settings.asciidoc +++ b/docs/reference/indices/get-settings.asciidoc @@ -4,7 +4,7 @@ Get index settings ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/index-template-exists-v1.asciidoc b/docs/reference/indices/index-template-exists-v1.asciidoc index d9aec6f4b86c..f7b3a4416715 100644 --- a/docs/reference/indices/index-template-exists-v1.asciidoc +++ b/docs/reference/indices/index-template-exists-v1.asciidoc @@ -9,7 +9,7 @@ templates>>, which are deprecated and will be replaced by the composable templates introduced in {es} 7.8. For information about composable templates, see <>. -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. 
diff --git a/docs/reference/indices/index-templates.asciidoc b/docs/reference/indices/index-templates.asciidoc index 5b152ecf177e..90c4a6952446 100644 --- a/docs/reference/indices/index-templates.asciidoc +++ b/docs/reference/indices/index-templates.asciidoc @@ -61,7 +61,7 @@ applying the templates, do one or more of the following: - Use a non-overlapping index pattern. -- Assign templates with an overlapping pattern a `priority` higher than `200`. +- Assign templates with an overlapping pattern a `priority` higher than `500`. For example, if you don't use {fleet} or {agent} and want to create a template for the `logs-*` index pattern, assign your template a priority of `500`. This ensures your template is applied instead of the built-in template for diff --git a/docs/reference/indices/indices-exists.asciidoc b/docs/reference/indices/indices-exists.asciidoc index 3594619c22fc..11da491adf8e 100644 --- a/docs/reference/indices/indices-exists.asciidoc +++ b/docs/reference/indices/indices-exists.asciidoc @@ -4,7 +4,7 @@ Exists ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/migrate-to-data-stream.asciidoc b/docs/reference/indices/migrate-to-data-stream.asciidoc index 8c5a8d7b1376..745e3e28683c 100644 --- a/docs/reference/indices/migrate-to-data-stream.asciidoc +++ b/docs/reference/indices/migrate-to-data-stream.asciidoc @@ -5,7 +5,7 @@ Migrate to data stream ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. diff --git a/docs/reference/indices/open-close.asciidoc b/docs/reference/indices/open-close.asciidoc index e9502b3f8328..fe00e5a968d9 100644 --- a/docs/reference/indices/open-close.asciidoc +++ b/docs/reference/indices/open-close.asciidoc @@ -4,7 +4,7 @@ Open index ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/put-component-template.asciidoc b/docs/reference/indices/put-component-template.asciidoc index 9104008d66c5..9f129c3507d8 100644 --- a/docs/reference/indices/put-component-template.asciidoc +++ b/docs/reference/indices/put-component-template.asciidoc @@ -4,7 +4,7 @@ Create or update component template ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/put-index-template-v1.asciidoc b/docs/reference/indices/put-index-template-v1.asciidoc index e23e8995cca6..8b07acfdadba 100644 --- a/docs/reference/indices/put-index-template-v1.asciidoc +++ b/docs/reference/indices/put-index-template-v1.asciidoc @@ -8,7 +8,7 @@ IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in {es} 7.8. For information about composable templates, see <>. -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. 
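The `index-templates.asciidoc` hunk above raises the `priority` threshold in the guidance from `200` to `500`; the page's own example assigns `500` to a `logs-*` template. A minimal sketch of that example as a concrete request (the template name and settings are placeholders):

[source,console]
----
PUT _index_template/my-logs-template
{
  "index_patterns": ["logs-*"],
  "priority": 500, <1>
  "template": {
    "settings": {
      "number_of_shards": 1
    }
  }
}
----
<1> High enough to take precedence over the built-in templates that match `logs-*`, per the updated guidance.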
diff --git a/docs/reference/indices/put-index-template.asciidoc b/docs/reference/indices/put-index-template.asciidoc index 17035daeca15..fac928c4a61b 100644 --- a/docs/reference/indices/put-index-template.asciidoc +++ b/docs/reference/indices/put-index-template.asciidoc @@ -4,7 +4,7 @@ Create or update index template ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/put-mapping.asciidoc b/docs/reference/indices/put-mapping.asciidoc index acd5df00cb77..479bdff22a80 100644 --- a/docs/reference/indices/put-mapping.asciidoc +++ b/docs/reference/indices/put-mapping.asciidoc @@ -4,7 +4,7 @@ Update mapping ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/recovery.asciidoc b/docs/reference/indices/recovery.asciidoc index 44094eea73f6..032ffc5c5a08 100644 --- a/docs/reference/indices/recovery.asciidoc +++ b/docs/reference/indices/recovery.asciidoc @@ -4,7 +4,7 @@ Index recovery ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/refresh.asciidoc b/docs/reference/indices/refresh.asciidoc index 60774ffd6400..6cc78b22323c 100644 --- a/docs/reference/indices/refresh.asciidoc +++ b/docs/reference/indices/refresh.asciidoc @@ -4,7 +4,7 @@ Refresh ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/resolve-cluster.asciidoc b/docs/reference/indices/resolve-cluster.asciidoc index fc0301916e68..e623cb983e86 100644 --- a/docs/reference/indices/resolve-cluster.asciidoc +++ b/docs/reference/indices/resolve-cluster.asciidoc @@ -4,7 +4,7 @@ Resolve cluster ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/resolve.asciidoc b/docs/reference/indices/resolve.asciidoc index fb6c97d9ecf4..f0b91695b0aa 100644 --- a/docs/reference/indices/resolve.asciidoc +++ b/docs/reference/indices/resolve.asciidoc @@ -4,7 +4,7 @@ Resolve index ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/rollover-index.asciidoc b/docs/reference/indices/rollover-index.asciidoc index b15444977ba8..cc93204469b8 100644 --- a/docs/reference/indices/rollover-index.asciidoc +++ b/docs/reference/indices/rollover-index.asciidoc @@ -4,7 +4,7 @@ Rollover ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/segments.asciidoc b/docs/reference/indices/segments.asciidoc index df999ec4c897..b2b7b244321f 100644 --- a/docs/reference/indices/segments.asciidoc +++ b/docs/reference/indices/segments.asciidoc @@ -4,7 +4,7 @@ Index segments ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. 
diff --git a/docs/reference/indices/shard-stores.asciidoc b/docs/reference/indices/shard-stores.asciidoc index b6f2aca78be0..35f6a0915caa 100644 --- a/docs/reference/indices/shard-stores.asciidoc +++ b/docs/reference/indices/shard-stores.asciidoc @@ -4,7 +4,7 @@ Index shard stores ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/shrink-index.asciidoc b/docs/reference/indices/shrink-index.asciidoc index 1f4e849abff1..931731fceb49 100644 --- a/docs/reference/indices/shrink-index.asciidoc +++ b/docs/reference/indices/shrink-index.asciidoc @@ -4,7 +4,7 @@ Shrink index ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/simulate-index.asciidoc b/docs/reference/indices/simulate-index.asciidoc index 598e4a12acb2..414b54aa5f41 100644 --- a/docs/reference/indices/simulate-index.asciidoc +++ b/docs/reference/indices/simulate-index.asciidoc @@ -4,7 +4,7 @@ Simulate index ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/simulate-template.asciidoc b/docs/reference/indices/simulate-template.asciidoc index cb59fd811ce4..1ea72970b647 100644 --- a/docs/reference/indices/simulate-template.asciidoc +++ b/docs/reference/indices/simulate-template.asciidoc @@ -4,7 +4,7 @@ Simulate template ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/split-index.asciidoc b/docs/reference/indices/split-index.asciidoc index a4ee3faa9dc2..4bda77883973 100644 --- a/docs/reference/indices/split-index.asciidoc +++ b/docs/reference/indices/split-index.asciidoc @@ -4,7 +4,7 @@ Split index ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/stats.asciidoc b/docs/reference/indices/stats.asciidoc index 486b13e47827..1370935a7cde 100644 --- a/docs/reference/indices/stats.asciidoc +++ b/docs/reference/indices/stats.asciidoc @@ -4,7 +4,7 @@ Index stats ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/indices/update-settings.asciidoc b/docs/reference/indices/update-settings.asciidoc index ad500943a042..dd92a922ec65 100644 --- a/docs/reference/indices/update-settings.asciidoc +++ b/docs/reference/indices/update-settings.asciidoc @@ -4,7 +4,7 @@ Update index settings ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. diff --git a/docs/reference/inference/delete-inference.asciidoc b/docs/reference/inference/delete-inference.asciidoc index a83fb1a516b8..a9dfeabcdf25 100644 --- a/docs/reference/inference/delete-inference.asciidoc +++ b/docs/reference/inference/delete-inference.asciidoc @@ -2,6 +2,12 @@ [[delete-inference-api]] === Delete {infer} API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Deletes an {infer} endpoint. 
IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. diff --git a/docs/reference/inference/get-inference.asciidoc b/docs/reference/inference/get-inference.asciidoc index 726facd383c1..74220514eeb9 100644 --- a/docs/reference/inference/get-inference.asciidoc +++ b/docs/reference/inference/get-inference.asciidoc @@ -2,6 +2,12 @@ [[get-inference-api]] === Get {infer} API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Retrieves {infer} endpoint information. IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. diff --git a/docs/reference/inference/inference-apis.asciidoc b/docs/reference/inference/inference-apis.asciidoc index 8d5ee1b7d6ba..4593cb31ca18 100644 --- a/docs/reference/inference/inference-apis.asciidoc +++ b/docs/reference/inference/inference-apis.asciidoc @@ -10,6 +10,12 @@ trained models. However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>. +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + The {infer} APIs enable you to create {infer} endpoints and use {ml} models of different providers - such as Amazon Bedrock, Anthropic, Azure AI Studio, Cohere, Google AI, Mistral, OpenAI, or HuggingFace - as a service. Use diff --git a/docs/reference/inference/post-inference.asciidoc b/docs/reference/inference/post-inference.asciidoc index 4edefcc911e2..07e557aa355b 100644 --- a/docs/reference/inference/post-inference.asciidoc +++ b/docs/reference/inference/post-inference.asciidoc @@ -2,6 +2,12 @@ [[post-inference-api]] === Perform inference API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Performs an inference task on an input text by using an {infer} endpoint. IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc index 4f82889f562d..307efc479c32 100644 --- a/docs/reference/inference/put-inference.asciidoc +++ b/docs/reference/inference/put-inference.asciidoc @@ -2,6 +2,12 @@ [[put-inference-api]] === Create {infer} API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Creates an {infer} endpoint to perform an {infer} task. 
[IMPORTANT] diff --git a/docs/reference/inference/service-alibabacloud-ai-search.asciidoc b/docs/reference/inference/service-alibabacloud-ai-search.asciidoc index c3ff40a39cd8..5a11190549ce 100644 --- a/docs/reference/inference/service-alibabacloud-ai-search.asciidoc +++ b/docs/reference/inference/service-alibabacloud-ai-search.asciidoc @@ -1,6 +1,12 @@ [[infer-service-alibabacloud-ai-search]] === AlibabaCloud AI Search {infer} service +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Creates an {infer} endpoint to perform an {infer} task with the `alibabacloud-ai-search` service. [discrete] diff --git a/docs/reference/inference/service-amazon-bedrock.asciidoc b/docs/reference/inference/service-amazon-bedrock.asciidoc index 761777e32f8e..ed25ce0d515b 100644 --- a/docs/reference/inference/service-amazon-bedrock.asciidoc +++ b/docs/reference/inference/service-amazon-bedrock.asciidoc @@ -1,6 +1,12 @@ [[infer-service-amazon-bedrock]] === Amazon Bedrock {infer} service +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Creates an {infer} endpoint to perform an {infer} task with the `amazonbedrock` service. [discrete] diff --git a/docs/reference/inference/service-anthropic.asciidoc b/docs/reference/inference/service-anthropic.asciidoc index 7fb3d1d5bea3..4ce76dc1d57b 100644 --- a/docs/reference/inference/service-anthropic.asciidoc +++ b/docs/reference/inference/service-anthropic.asciidoc @@ -1,6 +1,12 @@ [[infer-service-anthropic]] === Anthropic {infer} service +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Creates an {infer} endpoint to perform an {infer} task with the `anthropic` service. diff --git a/docs/reference/inference/service-azure-ai-studio.asciidoc b/docs/reference/inference/service-azure-ai-studio.asciidoc index dd13a3e59aae..7ada8df1ecda 100644 --- a/docs/reference/inference/service-azure-ai-studio.asciidoc +++ b/docs/reference/inference/service-azure-ai-studio.asciidoc @@ -1,6 +1,12 @@ [[infer-service-azure-ai-studio]] === Azure AI studio {infer} service +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Creates an {infer} endpoint to perform an {infer} task with the `azureaistudio` service. diff --git a/docs/reference/inference/service-azure-openai.asciidoc b/docs/reference/inference/service-azure-openai.asciidoc index b134e2b687f6..170c0939166f 100644 --- a/docs/reference/inference/service-azure-openai.asciidoc +++ b/docs/reference/inference/service-azure-openai.asciidoc @@ -1,6 +1,12 @@ [[infer-service-azure-openai]] === Azure OpenAI {infer} service +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Creates an {infer} endpoint to perform an {infer} task with the `azureopenai` service. 
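Each of these service pages documents the same request shape: `PUT _inference/<task_type>/<inference_id>` with a body naming the `service` and its `service_settings`. A minimal sketch for the `azureopenai` service covered above — the endpoint ID and all settings values are placeholders, and the exact `service_settings` keys are defined on each service's page:

[source,console]
----
PUT _inference/text_embedding/azure_openai_embeddings
{
  "service": "azureopenai",
  "service_settings": {
    "api_key": "<api_key>", <1>
    "resource_name": "<resource_name>",
    "deployment_id": "<deployment_id>",
    "api_version": "2024-02-01"
  }
}
----
<1> Placeholder credential and resource identifiers; substitute your own Azure OpenAI values.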
diff --git a/docs/reference/inference/service-cohere.asciidoc b/docs/reference/inference/service-cohere.asciidoc index 1a815e3c45f3..70e311c810cd 100644 --- a/docs/reference/inference/service-cohere.asciidoc +++ b/docs/reference/inference/service-cohere.asciidoc @@ -1,6 +1,12 @@ [[infer-service-cohere]] === Cohere {infer} service +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Creates an {infer} endpoint to perform an {infer} task with the `cohere` service. diff --git a/docs/reference/inference/service-elasticsearch.asciidoc b/docs/reference/inference/service-elasticsearch.asciidoc index bf7e2976bbe6..8870fbed357a 100644 --- a/docs/reference/inference/service-elasticsearch.asciidoc +++ b/docs/reference/inference/service-elasticsearch.asciidoc @@ -1,10 +1,19 @@ [[infer-service-elasticsearch]] === Elasticsearch {infer} service +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Creates an {infer} endpoint to perform an {infer} task with the `elasticsearch` service. -NOTE: If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet. - +[NOTE] +==== +* Your {es} deployment contains <>, so you only need to create the endpoints using the API if you want to customize the settings. +* If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet. +==== [discrete] [[infer-service-elasticsearch-api-request]] diff --git a/docs/reference/inference/service-elser.asciidoc b/docs/reference/inference/service-elser.asciidoc index c1cc23c8c9ad..47aaa5881460 100644 --- a/docs/reference/inference/service-elser.asciidoc +++ b/docs/reference/inference/service-elser.asciidoc @@ -1,17 +1,26 @@ [[infer-service-elser]] === ELSER {infer} service +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Creates an {infer} endpoint to perform an {infer} task with the `elser` service. You can also deploy ELSER by using the <>. -NOTE: The API request will automatically download and deploy the ELSER model if -it isn't already downloaded. +[NOTE] +==== +* Your {es} deployment contains <>, so you only need to create the endpoint using the API if you want to customize the settings. +* The API request will automatically download and deploy the ELSER model if it isn't already downloaded. +==== [WARNING] .Deprecated in 8.16 ==== -The elser service is deprecated and will be removed in a future release. -Use the <> instead, with model_id included in the service_settings. +The `elser` service is deprecated and will be removed in a future release. +Use the <> instead, with `model_id` included in the `service_settings`. ==== [discrete] diff --git a/docs/reference/inference/service-google-ai-studio.asciidoc b/docs/reference/inference/service-google-ai-studio.asciidoc index 738fce3d53e9..5b30292fb9be 100644 --- a/docs/reference/inference/service-google-ai-studio.asciidoc +++ b/docs/reference/inference/service-google-ai-studio.asciidoc @@ -1,6 +1,12 @@ [[infer-service-google-ai-studio]] === Google AI Studio {infer} service +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. 
+-- + Creates an {infer} endpoint to perform an {infer} task with the `googleaistudio` service. diff --git a/docs/reference/inference/service-google-vertex-ai.asciidoc b/docs/reference/inference/service-google-vertex-ai.asciidoc index 34e14e05e072..28fa65b6e5fc 100644 --- a/docs/reference/inference/service-google-vertex-ai.asciidoc +++ b/docs/reference/inference/service-google-vertex-ai.asciidoc @@ -1,6 +1,12 @@ [[infer-service-google-vertex-ai]] === Google Vertex AI {infer} service +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Creates an {infer} endpoint to perform an {infer} task with the `googlevertexai` service. diff --git a/docs/reference/inference/service-hugging-face.asciidoc b/docs/reference/inference/service-hugging-face.asciidoc index 6d8667351a6b..862914c14174 100644 --- a/docs/reference/inference/service-hugging-face.asciidoc +++ b/docs/reference/inference/service-hugging-face.asciidoc @@ -1,6 +1,12 @@ [[infer-service-hugging-face]] === HuggingFace {infer} service +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Creates an {infer} endpoint to perform an {infer} task with the `hugging_face` service. diff --git a/docs/reference/inference/service-mistral.asciidoc b/docs/reference/inference/service-mistral.asciidoc index 244381d10716..326e8458be76 100644 --- a/docs/reference/inference/service-mistral.asciidoc +++ b/docs/reference/inference/service-mistral.asciidoc @@ -1,6 +1,12 @@ [[infer-service-mistral]] === Mistral {infer} service +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Creates an {infer} endpoint to perform an {infer} task with the `mistral` service. diff --git a/docs/reference/inference/service-openai.asciidoc b/docs/reference/inference/service-openai.asciidoc index 9211e2d08e88..e4be7f18e09d 100644 --- a/docs/reference/inference/service-openai.asciidoc +++ b/docs/reference/inference/service-openai.asciidoc @@ -1,6 +1,12 @@ [[infer-service-openai]] === OpenAI {infer} service +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Creates an {infer} endpoint to perform an {infer} task with the `openai` service. diff --git a/docs/reference/inference/service-watsonx-ai.asciidoc b/docs/reference/inference/service-watsonx-ai.asciidoc index 597afc27fd0c..e7bba7b4e9a9 100644 --- a/docs/reference/inference/service-watsonx-ai.asciidoc +++ b/docs/reference/inference/service-watsonx-ai.asciidoc @@ -1,6 +1,12 @@ [[infer-service-watsonx-ai]] === Watsonx {infer} service +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Creates an {infer} endpoint to perform an {infer} task with the `watsonxai` service. You need an https://cloud.ibm.com/docs/databases-for-elasticsearch?topic=databases-for-elasticsearch-provisioning&interface=api[IBM Cloud® Databases for Elasticsearch deployment] to use the `watsonxai` {infer} service. 
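The deprecation warning in the `service-elser.asciidoc` hunk earlier in this run redirects users to the `elasticsearch` service with `model_id` in the `service_settings`. A minimal sketch of that replacement request, assuming the ELSER v2 model ID and illustrative allocation settings:

[source,console]
----
PUT _inference/sparse_embedding/my-elser-endpoint
{
  "service": "elasticsearch",
  "service_settings": {
    "model_id": ".elser_model_2", <1>
    "num_allocations": 1,
    "num_threads": 1
  }
}
----
<1> Passing the ELSER model ID here is what replaces the deprecated `elser` service; per the note in that hunk, the model is downloaded and deployed automatically if it isn't present yet.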
diff --git a/docs/reference/inference/stream-inference.asciidoc b/docs/reference/inference/stream-inference.asciidoc index e66acd630cb3..42abb589f9af 100644 --- a/docs/reference/inference/stream-inference.asciidoc +++ b/docs/reference/inference/stream-inference.asciidoc @@ -2,6 +2,12 @@ [[stream-inference-api]] === Stream inference API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Streams a chat completion response. IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. diff --git a/docs/reference/inference/update-inference.asciidoc b/docs/reference/inference/update-inference.asciidoc index efd29231ac12..d3a90f5d84e6 100644 --- a/docs/reference/inference/update-inference.asciidoc +++ b/docs/reference/inference/update-inference.asciidoc @@ -2,6 +2,12 @@ [[update-inference-api]] === Update inference API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Updates an {infer} endpoint. IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. diff --git a/docs/reference/ingest/apis/delete-ip-location-database.asciidoc b/docs/reference/ingest/apis/delete-ip-location-database.asciidoc index c3a10a914d2f..23f1d15bfa7e 100644 --- a/docs/reference/ingest/apis/delete-ip-location-database.asciidoc +++ b/docs/reference/ingest/apis/delete-ip-location-database.asciidoc @@ -4,6 +4,12 @@ Delete IP geolocation database configuration ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ingest[Ingest APIs]. +-- + Deletes an IP geolocation database configuration. [source,console] diff --git a/docs/reference/ingest/apis/delete-pipeline.asciidoc b/docs/reference/ingest/apis/delete-pipeline.asciidoc index 94ac87c61b56..cd4cae1b636c 100644 --- a/docs/reference/ingest/apis/delete-pipeline.asciidoc +++ b/docs/reference/ingest/apis/delete-pipeline.asciidoc @@ -4,6 +4,12 @@ Delete pipeline ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ingest[Ingest APIs]. +-- + Deletes one or more existing ingest pipelines. //// diff --git a/docs/reference/ingest/apis/enrich/delete-enrich-policy.asciidoc b/docs/reference/ingest/apis/enrich/delete-enrich-policy.asciidoc index d190322fbfe9..cd68bd1fcc67 100644 --- a/docs/reference/ingest/apis/enrich/delete-enrich-policy.asciidoc +++ b/docs/reference/ingest/apis/enrich/delete-enrich-policy.asciidoc @@ -5,7 +5,7 @@ Delete enrich policy ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-enrich[Enrich APIs]. 
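Because the delete pipeline API above acts on "one or more" pipelines, the pipeline ID in the path can, to my understanding, also be a comma-separated list or wildcard expression. A minimal example with a placeholder ID:

[source,console]
----
DELETE _ingest/pipeline/my-pipeline-id
----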
diff --git a/docs/reference/ingest/apis/enrich/execute-enrich-policy.asciidoc b/docs/reference/ingest/apis/enrich/execute-enrich-policy.asciidoc index 312cb2593f1f..8e72a51514a5 100644 --- a/docs/reference/ingest/apis/enrich/execute-enrich-policy.asciidoc +++ b/docs/reference/ingest/apis/enrich/execute-enrich-policy.asciidoc @@ -5,7 +5,7 @@ Execute enrich policy ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-enrich[Enrich APIs]. diff --git a/docs/reference/ingest/apis/enrich/get-enrich-policy.asciidoc b/docs/reference/ingest/apis/enrich/get-enrich-policy.asciidoc index 48448f592504..2f9e069d5e43 100644 --- a/docs/reference/ingest/apis/enrich/get-enrich-policy.asciidoc +++ b/docs/reference/ingest/apis/enrich/get-enrich-policy.asciidoc @@ -5,7 +5,7 @@ Get enrich policy ++++ -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-enrich[Enrich APIs]. diff --git a/docs/reference/ingest/apis/enrich/index.asciidoc b/docs/reference/ingest/apis/enrich/index.asciidoc index 0488d535000e..7566b8812ec3 100644 --- a/docs/reference/ingest/apis/enrich/index.asciidoc +++ b/docs/reference/ingest/apis/enrich/index.asciidoc @@ -2,7 +2,7 @@ [[enrich-apis]] == Enrich APIs -..New API reference +.New API reference [sidebar] -- For the most up-to-date API details, refer to {api-es}/group/endpoint-enrich[Enrich APIs]. diff --git a/docs/reference/ingest/apis/geoip-stats.asciidoc b/docs/reference/ingest/apis/geoip-stats.asciidoc index 6135b7821f2a..423fee1c5bce 100644 --- a/docs/reference/ingest/apis/geoip-stats.asciidoc +++ b/docs/reference/ingest/apis/geoip-stats.asciidoc @@ -4,6 +4,12 @@ GeoIP stats ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ingest[Ingest APIs]. +-- + Gets statistics about the <>, including download statistics for GeoIP2 databases used with it. diff --git a/docs/reference/ingest/apis/get-ip-location-database.asciidoc b/docs/reference/ingest/apis/get-ip-location-database.asciidoc index 26e9ddc1eee5..25f1c7ffdf69 100644 --- a/docs/reference/ingest/apis/get-ip-location-database.asciidoc +++ b/docs/reference/ingest/apis/get-ip-location-database.asciidoc @@ -4,6 +4,12 @@ Get IP geolocation database configuration ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ingest[Ingest APIs]. +-- + Returns information about one or more IP geolocation database configurations. [source,console] diff --git a/docs/reference/ingest/apis/get-pipeline.asciidoc b/docs/reference/ingest/apis/get-pipeline.asciidoc index f2a1155bca12..88fc22590004 100644 --- a/docs/reference/ingest/apis/get-pipeline.asciidoc +++ b/docs/reference/ingest/apis/get-pipeline.asciidoc @@ -4,6 +4,12 @@ Get pipeline ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ingest[Ingest APIs]. +-- + Returns information about one or more ingest pipelines. This API returns a local reference of the pipeline. diff --git a/docs/reference/ingest/apis/index.asciidoc b/docs/reference/ingest/apis/index.asciidoc index 35adc4782197..e83131b55445 100644 --- a/docs/reference/ingest/apis/index.asciidoc +++ b/docs/reference/ingest/apis/index.asciidoc @@ -1,6 +1,12 @@ [[ingest-apis]] == Ingest APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ingest[Ingest APIs]. 
+-- + Use ingest APIs to manage tasks and resources related to <> and processors. diff --git a/docs/reference/ingest/apis/put-ip-location-database.asciidoc b/docs/reference/ingest/apis/put-ip-location-database.asciidoc index e42d84752694..1cdd78900888 100644 --- a/docs/reference/ingest/apis/put-ip-location-database.asciidoc +++ b/docs/reference/ingest/apis/put-ip-location-database.asciidoc @@ -4,6 +4,12 @@ Create or update IP geolocation database configuration ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ingest[Ingest APIs]. +-- + Creates or updates an IP geolocation database configuration. IMPORTANT: The Maxmind `account_id` shown below requires a license key. Because the license key is sensitive information, diff --git a/docs/reference/ingest/apis/put-pipeline.asciidoc b/docs/reference/ingest/apis/put-pipeline.asciidoc index 5b532dedf8e8..7d9854a55758 100644 --- a/docs/reference/ingest/apis/put-pipeline.asciidoc +++ b/docs/reference/ingest/apis/put-pipeline.asciidoc @@ -4,6 +4,12 @@ Create or update pipeline ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ingest[Ingest APIs]. +-- + Creates or updates an <>. Changes made using this API take effect immediately. diff --git a/docs/reference/ingest/apis/simulate-ingest.asciidoc b/docs/reference/ingest/apis/simulate-ingest.asciidoc index 52ed09b1d32c..b48faff87664 100644 --- a/docs/reference/ingest/apis/simulate-ingest.asciidoc +++ b/docs/reference/ingest/apis/simulate-ingest.asciidoc @@ -5,6 +5,12 @@ Simulate ingest ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ingest[Ingest APIs]. +-- + Executes ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions. This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any diff --git a/docs/reference/ingest/apis/simulate-pipeline.asciidoc b/docs/reference/ingest/apis/simulate-pipeline.asciidoc index bb7c34338313..fe7e5f2e91c6 100644 --- a/docs/reference/ingest/apis/simulate-pipeline.asciidoc +++ b/docs/reference/ingest/apis/simulate-pipeline.asciidoc @@ -5,6 +5,12 @@ Simulate pipeline ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ingest[Ingest APIs]. +-- + Executes an ingest pipeline against a set of provided documents. diff --git a/docs/reference/intro.asciidoc b/docs/reference/intro.asciidoc index e0100b1c5640..391439df2ae8 100644 --- a/docs/reference/intro.asciidoc +++ b/docs/reference/intro.asciidoc @@ -397,51 +397,18 @@ geographic location of your users and your resources. [[use-multiple-nodes-shards]] ==== Use multiple nodes and shards -[NOTE] -==== -Nodes and shards are what make {es} distributed and scalable. +When you move to production, you need to introduce multiple nodes and shards to your cluster. Nodes and shards are what make {es} distributed and scalable. The size and number of these nodes and shards depends on your data, your use case, and your budget. -These concepts aren’t essential if you’re just getting started. How you <> in production determines what you need to know: +These concepts aren't essential if you're just getting started. How you <> in production determines what you need to know: * *Self-managed {es}*: You are responsible for setting up and managing nodes, clusters, shards, and replicas. 
This includes managing the underlying infrastructure, scaling, and ensuring high availability through failover and backup strategies. * *Elastic Cloud*: Elastic can autoscale resources in response to workload changes. Choose from different deployment types to apply sensible defaults for your use case. A basic understanding of nodes, shards, and replicas is still important. -* *Elastic Cloud Serverless*: You don’t need to worry about nodes, shards, or replicas. These resources are 100% automated +* *Elastic Cloud Serverless*: You don't need to worry about nodes, shards, or replicas. These resources are 100% automated on the serverless platform, which is designed to scale with your workload. -==== -You can add servers (_nodes_) to a cluster to increase capacity, and {es} automatically distributes your data and query load -across all of the available nodes. - -Elastic is able to distribute your data across nodes by subdividing an index into _shards_. Each index in {es} is a grouping -of one or more physical shards, where each shard is a self-contained Lucene index containing a subset of the documents in -the index. By distributing the documents in an index across multiple shards, and distributing those shards across multiple -nodes, {es} increases indexing and query capacity. - -There are two types of shards: _primaries_ and _replicas_. Each document in an index belongs to one primary shard. A replica -shard is a copy of a primary shard. Replicas maintain redundant copies of your data across the nodes in your cluster. -This protects against hardware failure and increases capacity to serve read requests like searching or retrieving a document. - -[TIP] -==== -The number of primary shards in an index is fixed at the time that an index is created, but the number of replica shards can -be changed at any time, without interrupting indexing or query operations. -==== - -Shard copies in your cluster are automatically balanced across nodes to provide scale and high availability. All nodes are -aware of all the other nodes in the cluster and can forward client requests to the appropriate node. This allows {es} -to distribute indexing and query load across the cluster. - -If you’re exploring {es} for the first time or working in a development environment, then you can use a cluster with a single node and create indices -with only one shard. However, in a production environment, you should build a cluster with multiple nodes and indices -with multiple shards to increase performance and resilience. - -// TODO - diagram - -To learn about optimizing the number and size of shards in your cluster, refer to <>. -To learn about how read and write operations are replicated across shards and shard copies, refer to <>. -To adjust how shards are allocated and balanced across nodes, refer to <>. +Learn more about <>. [discrete] [[ccr-disaster-recovery-geo-proximity]] diff --git a/docs/reference/licensing/delete-license.asciidoc b/docs/reference/licensing/delete-license.asciidoc index 0f0aac416ccf..fd9236664e1c 100644 --- a/docs/reference/licensing/delete-license.asciidoc +++ b/docs/reference/licensing/delete-license.asciidoc @@ -5,6 +5,12 @@ Delete license ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-license[Licensing APIs]. +-- + This API enables you to delete licensing information. 
[discrete] diff --git a/docs/reference/licensing/get-basic-status.asciidoc b/docs/reference/licensing/get-basic-status.asciidoc index 44bc246e72bc..0e354e0dc07c 100644 --- a/docs/reference/licensing/get-basic-status.asciidoc +++ b/docs/reference/licensing/get-basic-status.asciidoc @@ -5,6 +5,12 @@ Get basic status ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-license[Licensing APIs]. +-- + This API enables you to check the status of your basic license. [discrete] diff --git a/docs/reference/licensing/get-license.asciidoc b/docs/reference/licensing/get-license.asciidoc index 816bdd36ff4b..1b50a5be8fa2 100644 --- a/docs/reference/licensing/get-license.asciidoc +++ b/docs/reference/licensing/get-license.asciidoc @@ -5,6 +5,12 @@ Get license ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-license[Licensing APIs]. +-- + This API enables you to retrieve licensing information. [discrete] diff --git a/docs/reference/licensing/get-trial-status.asciidoc b/docs/reference/licensing/get-trial-status.asciidoc index 912d90bde850..b0ae110faa8c 100644 --- a/docs/reference/licensing/get-trial-status.asciidoc +++ b/docs/reference/licensing/get-trial-status.asciidoc @@ -5,6 +5,12 @@ Get trial status ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-license[Licensing APIs]. +-- + Enables you to check the status of your trial. [discrete] diff --git a/docs/reference/licensing/index.asciidoc b/docs/reference/licensing/index.asciidoc index a1dfd398acfe..12df7b606c5b 100644 --- a/docs/reference/licensing/index.asciidoc +++ b/docs/reference/licensing/index.asciidoc @@ -2,6 +2,12 @@ [[licensing-apis]] == Licensing APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-license[Licensing APIs]. +-- + You can use the following APIs to manage your licenses: * <> diff --git a/docs/reference/licensing/start-basic.asciidoc b/docs/reference/licensing/start-basic.asciidoc index 8ae25e1940a6..291a0ec452ca 100644 --- a/docs/reference/licensing/start-basic.asciidoc +++ b/docs/reference/licensing/start-basic.asciidoc @@ -5,6 +5,12 @@ Start basic ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-license[Licensing APIs]. +-- + This API starts an indefinite basic license. [discrete] diff --git a/docs/reference/licensing/update-license.asciidoc b/docs/reference/licensing/update-license.asciidoc index 4207dbb092fb..03b6c3ce40ed 100644 --- a/docs/reference/licensing/update-license.asciidoc +++ b/docs/reference/licensing/update-license.asciidoc @@ -5,6 +5,12 @@ Update license ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-license[Licensing APIs]. +-- + Updates the license for your {es} cluster. [[update-license-api-request]] diff --git a/docs/reference/migration/apis/deprecation.asciidoc b/docs/reference/migration/apis/deprecation.asciidoc index 67b4c113af2b..71453a001eee 100644 --- a/docs/reference/migration/apis/deprecation.asciidoc +++ b/docs/reference/migration/apis/deprecation.asciidoc @@ -5,6 +5,12 @@ Deprecation info ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-migration[Migration APIs]. 
+-- + include::{es-ref-dir}/migration/apis/shared-migration-apis-tip.asciidoc[] The deprecation API is to be used to retrieve information about different diff --git a/docs/reference/migration/apis/feature-migration.asciidoc b/docs/reference/migration/apis/feature-migration.asciidoc index e38639ac4453..717a46442bd9 100644 --- a/docs/reference/migration/apis/feature-migration.asciidoc +++ b/docs/reference/migration/apis/feature-migration.asciidoc @@ -5,6 +5,12 @@ Feature migration ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-migration[Migration APIs]. +-- + include::{es-ref-dir}/migration/apis/shared-migration-apis-tip.asciidoc[] Version upgrades sometimes require changes to how features store configuration diff --git a/docs/reference/migration/migration.asciidoc b/docs/reference/migration/migration.asciidoc index ffb2ca7a7859..57b6c88aefea 100644 --- a/docs/reference/migration/migration.asciidoc +++ b/docs/reference/migration/migration.asciidoc @@ -2,6 +2,12 @@ [[migration-api]] == Migration APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-migration[Migration APIs]. +-- + The migration APIs power {kib}'s **Upgrade Assistant** feature. include::apis/shared-migration-apis-tip.asciidoc[] diff --git a/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc index f0cb968e082c..865213dad3f5 100644 --- a/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc @@ -5,6 +5,12 @@ Close jobs ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Closes one or more {anomaly-jobs}. [[ml-close-job-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/delete-calendar-event.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-calendar-event.asciidoc index b80a248038ae..d67f7cce9d1f 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-calendar-event.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-calendar-event.asciidoc @@ -5,6 +5,12 @@ Delete events from calendar ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Deletes scheduled events from a calendar. [[ml-delete-calendar-event-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/delete-calendar-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-calendar-job.asciidoc index 6720e236fd63..17a581f964f5 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-calendar-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-calendar-job.asciidoc @@ -5,6 +5,12 @@ Delete jobs from calendar ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Deletes {anomaly-jobs} from a calendar. 
[[ml-delete-calendar-job-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/delete-calendar.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-calendar.asciidoc index 6684366c6f33..23ce74c8fb31 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-calendar.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-calendar.asciidoc @@ -5,6 +5,12 @@ Delete calendars ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Deletes a calendar. [[ml-delete-calendar-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/delete-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-datafeed.asciidoc index 64a1e4c336fe..61d3624ef50d 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-datafeed.asciidoc @@ -7,6 +7,12 @@ Delete {dfeeds} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Deletes an existing {dfeed}. [[ml-delete-datafeed-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/delete-expired-data.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-expired-data.asciidoc index a8c1279ac1b4..bee453f8d208 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-expired-data.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-expired-data.asciidoc @@ -5,6 +5,12 @@ Delete expired data ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Deletes expired and unused machine learning data. [[ml-delete-expired-data-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/delete-filter.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-filter.asciidoc index 4b41347543e8..84346ae01230 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-filter.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-filter.asciidoc @@ -5,6 +5,12 @@ Delete filters ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Deletes a filter. [[ml-delete-filter-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/delete-forecast.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-forecast.asciidoc index 74e6ce27084a..ee3167036f66 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-forecast.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-forecast.asciidoc @@ -5,6 +5,12 @@ Delete forecasts ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Deletes forecasts from a {ml} job. [[ml-delete-forecast-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc index 1bbe07fd44f4..595c5acc041d 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc @@ -5,6 +5,12 @@ Delete jobs ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Deletes an existing {anomaly-job}. 
[[ml-delete-job-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/delete-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-snapshot.asciidoc index ad10de7a2ba0..345b933a1962 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-snapshot.asciidoc @@ -5,6 +5,12 @@ Delete model snapshots ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Deletes an existing model snapshot. [[ml-delete-snapshot-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/estimate-model-memory.asciidoc b/docs/reference/ml/anomaly-detection/apis/estimate-model-memory.asciidoc index 1cc4df42f083..8be8e8df72d4 100644 --- a/docs/reference/ml/anomaly-detection/apis/estimate-model-memory.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/estimate-model-memory.asciidoc @@ -5,6 +5,12 @@ Estimate model memory ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Makes an estimation of the memory usage for an {anomaly-job} model. It is based on analysis configuration details for the job and cardinality estimates for the fields it references. diff --git a/docs/reference/ml/anomaly-detection/apis/flush-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/flush-job.asciidoc index 68ff601749b4..dcf4d7a6b1d6 100644 --- a/docs/reference/ml/anomaly-detection/apis/flush-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/flush-job.asciidoc @@ -5,6 +5,12 @@ Flush jobs ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Forces any buffered data to be processed by the job. [[ml-flush-job-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/forecast.asciidoc b/docs/reference/ml/anomaly-detection/apis/forecast.asciidoc index 3e6067ab0585..ac4e3eb4ed29 100644 --- a/docs/reference/ml/anomaly-detection/apis/forecast.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/forecast.asciidoc @@ -5,6 +5,12 @@ Forecast jobs ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Predicts the future behavior of a time series by using its historical behavior. [[ml-forecast-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-bucket.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-bucket.asciidoc index bca839d1db31..d33d6f64021a 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-bucket.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-bucket.asciidoc @@ -5,6 +5,12 @@ Get buckets ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves {anomaly-job} results for one or more buckets. 
[[ml-get-bucket-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc index fc06e286bf46..565a553e1897 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc @@ -5,6 +5,12 @@ Get scheduled events ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves information about the scheduled events in calendars. [[ml-get-calendar-event-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc index b2c46bbe16c0..bf0045829963 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc @@ -5,6 +5,12 @@ Get calendars ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves configuration information for calendars. [[ml-get-calendar-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-category.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-category.asciidoc index 33de5e0f71a0..0f5fa1504c9f 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-category.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-category.asciidoc @@ -5,6 +5,12 @@ Get categories ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves {anomaly-job} results for one or more categories. [[ml-get-category-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc index 0a8f1e802715..f9130bb78f5e 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc @@ -7,6 +7,12 @@ Get {dfeed} statistics ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves usage information for {ml-docs}/ml-ad-run-jobs.html#ml-ad-datafeeds[{dfeeds}]. [[ml-get-datafeed-stats-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc index a986e2220f92..3b86c5cc22a8 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc @@ -7,6 +7,12 @@ Get {dfeeds} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves configuration information for {dfeeds}. 
[[ml-get-datafeed-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-filter.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-filter.asciidoc index f73dcd236f1a..dc167238701c 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-filter.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-filter.asciidoc @@ -5,6 +5,12 @@ Get filters ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves filters. [[ml-get-filter-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc index 31489e361a84..f2e26344207a 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc @@ -5,6 +5,12 @@ Get influencers ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves {anomaly-job} results for one or more influencers. [[ml-get-influencer-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-job-model-snapshot-upgrade-stats.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-job-model-snapshot-upgrade-stats.asciidoc index 0939282a7591..32a9b67258b2 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-job-model-snapshot-upgrade-stats.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-job-model-snapshot-upgrade-stats.asciidoc @@ -7,6 +7,12 @@ Get model snapshot upgrade statistics ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves usage information for {anomaly-job} model snapshot upgrades. [[ml-get-job-model-snapshot-upgrade-stats-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc index 627109d11e11..1df41abdda27 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc @@ -5,6 +5,12 @@ Get job statistics ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves usage information for {ml-docs}/ml-ad-overview.html[{anomaly-jobs}]. [[ml-get-job-stats-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc index 33692fd182fa..74fd1c7ecb54 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc @@ -5,6 +5,12 @@ Get jobs ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves configuration information for {anomaly-jobs}. 
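As an illustration of the two retrieval endpoints above, with a placeholder job ID; omitting the ID (or using `_all`) returns every job:

[source,console]
----
GET _ml/anomaly_detectors/low_request_rate

GET _ml/anomaly_detectors/low_request_rate/_stats
----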
[[ml-get-job-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-overall-buckets.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-overall-buckets.asciidoc index b581b5c3a2eb..45ae61647bfd 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-overall-buckets.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-overall-buckets.asciidoc @@ -5,6 +5,12 @@ Get overall buckets ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves overall bucket results that summarize the bucket results of multiple {anomaly-jobs}. diff --git a/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc index e74ab3ecb4b1..266ab64ba5e1 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc @@ -5,6 +5,12 @@ Get records ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves anomaly records for an {anomaly-job}. [[ml-get-record-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc index d94bd4060854..47fdc3db46b6 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc @@ -5,6 +5,12 @@ Get model snapshots ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves information about model snapshots. [[ml-get-snapshot-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/ml-ad-apis.asciidoc b/docs/reference/ml/anomaly-detection/apis/ml-ad-apis.asciidoc index 856232c93343..951f9522db74 100644 --- a/docs/reference/ml/anomaly-detection/apis/ml-ad-apis.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/ml-ad-apis.asciidoc @@ -2,6 +2,12 @@ [[ml-ad-apis]] = {ml-cap} {anomaly-detect} APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + You can use the following APIs to perform {ml} {anomaly-detect} activities. See also <>, <>, <>. diff --git a/docs/reference/ml/anomaly-detection/apis/open-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/open-job.asciidoc index 385f672f467f..9a5153952b5a 100644 --- a/docs/reference/ml/anomaly-detection/apis/open-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/open-job.asciidoc @@ -5,6 +5,12 @@ Open jobs ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Opens one or more {anomaly-jobs}. [[ml-open-job-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/post-calendar-event.asciidoc b/docs/reference/ml/anomaly-detection/apis/post-calendar-event.asciidoc index 46ffeab694fa..429dca427a9d 100644 --- a/docs/reference/ml/anomaly-detection/apis/post-calendar-event.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/post-calendar-event.asciidoc @@ -5,6 +5,12 @@ Add events to calendar ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. 
+-- + Posts scheduled events in a calendar. [[ml-post-calendar-event-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/post-data.asciidoc b/docs/reference/ml/anomaly-detection/apis/post-data.asciidoc index 931efcf8c2a5..83e5dd77e814 100644 --- a/docs/reference/ml/anomaly-detection/apis/post-data.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/post-data.asciidoc @@ -7,6 +7,12 @@ deprecated::[7.11.0, "Posting data directly to anomaly detection jobs is deprecated, in a future major version a <> will be required."] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Sends data to an anomaly detection job for analysis. [[ml-post-data-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/preview-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/preview-datafeed.asciidoc index 243cd2a5f32a..c57965401e07 100644 --- a/docs/reference/ml/anomaly-detection/apis/preview-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/preview-datafeed.asciidoc @@ -7,6 +7,12 @@ Preview {dfeeds} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Previews a {dfeed}. [[ml-preview-datafeed-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/put-calendar-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-calendar-job.asciidoc index 0c19a08cbd74..612f472bba37 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-calendar-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-calendar-job.asciidoc @@ -5,6 +5,12 @@ Add jobs to calendar ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Adds an {anomaly-job} to a calendar. [[ml-put-calendar-job-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/put-calendar.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-calendar.asciidoc index fd2b58a31737..69a4498cc29d 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-calendar.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-calendar.asciidoc @@ -5,6 +5,12 @@ Create calendars ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Instantiates a calendar. [[ml-put-calendar-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc index 47e3059666d7..18260d3538e4 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc @@ -7,6 +7,12 @@ Create {dfeeds} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Instantiates a {dfeed}. 
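A minimal sketch of {dfeed} creation. The job `low_request_rate` is assumed to exist already, and the index pattern is a placeholder:

[source,console]
----
PUT _ml/datafeeds/datafeed-low_request_rate
{
  "job_id": "low_request_rate",
  "indices": [ "web-logs-*" ],
  "query": { "match_all": {} }
}
----

Before starting the {dfeed}, `GET _ml/datafeeds/datafeed-low_request_rate/_preview` shows the documents it would fetch.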
[[ml-put-datafeed-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/put-filter.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-filter.asciidoc index b50ba8cb1e23..3f3157f008e9 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-filter.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-filter.asciidoc @@ -5,6 +5,12 @@ Create filters ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Instantiates a filter. [[ml-put-filter-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc index 7bf02e7a0dd6..e3c292cc534b 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc @@ -5,6 +5,12 @@ Create jobs ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Instantiates an {anomaly-job}. [[ml-put-job-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/reset-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/reset-job.asciidoc index 9009d634a2e9..2d9c4696cab4 100644 --- a/docs/reference/ml/anomaly-detection/apis/reset-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/reset-job.asciidoc @@ -5,6 +5,12 @@ Reset jobs ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Resets an existing {anomaly-job}. [[ml-reset-job-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/revert-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/revert-snapshot.asciidoc index c8d7a27ee204..792a102ff295 100644 --- a/docs/reference/ml/anomaly-detection/apis/revert-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/revert-snapshot.asciidoc @@ -5,6 +5,12 @@ Revert model snapshots ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Reverts to a specific snapshot. [[ml-revert-snapshot-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/start-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/start-datafeed.asciidoc index b54c80133d7d..4281b3a02b82 100644 --- a/docs/reference/ml/anomaly-detection/apis/start-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/start-datafeed.asciidoc @@ -7,6 +7,12 @@ Start {dfeeds} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Starts one or more {dfeeds}. [[ml-start-datafeed-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc index bc15a1de8a05..91e5087715fb 100644 --- a/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc @@ -7,6 +7,12 @@ Stop {dfeeds} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Stops one or more {dfeeds}. 
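Job creation, opening, and {dfeed} start/stop fit together as shown in this minimal sketch; all IDs are placeholders:

[source,console]
----
PUT _ml/anomaly_detectors/low_request_rate
{
  "analysis_config": {
    "bucket_span": "1h",
    "detectors": [
      { "function": "low_count", "detector_description": "Low request rate" }
    ]
  },
  "data_description": { "time_field": "timestamp" }
}

POST _ml/anomaly_detectors/low_request_rate/_open

POST _ml/datafeeds/datafeed-low_request_rate/_start

POST _ml/datafeeds/datafeed-low_request_rate/_stop
----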
[[ml-stop-datafeed-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc index b3920d9d4f80..c03c921e7875 100644 --- a/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc @@ -7,6 +7,12 @@ Update {dfeeds} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Updates certain properties of a {dfeed}. diff --git a/docs/reference/ml/anomaly-detection/apis/update-filter.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-filter.asciidoc index a4221c37a438..8fb5376f4acb 100644 --- a/docs/reference/ml/anomaly-detection/apis/update-filter.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-filter.asciidoc @@ -5,6 +5,12 @@ Update filters ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Updates the description of a filter, adds items, or removes items. [[ml-update-filter-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc index ee13247fc883..bed013c113c0 100644 --- a/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc @@ -5,6 +5,12 @@ Update jobs ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Updates certain properties of an {anomaly-job}. [[ml-update-job-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/update-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-snapshot.asciidoc index f8c038486002..54276f62ff09 100644 --- a/docs/reference/ml/anomaly-detection/apis/update-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-snapshot.asciidoc @@ -5,6 +5,12 @@ Update model snapshots ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Updates certain properties of a snapshot. [[ml-update-snapshot-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/upgrade-job-model-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/upgrade-job-model-snapshot.asciidoc index 3a74e3b2296d..2538b4958ada 100644 --- a/docs/reference/ml/anomaly-detection/apis/upgrade-job-model-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/upgrade-job-model-snapshot.asciidoc @@ -5,6 +5,12 @@ Upgrade model snapshots ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Upgrades an {anomaly-detect} model snapshot to the latest major version. 
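A sketch of the upgrade call, with placeholder job and snapshot IDs; `wait_for_completion` defaults to `false`:

[source,console]
----
POST _ml/anomaly_detectors/low_request_rate/model_snapshots/1575402237/_upgrade?wait_for_completion=true
----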
NOTE: From {es} 8.10.0, a new version number is used to diff --git a/docs/reference/ml/anomaly-detection/apis/validate-detector.asciidoc b/docs/reference/ml/anomaly-detection/apis/validate-detector.asciidoc index c71673be7dc0..820845d0fd23 100644 --- a/docs/reference/ml/anomaly-detection/apis/validate-detector.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/validate-detector.asciidoc @@ -5,6 +5,12 @@ Validate detectors ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Validates detector configuration information. [[ml-valid-detector-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/validate-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/validate-job.asciidoc index c77623566d4c..382199e31569 100644 --- a/docs/reference/ml/anomaly-detection/apis/validate-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/validate-job.asciidoc @@ -5,6 +5,12 @@ Validate jobs ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Validates {anomaly-job} configuration information. [[ml-valid-job-request]] diff --git a/docs/reference/ml/common/apis/get-ml-info.asciidoc b/docs/reference/ml/common/apis/get-ml-info.asciidoc index 104375bd641c..62fae2a9a918 100644 --- a/docs/reference/ml/common/apis/get-ml-info.asciidoc +++ b/docs/reference/ml/common/apis/get-ml-info.asciidoc @@ -7,6 +7,12 @@ Get {ml} info ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml[{ml-cap} APIs]. +-- + Returns defaults and limits used by machine learning. [[get-ml-info-request]] diff --git a/docs/reference/ml/common/apis/get-ml-memory.asciidoc b/docs/reference/ml/common/apis/get-ml-memory.asciidoc index 81e0f59a97e5..8c1cf620b70e 100644 --- a/docs/reference/ml/common/apis/get-ml-memory.asciidoc +++ b/docs/reference/ml/common/apis/get-ml-memory.asciidoc @@ -7,6 +7,12 @@ Get {ml} memory stats ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml[{ml-cap} APIs]. +-- + Returns information on how {ml} is using memory. [[get-ml-memory-request]] diff --git a/docs/reference/ml/common/apis/ml-apis.asciidoc b/docs/reference/ml/common/apis/ml-apis.asciidoc index c4349f3eb736..95f102ceecfa 100644 --- a/docs/reference/ml/common/apis/ml-apis.asciidoc +++ b/docs/reference/ml/common/apis/ml-apis.asciidoc @@ -2,6 +2,12 @@ [[ml-apis]] = {ml-cap} APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml[{ml-cap} APIs]. +-- + You can use the following APIs to retrieve information related to the {stack-ml-features}: diff --git a/docs/reference/ml/common/apis/set-upgrade-mode.asciidoc b/docs/reference/ml/common/apis/set-upgrade-mode.asciidoc index 2469753382f9..52adcac9bacd 100644 --- a/docs/reference/ml/common/apis/set-upgrade-mode.asciidoc +++ b/docs/reference/ml/common/apis/set-upgrade-mode.asciidoc @@ -5,6 +5,12 @@ Set upgrade mode ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml[{ml-cap} APIs]. +-- + Sets a cluster wide upgrade_mode setting that prepares {ml} indices for an upgrade. 
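A sketch of toggling upgrade mode around a cluster upgrade; the `timeout` parameter is optional:

[source,console]
----
POST _ml/set_upgrade_mode?enabled=true&timeout=10m
----

Calling the endpoint again with `enabled=false` returns {ml} to normal operation once the upgrade is complete.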
diff --git a/docs/reference/ml/df-analytics/apis/delete-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/delete-dfanalytics.asciidoc index b505da570244..a6b5058eab88 100644 --- a/docs/reference/ml/df-analytics/apis/delete-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/delete-dfanalytics.asciidoc @@ -6,6 +6,12 @@ Delete {dfanalytics-jobs} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-data-frame[{ml-cap} {dfanalytics} APIs]. +-- + Deletes an existing {dfanalytics-job}. diff --git a/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc index 8d31fcd0107d..fb0b6da0bc80 100644 --- a/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc @@ -7,6 +7,12 @@ Evaluate {dfanalytics} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-data-frame[{ml-cap} {dfanalytics} APIs]. +-- + Evaluates the {dfanalytics} for an annotated index. diff --git a/docs/reference/ml/df-analytics/apis/explain-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/explain-dfanalytics.asciidoc index 0ee7ec563458..566cfc3ba359 100644 --- a/docs/reference/ml/df-analytics/apis/explain-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/explain-dfanalytics.asciidoc @@ -7,6 +7,12 @@ Explain {dfanalytics} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-data-frame[{ml-cap} {dfanalytics} APIs]. +-- + Explains a {dataframe-analytics-config}. diff --git a/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc b/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc index 9037819c9f23..960388d8d4e3 100644 --- a/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc +++ b/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc @@ -6,6 +6,12 @@ Get {dfanalytics-jobs} stats ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-data-frame[{ml-cap} {dfanalytics} APIs]. +-- + Retrieves usage information for {ml-docs}/ml-dfanalytics.html[{dfanalytics-jobs}]. diff --git a/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc index c2a4caa981da..6e65bdfe83a2 100644 --- a/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc @@ -5,6 +5,12 @@ Get {dfanalytics-jobs} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-data-frame[{ml-cap} {dfanalytics} APIs]. +-- + Retrieves configuration information for {dfanalytics-jobs}. diff --git a/docs/reference/ml/df-analytics/apis/ml-df-analytics-apis.asciidoc b/docs/reference/ml/df-analytics/apis/ml-df-analytics-apis.asciidoc index ba16f728e4b9..a90f42b7b975 100644 --- a/docs/reference/ml/df-analytics/apis/ml-df-analytics-apis.asciidoc +++ b/docs/reference/ml/df-analytics/apis/ml-df-analytics-apis.asciidoc @@ -2,6 +2,12 @@ [[ml-df-analytics-apis]] = {ml-cap} {dfanalytics} APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-data-frame[{ml-cap} {dfanalytics} APIs]. 
+-- + You can use the following APIs to perform {ml} {dfanalytics} activities: * <> diff --git a/docs/reference/ml/df-analytics/apis/preview-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/preview-dfanalytics.asciidoc index 2c61c3263992..896cf5ca8eb8 100644 --- a/docs/reference/ml/df-analytics/apis/preview-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/preview-dfanalytics.asciidoc @@ -7,6 +7,12 @@ Preview {dfanalytics} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-data-frame[{ml-cap} {dfanalytics} APIs]. +-- + Previews the features used by a {dataframe-analytics-config}. diff --git a/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc index 54cbe78b3445..8c2e95e0744e 100644 --- a/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc @@ -6,6 +6,12 @@ Create {dfanalytics-jobs} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-data-frame[{ml-cap} {dfanalytics} APIs]. +-- + Instantiates a {dfanalytics-job}. diff --git a/docs/reference/ml/df-analytics/apis/start-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/start-dfanalytics.asciidoc index 70e996ef8dd0..7409c443ff80 100644 --- a/docs/reference/ml/df-analytics/apis/start-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/start-dfanalytics.asciidoc @@ -7,6 +7,12 @@ Start {dfanalytics-jobs} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-data-frame[{ml-cap} {dfanalytics} APIs]. +-- + Starts a {dfanalytics-job}. diff --git a/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc index 2fa3bc4413d7..95d39b98e3bb 100644 --- a/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc @@ -7,6 +7,12 @@ Stop {dfanalytics-jobs} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-data-frame[{ml-cap} {dfanalytics} APIs]. +-- + Stops one or more {dfanalytics-jobs}. diff --git a/docs/reference/ml/df-analytics/apis/update-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/update-dfanalytics.asciidoc index 49cca176be69..35e871761b21 100644 --- a/docs/reference/ml/df-analytics/apis/update-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/update-dfanalytics.asciidoc @@ -6,6 +6,12 @@ Update {dfanalytics-jobs} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-data-frame[{ml-cap} {dfanalytics} APIs]. +-- + Updates an existing {dfanalytics-job}. diff --git a/docs/reference/ml/trained-models/apis/clear-trained-model-deployment-cache.asciidoc b/docs/reference/ml/trained-models/apis/clear-trained-model-deployment-cache.asciidoc index f24379705fc7..1e1639ab5eee 100644 --- a/docs/reference/ml/trained-models/apis/clear-trained-model-deployment-cache.asciidoc +++ b/docs/reference/ml/trained-models/apis/clear-trained-model-deployment-cache.asciidoc @@ -6,6 +6,12 @@ Clear trained model deployment cache ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. 
+-- + Clears the {infer} cache on all nodes where the deployment is assigned. [[clear-trained-model-deployment-cache-request]] diff --git a/docs/reference/ml/trained-models/apis/delete-trained-models-aliases.asciidoc b/docs/reference/ml/trained-models/apis/delete-trained-models-aliases.asciidoc index 1b67a642f7b7..22e0422df9fb 100644 --- a/docs/reference/ml/trained-models/apis/delete-trained-models-aliases.asciidoc +++ b/docs/reference/ml/trained-models/apis/delete-trained-models-aliases.asciidoc @@ -6,6 +6,12 @@ Delete trained model aliases ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Deletes a trained model alias. diff --git a/docs/reference/ml/trained-models/apis/delete-trained-models.asciidoc b/docs/reference/ml/trained-models/apis/delete-trained-models.asciidoc index 1b54343d1f1c..d738c6630a78 100644 --- a/docs/reference/ml/trained-models/apis/delete-trained-models.asciidoc +++ b/docs/reference/ml/trained-models/apis/delete-trained-models.asciidoc @@ -6,6 +6,12 @@ Delete trained models ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Deletes an existing trained {infer} model. diff --git a/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc b/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc index b55f022a5d16..82263c98e911 100644 --- a/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc +++ b/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc @@ -6,6 +6,12 @@ Get trained models stats ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Retrieves usage information for trained models. diff --git a/docs/reference/ml/trained-models/apis/get-trained-models.asciidoc b/docs/reference/ml/trained-models/apis/get-trained-models.asciidoc index ffb46eaa1fe2..4f583319ca38 100644 --- a/docs/reference/ml/trained-models/apis/get-trained-models.asciidoc +++ b/docs/reference/ml/trained-models/apis/get-trained-models.asciidoc @@ -6,6 +6,12 @@ Get trained models ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Retrieves configuration information about {ml-docs}/ml-nlp-deploy-models.html[{infer} trained models]. diff --git a/docs/reference/ml/trained-models/apis/infer-trained-model-deployment.asciidoc b/docs/reference/ml/trained-models/apis/infer-trained-model-deployment.asciidoc index 83bc56d18df6..8bb48eeb5cbf 100644 --- a/docs/reference/ml/trained-models/apis/infer-trained-model-deployment.asciidoc +++ b/docs/reference/ml/trained-models/apis/infer-trained-model-deployment.asciidoc @@ -6,6 +6,12 @@ Infer trained model deployment ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Evaluates a trained model. deprecated::[8.3.0,Replaced by <>.] 
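The replacement `_infer` endpoint, documented next, can be exercised against `lang_ident_model_1`, the language identification model that ships with the {stack}; the document field for that model is `text`:

[source,console]
----
POST _ml/trained_models/lang_ident_model_1/_infer
{
  "docs": [ { "text": "Der schnelle braune Fuchs" } ]
}
----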
diff --git a/docs/reference/ml/trained-models/apis/infer-trained-model.asciidoc b/docs/reference/ml/trained-models/apis/infer-trained-model.asciidoc index 7acbc0bd2385..93b8aa3422ab 100644 --- a/docs/reference/ml/trained-models/apis/infer-trained-model.asciidoc +++ b/docs/reference/ml/trained-models/apis/infer-trained-model.asciidoc @@ -6,6 +6,12 @@ Infer trained model ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Evaluates a trained model. The model may be any supervised model either trained by {dfanalytics} or imported. diff --git a/docs/reference/ml/trained-models/apis/ml-trained-models-apis.asciidoc b/docs/reference/ml/trained-models/apis/ml-trained-models-apis.asciidoc index 83ef3c49fb5e..5b0fc2b83afa 100644 --- a/docs/reference/ml/trained-models/apis/ml-trained-models-apis.asciidoc +++ b/docs/reference/ml/trained-models/apis/ml-trained-models-apis.asciidoc @@ -2,6 +2,12 @@ [[ml-df-trained-models-apis]] = {ml-cap} trained model APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + You can use the following APIs to perform model management operations: * <> diff --git a/docs/reference/ml/trained-models/apis/put-trained-model-definition-part.asciidoc b/docs/reference/ml/trained-models/apis/put-trained-model-definition-part.asciidoc index d1da29abffcd..857b86892a6b 100644 --- a/docs/reference/ml/trained-models/apis/put-trained-model-definition-part.asciidoc +++ b/docs/reference/ml/trained-models/apis/put-trained-model-definition-part.asciidoc @@ -6,6 +6,12 @@ Create part of a trained model ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Creates part of a trained model definition. [[ml-put-trained-model-definition-part-request]] diff --git a/docs/reference/ml/trained-models/apis/put-trained-model-vocabulary.asciidoc b/docs/reference/ml/trained-models/apis/put-trained-model-vocabulary.asciidoc index 2fdf86259388..39f93f882235 100644 --- a/docs/reference/ml/trained-models/apis/put-trained-model-vocabulary.asciidoc +++ b/docs/reference/ml/trained-models/apis/put-trained-model-vocabulary.asciidoc @@ -6,6 +6,12 @@ Create trained model vocabulary ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Creates a trained model vocabulary. This is supported only for natural language processing (NLP) models. diff --git a/docs/reference/ml/trained-models/apis/put-trained-models-aliases.asciidoc b/docs/reference/ml/trained-models/apis/put-trained-models-aliases.asciidoc index d8c00efe2801..3fcc7f5bb2da 100644 --- a/docs/reference/ml/trained-models/apis/put-trained-models-aliases.asciidoc +++ b/docs/reference/ml/trained-models/apis/put-trained-models-aliases.asciidoc @@ -6,6 +6,12 @@ Create or update trained model aliases ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Creates or updates a trained model alias. 
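A sketch of creating an alias, with placeholder model and alias names:

[source,console]
----
PUT _ml/trained_models/my-model-v2/model_aliases/my-model
----

If `my-model` already points at a different model, the same request needs `?reassign=true` to move it.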
diff --git a/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc b/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc index 32265af5f795..256c0d29f8d2 100644 --- a/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc +++ b/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc @@ -3,11 +3,15 @@ = Create trained models API [subs="attributes"] ++++ - Create trained models - ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Creates a trained model. WARNING: Models created in version 7.8.0 are not backwards compatible diff --git a/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc b/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc index bf9c4d14db29..7eaa14997665 100644 --- a/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc +++ b/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc @@ -6,6 +6,12 @@ Start trained model deployment ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Starts a new trained model deployment. [[start-trained-model-deployment-request]] diff --git a/docs/reference/ml/trained-models/apis/stop-trained-model-deployment.asciidoc b/docs/reference/ml/trained-models/apis/stop-trained-model-deployment.asciidoc index 622b440622cd..2f179e2391b7 100644 --- a/docs/reference/ml/trained-models/apis/stop-trained-model-deployment.asciidoc +++ b/docs/reference/ml/trained-models/apis/stop-trained-model-deployment.asciidoc @@ -6,6 +6,12 @@ Stop trained model deployment ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Stops a trained model deployment. [[stop-trained-model-deployment-request]] diff --git a/docs/reference/ml/trained-models/apis/update-trained-model-deployment.asciidoc b/docs/reference/ml/trained-models/apis/update-trained-model-deployment.asciidoc index d49ee3c6e872..a986e412f1a8 100644 --- a/docs/reference/ml/trained-models/apis/update-trained-model-deployment.asciidoc +++ b/docs/reference/ml/trained-models/apis/update-trained-model-deployment.asciidoc @@ -7,6 +7,12 @@ Update trained model deployment ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Updates certain properties of a trained model deployment. [[update-trained-model-deployment-request]] diff --git a/docs/reference/modules/shard-ops.asciidoc b/docs/reference/modules/shard-ops.asciidoc index c0e5ee6a220f..66ceebcfa031 100644 --- a/docs/reference/modules/shard-ops.asciidoc +++ b/docs/reference/modules/shard-ops.asciidoc @@ -1,5 +1,5 @@ [[shard-allocation-relocation-recovery]] -=== Shard allocation, relocation, and recovery +== Shard allocation, relocation, and recovery Each <> in Elasticsearch is divided into one or more <>. Each document in an index belongs to a single shard. @@ -12,14 +12,16 @@ Over the course of normal operation, Elasticsearch allocates shard copies to nod TIP: To learn about optimizing the number and size of shards in your cluster, refer to <>. To learn about how read and write operations are replicated across shards and shard copies, refer to <>. 
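The allocation, recovery, and relocation activity described in the following subsections can be inspected directly with the cluster allocation explain API; a minimal sketch, where `my-index-000001` is a placeholder index name:

[source,console]
----
GET _cluster/allocation/explain
{
  "index": "my-index-000001",
  "shard": 0,
  "primary": true
}
----

`GET _cat/shards?v` and `GET _cat/recovery?v` give a quick cluster-wide view of shard placement and of in-progress and completed recoveries.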
+[discrete] [[shard-allocation]] -==== Shard allocation +=== Shard allocation include::{es-ref-dir}/modules/shard-allocation-desc.asciidoc[] By default, the primary and replica shard copies for an index can be allocated to any node in the cluster, and may be relocated to rebalance the cluster. -===== Adjust shard allocation settings +[discrete] +==== Adjust shard allocation settings You can control how shard copies are allocated using the following settings: @@ -27,7 +29,8 @@ You can control how shard copies are allocated using the following settings: - <>: Use these settings to control how the shard copies for a specific index are allocated. For example, you might want to allocate an index to a node in a specific data tier, or to a node with specific attributes. -===== Monitor shard allocation +[discrete] +==== Monitor shard allocation If a shard copy is unassigned, it means that the shard copy is not allocated to any node in the cluster. This can happen if there are not enough nodes in the cluster to allocate the shard copy, or if the shard copy can't be allocated to any node that satisfies the shard allocation filtering rules. When a shard copy is unassigned, your cluster is considered unhealthy and returns a yellow or red cluster health status. @@ -39,12 +42,14 @@ You can use the following APIs to monitor shard allocation: <>. +[discrete] [[shard-recovery]] -==== Shard recovery +=== Shard recovery include::{es-ref-dir}/modules/shard-recovery-desc.asciidoc[] -===== Adjust shard recovery settings +[discrete] +==== Adjust shard recovery settings To control how shards are recovered, for example the resources that can be used by recovery operations, and which indices should be prioritized for recovery, you can adjust the following settings: @@ -54,21 +59,24 @@ To control how shards are recovered, for example the resources that can be used Shard recovery operations also respect general shard allocation settings. -===== Monitor shard recovery +[discrete] +==== Monitor shard recovery You can use the following APIs to monitor shard recovery: - View a list of in-progress and completed recoveries using the <> - View detailed information about a specific recovery using the <> +[discrete] [[shard-relocation]] -==== Shard relocation +=== Shard relocation Shard relocation is the process of moving shard copies from one node to another. This can happen when a node joins or leaves the cluster, or when the cluster is rebalancing. When a shard copy is relocated, it is created as a new shard copy on the target node. When the shard copy is fully allocated and recovered, the old shard copy is deleted. If the shard copy being relocated is a primary, then the new shard copy is marked as primary before the old shard copy is deleted. -===== Adjust shard relocation settings +[discrete] +==== Adjust shard relocation settings You can control how and when shard copies are relocated. For example, you can adjust the rebalancing settings that control when shard copies are relocated to balance the cluster, or the high watermark for disk-based shard allocation that can trigger relocation. These settings are part of the <>. diff --git a/docs/reference/nodes-shards.asciidoc b/docs/reference/nodes-shards.asciidoc new file mode 100644 index 000000000000..11095ed7b7eb --- /dev/null +++ b/docs/reference/nodes-shards.asciidoc @@ -0,0 +1,43 @@ +[[nodes-shards]] +== Nodes and shards + +[NOTE] +==== +Nodes and shards are what make {es} distributed and scalable. +These concepts aren't essential if you're just getting started. 
How you <> in production determines what you need to know: + +* *Self-managed {es}*: You are responsible for setting up and managing nodes, clusters, shards, and replicas. This includes managing the underlying infrastructure, scaling, and ensuring high availability through failover and backup strategies. +* *Elastic Cloud*: Elastic can autoscale resources in response to workload changes. Choose from different deployment types to apply sensible defaults for your use case. A basic understanding of nodes, shards, and replicas is still important. +* *Elastic Cloud Serverless*: You don't need to worry about nodes, shards, or replicas. These resources are 100% automated on the serverless platform, which is designed to scale with your workload. +==== + +You can add servers (_nodes_) to a cluster to increase capacity, and {es} automatically distributes your data and query load across all of the available nodes. + +Elastic is able to distribute your data across nodes by subdividing an index into _shards_. Each index in {es} is a grouping +of one or more physical shards, where each shard is a self-contained Lucene index containing a subset of the documents in +the index. By distributing the documents in an index across multiple shards, and distributing those shards across multiple +nodes, {es} increases indexing and query capacity. + +There are two types of shards: _primaries_ and _replicas_. Each document in an index belongs to one primary shard. A replica +shard is a copy of a primary shard. Replicas maintain redundant copies of your data across the nodes in your cluster. +This protects against hardware failure and increases capacity to serve read requests like searching or retrieving a document. + +[TIP] +==== +The number of primary shards in an index is fixed at the time that an index is created, but the number of replica shards can +be changed at any time, without interrupting indexing or query operations. +==== + +Shard copies in your cluster are automatically balanced across nodes to provide scale and high availability. All nodes are +aware of all the other nodes in the cluster and can forward client requests to the appropriate node. This allows {es} +to distribute indexing and query load across the cluster. + +If you're exploring {es} for the first time or working in a development environment, then you can use a cluster with a single node and create indices +with only one shard. However, in a production environment, you should build a cluster with multiple nodes and indices +with multiple shards to increase performance and resilience. + +// TODO - diagram + +* To learn about optimizing the number and size of shards in your cluster, refer to <>. +* To learn about how read and write operations are replicated across shards and shard copies, refer to <>. +* To adjust how shards are allocated and balanced across nodes, refer to <>. \ No newline at end of file diff --git a/docs/reference/query-rules/apis/delete-query-rule.asciidoc b/docs/reference/query-rules/apis/delete-query-rule.asciidoc index 01b73033aa36..4d91092eaf8a 100644 --- a/docs/reference/query-rules/apis/delete-query-rule.asciidoc +++ b/docs/reference/query-rules/apis/delete-query-rule.asciidoc @@ -6,6 +6,12 @@ Delete query rule ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-query_rules[Query rules APIs]. +-- + Removes an individual query rule within an existing query ruleset. This is a destructive action that is only recoverable by re-adding the same rule via the <> API. 
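A sketch of such a deletion, with placeholder ruleset and rule IDs:

[source,console]
----
DELETE _query_rules/my-ruleset-001/_rule/rule-1
----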
diff --git a/docs/reference/query-rules/apis/delete-query-ruleset.asciidoc b/docs/reference/query-rules/apis/delete-query-ruleset.asciidoc index 31507dce3d12..168310dcd407 100644 --- a/docs/reference/query-rules/apis/delete-query-ruleset.asciidoc +++ b/docs/reference/query-rules/apis/delete-query-ruleset.asciidoc @@ -6,6 +6,12 @@ Delete query ruleset ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-query_rules[Query rules APIs]. +-- + Removes a query ruleset and its associated data. This is a destructive action that is not recoverable. diff --git a/docs/reference/query-rules/apis/get-query-rule.asciidoc b/docs/reference/query-rules/apis/get-query-rule.asciidoc index 56713965d7bd..742982e5897e 100644 --- a/docs/reference/query-rules/apis/get-query-rule.asciidoc +++ b/docs/reference/query-rules/apis/get-query-rule.asciidoc @@ -6,6 +6,12 @@ Get query rule ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-query_rules[Query rules APIs]. +-- + Retrieves information about an individual query rule within a query ruleset. [[get-query-rule-request]] diff --git a/docs/reference/query-rules/apis/get-query-ruleset.asciidoc b/docs/reference/query-rules/apis/get-query-ruleset.asciidoc index 6bbcd157ea9e..55574fb7c67e 100644 --- a/docs/reference/query-rules/apis/get-query-ruleset.asciidoc +++ b/docs/reference/query-rules/apis/get-query-ruleset.asciidoc @@ -6,6 +6,12 @@ Get query ruleset ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-query_rules[Query rules APIs]. +-- + Retrieves information about a query ruleset. [[get-query-ruleset-request]] diff --git a/docs/reference/query-rules/apis/index.asciidoc b/docs/reference/query-rules/apis/index.asciidoc index fbeb477acacb..7dcdf0e8f4e1 100644 --- a/docs/reference/query-rules/apis/index.asciidoc +++ b/docs/reference/query-rules/apis/index.asciidoc @@ -7,6 +7,12 @@ --- +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-query_rules[Query rules APIs]. +-- + <> allow you to configure per-query rules that are applied at query time to queries that match the specific rule. Query rules are organized into _rulesets_, collections of query rules that are matched against incoming queries. Query rules are applied using the <>. diff --git a/docs/reference/query-rules/apis/list-query-rulesets.asciidoc b/docs/reference/query-rules/apis/list-query-rulesets.asciidoc index 304b8c774500..5f61bcb98b08 100644 --- a/docs/reference/query-rules/apis/list-query-rulesets.asciidoc +++ b/docs/reference/query-rules/apis/list-query-rulesets.asciidoc @@ -6,6 +6,12 @@ List query rulesets ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-query_rules[Query rules APIs]. +-- + Returns information about all stored query rulesets. Summary information on the number of rules per ruleset will be returned, and full details can be returned with the <> command. 
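For example, with a placeholder ruleset ID; `from` and `size` page through the ruleset summaries:

[source,console]
----
GET _query_rules?from=0&size=20

GET _query_rules/my-ruleset-001
----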
diff --git a/docs/reference/query-rules/apis/put-query-rule.asciidoc b/docs/reference/query-rules/apis/put-query-rule.asciidoc index 714ed9b096d1..df33d22b3902 100644 --- a/docs/reference/query-rules/apis/put-query-rule.asciidoc +++ b/docs/reference/query-rules/apis/put-query-rule.asciidoc @@ -6,6 +6,12 @@ Create or update query rule ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-query_rules[Query rules APIs]. +-- + Creates or updates an individual query rule within a query ruleset. [[put-query-rule-request]] diff --git a/docs/reference/query-rules/apis/put-query-ruleset.asciidoc b/docs/reference/query-rules/apis/put-query-ruleset.asciidoc index df7ec100db07..ea689dc0bf30 100644 --- a/docs/reference/query-rules/apis/put-query-ruleset.asciidoc +++ b/docs/reference/query-rules/apis/put-query-ruleset.asciidoc @@ -6,6 +6,12 @@ Create or update query ruleset ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-query_rules[Query rules APIs]. +-- + Creates or updates a query ruleset. [[put-query-ruleset-request]] diff --git a/docs/reference/query-rules/apis/test-query-ruleset.asciidoc b/docs/reference/query-rules/apis/test-query-ruleset.asciidoc index 4a670645cea6..4a47754f572e 100644 --- a/docs/reference/query-rules/apis/test-query-ruleset.asciidoc +++ b/docs/reference/query-rules/apis/test-query-ruleset.asciidoc @@ -6,6 +6,12 @@ Tests query ruleset ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-query_rules[Query rules APIs]. +-- + Evaluates match criteria against a query ruleset to identify the rules that would match that criteria. preview::[] diff --git a/docs/reference/repositories-metering-api/apis/clear-repositories-metering-archive.asciidoc b/docs/reference/repositories-metering-api/apis/clear-repositories-metering-archive.asciidoc index addcd65f0e84..bd05d76705eb 100644 --- a/docs/reference/repositories-metering-api/apis/clear-repositories-metering-archive.asciidoc +++ b/docs/reference/repositories-metering-api/apis/clear-repositories-metering-archive.asciidoc @@ -5,6 +5,12 @@ Clear repositories metering archive ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/operation/operation-nodes-clear-repositories-metering-archive[Clear the archived repositories metering API]. +-- + Removes the archived repositories metering information present in the cluster. [[clear-repositories-metering-archive-api-request]] diff --git a/docs/reference/repositories-metering-api/apis/get-repositories-metering.asciidoc b/docs/reference/repositories-metering-api/apis/get-repositories-metering.asciidoc index 314f85a7dba5..6b6d98a69ded 100644 --- a/docs/reference/repositories-metering-api/apis/get-repositories-metering.asciidoc +++ b/docs/reference/repositories-metering-api/apis/get-repositories-metering.asciidoc @@ -5,6 +5,12 @@ Get repositories metering information ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/operation/operation-nodes-get-repositories-metering-info[Get cluster repositories metering API]. +-- + Returns cluster repositories metering information. 
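A sketch of the metering request, where `node-1` is a placeholder node name (a comma-separated list of node IDs or names is accepted):

[source,console]
----
GET _nodes/node-1/_repositories_metering
----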
[[get-repositories-metering-api-request]] diff --git a/docs/reference/repositories-metering-api/repositories-metering-apis.asciidoc b/docs/reference/repositories-metering-api/repositories-metering-apis.asciidoc index b838e0fb213f..ca9bffa32a91 100644 --- a/docs/reference/repositories-metering-api/repositories-metering-apis.asciidoc +++ b/docs/reference/repositories-metering-api/repositories-metering-apis.asciidoc @@ -4,6 +4,12 @@ experimental[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- + You can use the following APIs to retrieve repositories metering information. This is an API used by Elastic's commercial offerings. diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index bfab15ed012c..9d9047c93cc9 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -6,7 +6,7 @@ {es} exposes REST APIs that are used by the UI components and can be called directly to configure and access {es} features. -..New API reference +.New API reference [sidebar] For the most up-to-date API details, refer to {api-es}[{es} APIs]. diff --git a/docs/reference/rest-api/info.asciidoc b/docs/reference/rest-api/info.asciidoc index fda5b07d2820..b9547163b07b 100644 --- a/docs/reference/rest-api/info.asciidoc +++ b/docs/reference/rest-api/info.asciidoc @@ -2,6 +2,12 @@ [[info-api]] == Info API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-info[Info APIs]. +-- + Provides general information about the installed {xpack} features. [discrete] diff --git a/docs/reference/rest-api/logstash/delete-pipeline.asciidoc b/docs/reference/rest-api/logstash/delete-pipeline.asciidoc index ff7494d34e61..2e56b0289a39 100644 --- a/docs/reference/rest-api/logstash/delete-pipeline.asciidoc +++ b/docs/reference/rest-api/logstash/delete-pipeline.asciidoc @@ -5,6 +5,12 @@ Delete {ls} pipeline ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-logstash[{ls} APIs]. +-- + This API deletes a pipeline used for {logstash-ref}/logstash-centralized-pipeline-management.html[{ls} Central Management]. diff --git a/docs/reference/rest-api/logstash/get-pipeline.asciidoc b/docs/reference/rest-api/logstash/get-pipeline.asciidoc index 8409a5128d52..8e440f218aa0 100644 --- a/docs/reference/rest-api/logstash/get-pipeline.asciidoc +++ b/docs/reference/rest-api/logstash/get-pipeline.asciidoc @@ -5,6 +5,12 @@ Get {ls} pipeline ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-logstash[{ls} APIs]. +-- + This API retrieves pipelines used for {logstash-ref}/logstash-centralized-pipeline-management.html[{ls} Central Management]. diff --git a/docs/reference/rest-api/logstash/index.asciidoc b/docs/reference/rest-api/logstash/index.asciidoc index a52be19a75df..7f28020a8b18 100644 --- a/docs/reference/rest-api/logstash/index.asciidoc +++ b/docs/reference/rest-api/logstash/index.asciidoc @@ -2,6 +2,12 @@ [[logstash-apis]] == {ls} APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-logstash[{ls} APIs]. 
+-- + The following APIs are used to manage pipelines used by {logstash-ref}/logstash-centralized-pipeline-management.html[{ls} Central Management]: diff --git a/docs/reference/rest-api/logstash/put-pipeline.asciidoc b/docs/reference/rest-api/logstash/put-pipeline.asciidoc index a0d2f02f2bf6..26af8d0f124c 100644 --- a/docs/reference/rest-api/logstash/put-pipeline.asciidoc +++ b/docs/reference/rest-api/logstash/put-pipeline.asciidoc @@ -5,6 +5,12 @@ Create or update {ls} pipeline ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-logstash[{ls} APIs]. +-- + This API creates or updates a {ls} pipeline used for {logstash-ref}/logstash-centralized-pipeline-management.html[{ls} Central Management]. diff --git a/docs/reference/rest-api/root.asciidoc b/docs/reference/rest-api/root.asciidoc index 8821981c2afe..aaf40b31db21 100644 --- a/docs/reference/rest-api/root.asciidoc +++ b/docs/reference/rest-api/root.asciidoc @@ -4,6 +4,12 @@ Root API ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-info[Info APIs]. +-- + The Elasticsearch API's base url returns its basic build, version, and cluster information. diff --git a/docs/reference/rest-api/security.asciidoc b/docs/reference/rest-api/security.asciidoc index 82cf38e52bd8..57726b074ac3 100644 --- a/docs/reference/rest-api/security.asciidoc +++ b/docs/reference/rest-api/security.asciidoc @@ -1,6 +1,13 @@ [role="xpack"] [[security-api]] == Security APIs + +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + To use the security APIs, you must set `xpack.security.enabled` to `true` in the `elasticsearch.yml` file. diff --git a/docs/reference/rest-api/security/activate-user-profile.asciidoc b/docs/reference/rest-api/security/activate-user-profile.asciidoc index f6ce32e1bb19..0db41937f2ff 100644 --- a/docs/reference/rest-api/security/activate-user-profile.asciidoc +++ b/docs/reference/rest-api/security/activate-user-profile.asciidoc @@ -5,6 +5,12 @@ Activate user profile ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + NOTE: The user profile feature is designed only for use by {kib} and Elastic’s {observability}, {ents}, and {elastic-sec} solutions. Individual users and external applications should not call this API directly. Elastic reserves diff --git a/docs/reference/rest-api/security/authenticate.asciidoc b/docs/reference/rest-api/security/authenticate.asciidoc index a02deb444628..78d9cc2bcaa9 100644 --- a/docs/reference/rest-api/security/authenticate.asciidoc +++ b/docs/reference/rest-api/security/authenticate.asciidoc @@ -5,6 +5,12 @@ Authenticate ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Enables you to submit a request with a basic auth header to authenticate a user and retrieve information about the authenticated user. 
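The request takes no body; any valid credentials in the `Authorization` header work:

[source,console]
----
GET /_security/_authenticate
----

The response describes the authenticated user, including the realm that authenticated them and their roles.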
diff --git a/docs/reference/rest-api/security/bulk-create-roles.asciidoc b/docs/reference/rest-api/security/bulk-create-roles.asciidoc index 37f49f244577..030e5e42bf29 100644 --- a/docs/reference/rest-api/security/bulk-create-roles.asciidoc +++ b/docs/reference/rest-api/security/bulk-create-roles.asciidoc @@ -5,6 +5,12 @@ Bulk create or update roles API ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Bulk adds and updates roles in the native realm. [[security-api-bulk-put-role-request]] diff --git a/docs/reference/rest-api/security/bulk-delete-roles.asciidoc b/docs/reference/rest-api/security/bulk-delete-roles.asciidoc index b9978c89bef3..899591b3276d 100644 --- a/docs/reference/rest-api/security/bulk-delete-roles.asciidoc +++ b/docs/reference/rest-api/security/bulk-delete-roles.asciidoc @@ -5,6 +5,12 @@ Bulk delete roles API ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Bulk deletes roles in the native realm. [[security-api-bulk-delete-role-request]] diff --git a/docs/reference/rest-api/security/bulk-update-api-keys.asciidoc b/docs/reference/rest-api/security/bulk-update-api-keys.asciidoc index faf87c67d1cc..8206ac5d9d4f 100644 --- a/docs/reference/rest-api/security/bulk-update-api-keys.asciidoc +++ b/docs/reference/rest-api/security/bulk-update-api-keys.asciidoc @@ -6,6 +6,12 @@ Bulk update API keys ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + [[security-api-bulk-update-api-keys-request]] ==== {api-request-title} diff --git a/docs/reference/rest-api/security/change-password.asciidoc b/docs/reference/rest-api/security/change-password.asciidoc index c035661cdd70..f2dc45cad20b 100644 --- a/docs/reference/rest-api/security/change-password.asciidoc +++ b/docs/reference/rest-api/security/change-password.asciidoc @@ -5,6 +5,12 @@ Change passwords ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Changes the passwords of users in the native realm and built-in users. [[security-api-change-password-request]] diff --git a/docs/reference/rest-api/security/clear-api-key-cache.asciidoc b/docs/reference/rest-api/security/clear-api-key-cache.asciidoc index 7828026b604f..61f05a85dfaf 100644 --- a/docs/reference/rest-api/security/clear-api-key-cache.asciidoc +++ b/docs/reference/rest-api/security/clear-api-key-cache.asciidoc @@ -5,6 +5,12 @@ Clear API key cache ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Evicts a subset of all entries from the API key cache. The cache is also automatically cleared on state changes of the security index. diff --git a/docs/reference/rest-api/security/clear-cache.asciidoc b/docs/reference/rest-api/security/clear-cache.asciidoc index 3e4e5432768b..270856ba28e6 100644 --- a/docs/reference/rest-api/security/clear-cache.asciidoc +++ b/docs/reference/rest-api/security/clear-cache.asciidoc @@ -5,6 +5,12 @@ Clear cache ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Evicts users from the user cache. You can completely clear the cache or evict specific users. 
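For example, to evict a single user from one realm's cache; the realm name `default_native` and the username are placeholders, and omitting `usernames` clears the whole realm cache:

[source,console]
----
POST /_security/realm/default_native/_clear_cache?usernames=jacknich
----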
diff --git a/docs/reference/rest-api/security/clear-privileges-cache.asciidoc b/docs/reference/rest-api/security/clear-privileges-cache.asciidoc index 69a5743419d2..cf615010779b 100644 --- a/docs/reference/rest-api/security/clear-privileges-cache.asciidoc +++ b/docs/reference/rest-api/security/clear-privileges-cache.asciidoc @@ -5,6 +5,12 @@ Clear privileges cache ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Evicts privileges from the native application privilege cache. The cache is also automatically cleared for applications that have their privileges updated. diff --git a/docs/reference/rest-api/security/clear-roles-cache.asciidoc b/docs/reference/rest-api/security/clear-roles-cache.asciidoc index 63c54b51dcf2..edcb2c272351 100644 --- a/docs/reference/rest-api/security/clear-roles-cache.asciidoc +++ b/docs/reference/rest-api/security/clear-roles-cache.asciidoc @@ -5,6 +5,12 @@ Clear roles cache ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Evicts roles from the native role cache. [[security-api-clear-role-cache-request]] diff --git a/docs/reference/rest-api/security/clear-service-token-caches.asciidoc b/docs/reference/rest-api/security/clear-service-token-caches.asciidoc index ff4587549534..26158220418e 100644 --- a/docs/reference/rest-api/security/clear-service-token-caches.asciidoc +++ b/docs/reference/rest-api/security/clear-service-token-caches.asciidoc @@ -6,6 +6,12 @@ Clear service account token caches ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Evicts a subset of all entries from the <> token caches. diff --git a/docs/reference/rest-api/security/create-api-keys.asciidoc b/docs/reference/rest-api/security/create-api-keys.asciidoc index f740e9413e3e..20f1c2cb155c 100644 --- a/docs/reference/rest-api/security/create-api-keys.asciidoc +++ b/docs/reference/rest-api/security/create-api-keys.asciidoc @@ -5,6 +5,12 @@ Create API keys ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Creates an API key for access without requiring basic authentication. [[security-api-create-api-key-request]] diff --git a/docs/reference/rest-api/security/create-cross-cluster-api-key.asciidoc b/docs/reference/rest-api/security/create-cross-cluster-api-key.asciidoc index d7d55bafc6d1..63b2f37063f9 100644 --- a/docs/reference/rest-api/security/create-cross-cluster-api-key.asciidoc +++ b/docs/reference/rest-api/security/create-cross-cluster-api-key.asciidoc @@ -1,11 +1,16 @@ [role="xpack"] [[security-api-create-cross-cluster-api-key]] === Create Cross-Cluster API key API - ++++ Create Cross-Cluster API key ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Creates an API key of the `cross_cluster` type for the <> access. A `cross_cluster` API key cannot be used to authenticate through the REST interface. 
In contrast, a <> is meant to be used through the REST interface diff --git a/docs/reference/rest-api/security/create-role-mappings.asciidoc b/docs/reference/rest-api/security/create-role-mappings.asciidoc index e78d06a5676e..71c931260865 100644 --- a/docs/reference/rest-api/security/create-role-mappings.asciidoc +++ b/docs/reference/rest-api/security/create-role-mappings.asciidoc @@ -5,6 +5,12 @@ Create or update role mappings ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Creates and updates role mappings. [[security-api-put-role-mapping-request]] diff --git a/docs/reference/rest-api/security/create-roles.asciidoc b/docs/reference/rest-api/security/create-roles.asciidoc index d23b9f06e2d8..0b0cd828140a 100644 --- a/docs/reference/rest-api/security/create-roles.asciidoc +++ b/docs/reference/rest-api/security/create-roles.asciidoc @@ -5,6 +5,12 @@ Create or update roles ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Adds and updates roles in the native realm. [[security-api-put-role-request]] diff --git a/docs/reference/rest-api/security/create-service-token.asciidoc b/docs/reference/rest-api/security/create-service-token.asciidoc index 9a3c2b678c92..30195d89ff47 100644 --- a/docs/reference/rest-api/security/create-service-token.asciidoc +++ b/docs/reference/rest-api/security/create-service-token.asciidoc @@ -5,6 +5,12 @@ Create service account tokens ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Creates a <> token for access without requiring basic authentication. diff --git a/docs/reference/rest-api/security/create-users.asciidoc b/docs/reference/rest-api/security/create-users.asciidoc index 428df1102329..6d8e1cdaa9ba 100644 --- a/docs/reference/rest-api/security/create-users.asciidoc +++ b/docs/reference/rest-api/security/create-users.asciidoc @@ -5,6 +5,12 @@ Create or update users ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Adds and updates users in the native realm. These users are commonly referred to as _native users_. diff --git a/docs/reference/rest-api/security/delegate-pki-authentication.asciidoc b/docs/reference/rest-api/security/delegate-pki-authentication.asciidoc index 860708011cd2..00a80b3fd27b 100644 --- a/docs/reference/rest-api/security/delegate-pki-authentication.asciidoc +++ b/docs/reference/rest-api/security/delegate-pki-authentication.asciidoc @@ -5,6 +5,12 @@ Delegate PKI authentication ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Implements the exchange of an _X509Certificate_ chain into an {es} access token. diff --git a/docs/reference/rest-api/security/delete-app-privileges.asciidoc b/docs/reference/rest-api/security/delete-app-privileges.asciidoc index 39ac1706c6dc..dad57bcac409 100644 --- a/docs/reference/rest-api/security/delete-app-privileges.asciidoc +++ b/docs/reference/rest-api/security/delete-app-privileges.asciidoc @@ -5,6 +5,12 @@ Delete application privileges ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Removes <>.
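To make the create-API-key and create-roles endpoints above concrete, here is a minimal sketch; the key name, expiration, and the role descriptor contents are illustrative placeholders, and omitting `role_descriptors` entirely would instead give the key a snapshot of the caller's own permissions:

[source,console]
----
POST /_security/api_key
{
  "name": "my-api-key",
  "expiration": "1d",
  "role_descriptors": {
    "my-role": {
      "cluster": [ "monitor" ],
      "indices": [
        {
          "names": [ "my-index" ],
          "privileges": [ "read" ]
        }
      ]
    }
  }
}
----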
[[security-api-delete-privilege-request]] diff --git a/docs/reference/rest-api/security/delete-role-mappings.asciidoc b/docs/reference/rest-api/security/delete-role-mappings.asciidoc index c5dd1aa9c909..4ec7e3817b03 100644 --- a/docs/reference/rest-api/security/delete-role-mappings.asciidoc +++ b/docs/reference/rest-api/security/delete-role-mappings.asciidoc @@ -5,6 +5,12 @@ Delete role mappings ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Removes role mappings. [[security-api-delete-role-mapping-request]] diff --git a/docs/reference/rest-api/security/delete-roles.asciidoc b/docs/reference/rest-api/security/delete-roles.asciidoc index 427e7c6b1860..d30a4c525180 100644 --- a/docs/reference/rest-api/security/delete-roles.asciidoc +++ b/docs/reference/rest-api/security/delete-roles.asciidoc @@ -5,6 +5,12 @@ Delete roles ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Removes roles in the native realm. [[security-api-delete-role-request]] diff --git a/docs/reference/rest-api/security/delete-service-token.asciidoc b/docs/reference/rest-api/security/delete-service-token.asciidoc index b704fb912126..f7c488e9e713 100644 --- a/docs/reference/rest-api/security/delete-service-token.asciidoc +++ b/docs/reference/rest-api/security/delete-service-token.asciidoc @@ -5,6 +5,12 @@ Delete service account token ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Deletes <> tokens for a `service` in a specified `namespace`. diff --git a/docs/reference/rest-api/security/delete-users.asciidoc b/docs/reference/rest-api/security/delete-users.asciidoc index b08f99e809b4..ff781a7d9ef0 100644 --- a/docs/reference/rest-api/security/delete-users.asciidoc +++ b/docs/reference/rest-api/security/delete-users.asciidoc @@ -5,6 +5,12 @@ Delete users ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Deletes users from the native realm. [[security-api-delete-user-request]] diff --git a/docs/reference/rest-api/security/disable-user-profile.asciidoc b/docs/reference/rest-api/security/disable-user-profile.asciidoc index 35658f071679..f665b8955e0d 100644 --- a/docs/reference/rest-api/security/disable-user-profile.asciidoc +++ b/docs/reference/rest-api/security/disable-user-profile.asciidoc @@ -5,6 +5,12 @@ Disable user profile ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + NOTE: The user profile feature is designed only for use by {kib} and Elastic’s {observability}, {ents}, and {elastic-sec} solutions. Individual users and external applications should not call this API directly. Elastic reserves diff --git a/docs/reference/rest-api/security/disable-users.asciidoc b/docs/reference/rest-api/security/disable-users.asciidoc index 9859085cb182..3f8bc74d7e10 100644 --- a/docs/reference/rest-api/security/disable-users.asciidoc +++ b/docs/reference/rest-api/security/disable-users.asciidoc @@ -5,6 +5,12 @@ Disable users ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Disables users in the native realm. 
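The delete endpoints above all follow the same single-request shape; for example, removing a native-realm role (`my_admin_role` is a placeholder):

[source,console]
----
DELETE /_security/role/my_admin_role
----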
diff --git a/docs/reference/rest-api/security/enable-user-profile.asciidoc b/docs/reference/rest-api/security/enable-user-profile.asciidoc index e27673b07f59..9f74d90f88b9 100644 --- a/docs/reference/rest-api/security/enable-user-profile.asciidoc +++ b/docs/reference/rest-api/security/enable-user-profile.asciidoc @@ -5,6 +5,12 @@ Enable user profile ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + NOTE: The user profile feature is designed only for use by {kib} and Elastic’s {observability}, {ents}, and {elastic-sec} solutions. Individual users and external applications should not call this API directly. Elastic reserves diff --git a/docs/reference/rest-api/security/enable-users.asciidoc b/docs/reference/rest-api/security/enable-users.asciidoc index 04193e6c2794..db78a5c22211 100644 --- a/docs/reference/rest-api/security/enable-users.asciidoc +++ b/docs/reference/rest-api/security/enable-users.asciidoc @@ -5,6 +5,12 @@ Enable users ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Enables users in the native realm. diff --git a/docs/reference/rest-api/security/enroll-kibana.asciidoc b/docs/reference/rest-api/security/enroll-kibana.asciidoc index 55de31b5407d..78bc0569fa19 100644 --- a/docs/reference/rest-api/security/enroll-kibana.asciidoc +++ b/docs/reference/rest-api/security/enroll-kibana.asciidoc @@ -4,6 +4,12 @@ Enroll {kib} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Enables a {kib} instance to configure itself for communication with a secured {es} cluster. NOTE: This API is currently intended for internal use only by {kib}. diff --git a/docs/reference/rest-api/security/enroll-node.asciidoc b/docs/reference/rest-api/security/enroll-node.asciidoc index 81cae73fb22d..d5c2ce2a5174 100644 --- a/docs/reference/rest-api/security/enroll-node.asciidoc +++ b/docs/reference/rest-api/security/enroll-node.asciidoc @@ -4,6 +4,12 @@ Enroll node ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Allows a new node to join an existing cluster with security features enabled. [[security-api-node-enrollment-api-request]] diff --git a/docs/reference/rest-api/security/get-api-keys.asciidoc b/docs/reference/rest-api/security/get-api-keys.asciidoc index bf4929753989..6a133f136d4b 100644 --- a/docs/reference/rest-api/security/get-api-keys.asciidoc +++ b/docs/reference/rest-api/security/get-api-keys.asciidoc @@ -5,6 +5,12 @@ Get API key information ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Retrieves information for one or more API keys. [[security-api-get-api-key-request]] diff --git a/docs/reference/rest-api/security/get-app-privileges.asciidoc b/docs/reference/rest-api/security/get-app-privileges.asciidoc index f0f3f1b69071..c8bb709f96d5 100644 --- a/docs/reference/rest-api/security/get-app-privileges.asciidoc +++ b/docs/reference/rest-api/security/get-app-privileges.asciidoc @@ -5,6 +5,12 @@ Get application privileges ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Retrieves <>. 
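A sketch of the get-API-key endpoint above, filtering by key name (`my-api-key` is a placeholder):

[source,console]
----
GET /_security/api_key?name=my-api-key
----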
[[security-api-get-privileges-request]] diff --git a/docs/reference/rest-api/security/get-builtin-privileges.asciidoc b/docs/reference/rest-api/security/get-builtin-privileges.asciidoc index 7f3d75b92678..08a03a5b1e83 100644 --- a/docs/reference/rest-api/security/get-builtin-privileges.asciidoc +++ b/docs/reference/rest-api/security/get-builtin-privileges.asciidoc @@ -6,6 +6,12 @@ Get builtin privileges ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Retrieves the list of <> and <> that are available in this version of {es}. diff --git a/docs/reference/rest-api/security/get-role-mappings.asciidoc b/docs/reference/rest-api/security/get-role-mappings.asciidoc index 8272ec4d015a..49063e775982 100644 --- a/docs/reference/rest-api/security/get-role-mappings.asciidoc +++ b/docs/reference/rest-api/security/get-role-mappings.asciidoc @@ -5,6 +5,12 @@ Get role mappings ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Retrieves role mappings. [[security-api-get-role-mapping-request]] diff --git a/docs/reference/rest-api/security/get-roles.asciidoc b/docs/reference/rest-api/security/get-roles.asciidoc index 3cc2f95c6ea7..03f083e8202f 100644 --- a/docs/reference/rest-api/security/get-roles.asciidoc +++ b/docs/reference/rest-api/security/get-roles.asciidoc @@ -5,6 +5,12 @@ Get roles ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Retrieves roles in the native realm. [[security-api-get-role-request]] diff --git a/docs/reference/rest-api/security/get-service-accounts.asciidoc b/docs/reference/rest-api/security/get-service-accounts.asciidoc index 74f98f2602e3..e392b3100d98 100644 --- a/docs/reference/rest-api/security/get-service-accounts.asciidoc +++ b/docs/reference/rest-api/security/get-service-accounts.asciidoc @@ -6,6 +6,12 @@ Get service accounts ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Retrieves information about <>. NOTE: Currently, only the `elastic/fleet-server` service account is available. diff --git a/docs/reference/rest-api/security/get-service-credentials.asciidoc b/docs/reference/rest-api/security/get-service-credentials.asciidoc index 3da6c3d86055..7a24aef059ae 100644 --- a/docs/reference/rest-api/security/get-service-credentials.asciidoc +++ b/docs/reference/rest-api/security/get-service-credentials.asciidoc @@ -1,11 +1,16 @@ [role="xpack"] [[security-api-get-service-credentials]] === Get service account credentials API - ++++ Get service account credentials ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Retrieves all service credentials for a <>. [[security-api-get-service-credentials-request]] diff --git a/docs/reference/rest-api/security/get-settings.asciidoc b/docs/reference/rest-api/security/get-settings.asciidoc index 46e4e0cf529b..c99b9bcedba2 100644 --- a/docs/reference/rest-api/security/get-settings.asciidoc +++ b/docs/reference/rest-api/security/get-settings.asciidoc @@ -5,6 +5,12 @@ Get Security settings ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Retrieves settings for the security internal indices. 
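As a minimal example of the get-roles endpoint above; omit the role name to retrieve all roles (`my_admin_role` is a placeholder):

[source,console]
----
GET /_security/role/my_admin_role
----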
[[security-api-get-settings-prereqs]] diff --git a/docs/reference/rest-api/security/get-tokens.asciidoc b/docs/reference/rest-api/security/get-tokens.asciidoc index 9f5261a477bc..eefc86528ef4 100644 --- a/docs/reference/rest-api/security/get-tokens.asciidoc +++ b/docs/reference/rest-api/security/get-tokens.asciidoc @@ -5,6 +5,12 @@ Get token ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Creates a bearer token for access without requiring basic authentication. [[security-api-get-token-request]] diff --git a/docs/reference/rest-api/security/get-user-privileges.asciidoc b/docs/reference/rest-api/security/get-user-privileges.asciidoc index 8115cd365c5a..4e0d68b4b679 100644 --- a/docs/reference/rest-api/security/get-user-privileges.asciidoc +++ b/docs/reference/rest-api/security/get-user-privileges.asciidoc @@ -5,6 +5,12 @@ Get user privileges ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Retrieves the <> for the logged in user. diff --git a/docs/reference/rest-api/security/get-user-profile.asciidoc b/docs/reference/rest-api/security/get-user-profile.asciidoc index 9f0ba64d136a..60732cf0ab39 100644 --- a/docs/reference/rest-api/security/get-user-profile.asciidoc +++ b/docs/reference/rest-api/security/get-user-profile.asciidoc @@ -5,6 +5,12 @@ Get user profiles ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + NOTE: The user profile feature is designed only for use by {kib} and Elastic’s {observability}, {ents}, and {elastic-sec} solutions. Individual users and external applications should not call this API directly. Elastic reserves diff --git a/docs/reference/rest-api/security/get-users.asciidoc b/docs/reference/rest-api/security/get-users.asciidoc index 59a390f6f253..8770c90cdec0 100644 --- a/docs/reference/rest-api/security/get-users.asciidoc +++ b/docs/reference/rest-api/security/get-users.asciidoc @@ -5,8 +5,13 @@ Get users ++++ -Retrieves information about users in the native realm and built-in users. +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- +Retrieves information about users in the native realm and built-in users. [[security-api-get-user-request]] ==== {api-request-title} diff --git a/docs/reference/rest-api/security/grant-api-keys.asciidoc b/docs/reference/rest-api/security/grant-api-keys.asciidoc index 10c109b00bbf..4ab599c18dd6 100644 --- a/docs/reference/rest-api/security/grant-api-keys.asciidoc +++ b/docs/reference/rest-api/security/grant-api-keys.asciidoc @@ -5,6 +5,12 @@ Grant API keys ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Creates an API key on behalf of another user. 
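The get-token endpoint above exchanges credentials for a bearer token; a minimal sketch using the `client_credentials` grant type:

[source,console]
----
POST /_security/oauth2/token
{
  "grant_type" : "client_credentials"
}
----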
[[security-api-grant-api-key-request]] diff --git a/docs/reference/rest-api/security/has-privileges-user-profile.asciidoc b/docs/reference/rest-api/security/has-privileges-user-profile.asciidoc index afadf394aa43..3fc825b60bce 100644 --- a/docs/reference/rest-api/security/has-privileges-user-profile.asciidoc +++ b/docs/reference/rest-api/security/has-privileges-user-profile.asciidoc @@ -5,6 +5,12 @@ Has privileges user profile ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + NOTE: The user profile feature is designed only for use by {kib} and Elastic’s {observability}, {ents}, and {elastic-sec} solutions. Individual users and external applications should not call this API directly. Elastic reserves diff --git a/docs/reference/rest-api/security/has-privileges.asciidoc b/docs/reference/rest-api/security/has-privileges.asciidoc index 229ffb499727..1e08b41d92fe 100644 --- a/docs/reference/rest-api/security/has-privileges.asciidoc +++ b/docs/reference/rest-api/security/has-privileges.asciidoc @@ -6,6 +6,12 @@ ++++ [[security-api-has-privilege]] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Determines whether the logged in user has a specified list of privileges. [[security-api-has-privileges-request]] diff --git a/docs/reference/rest-api/security/invalidate-api-keys.asciidoc b/docs/reference/rest-api/security/invalidate-api-keys.asciidoc index 57a36a97634a..27c5a8c83180 100644 --- a/docs/reference/rest-api/security/invalidate-api-keys.asciidoc +++ b/docs/reference/rest-api/security/invalidate-api-keys.asciidoc @@ -5,6 +5,12 @@ Invalidate API key ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Invalidates one or more API keys. [[security-api-invalidate-api-key-request]] diff --git a/docs/reference/rest-api/security/invalidate-tokens.asciidoc b/docs/reference/rest-api/security/invalidate-tokens.asciidoc index 58f20fdcdc42..9a6fe6185b47 100644 --- a/docs/reference/rest-api/security/invalidate-tokens.asciidoc +++ b/docs/reference/rest-api/security/invalidate-tokens.asciidoc @@ -5,6 +5,12 @@ Invalidate token ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Invalidates one or more access tokens or refresh tokens. [[security-api-invalidate-token-request]] diff --git a/docs/reference/rest-api/security/oidc-authenticate-api.asciidoc b/docs/reference/rest-api/security/oidc-authenticate-api.asciidoc index 282a054717e6..7bf8a1aad3ee 100644 --- a/docs/reference/rest-api/security/oidc-authenticate-api.asciidoc +++ b/docs/reference/rest-api/security/oidc-authenticate-api.asciidoc @@ -5,6 +5,12 @@ OpenID Connect authenticate ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Submits the response to an oAuth 2.0 authentication request for consumption from {es}. Upon successful validation, {es} will respond with an {es} internal Access Token and Refresh Token that can be subsequently used for authentication. 
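A minimal sketch of the has-privileges check described above (`my-index` and the chosen privileges are placeholders):

[source,console]
----
GET /_security/user/_has_privileges
{
  "cluster": [ "monitor" ],
  "index": [
    {
      "names": [ "my-index" ],
      "privileges": [ "read" ]
    }
  ]
}
----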
diff --git a/docs/reference/rest-api/security/oidc-logout-api.asciidoc b/docs/reference/rest-api/security/oidc-logout-api.asciidoc index a181f4c836fb..d8bd60b3cd85 100644 --- a/docs/reference/rest-api/security/oidc-logout-api.asciidoc +++ b/docs/reference/rest-api/security/oidc-logout-api.asciidoc @@ -5,6 +5,12 @@ OpenID Connect logout ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Submits a request to invalidate a refresh token and an access token that was generated as a response to a call to `/_security/oidc/authenticate`. diff --git a/docs/reference/rest-api/security/oidc-prepare-authentication-api.asciidoc b/docs/reference/rest-api/security/oidc-prepare-authentication-api.asciidoc index 4452020b4547..227f15493430 100644 --- a/docs/reference/rest-api/security/oidc-prepare-authentication-api.asciidoc +++ b/docs/reference/rest-api/security/oidc-prepare-authentication-api.asciidoc @@ -5,6 +5,12 @@ OpenID Connect prepare authentication ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Creates an oAuth 2.0 authentication request as a URL string based on the configuration of the respective OpenID Connect authentication realm in {es}. diff --git a/docs/reference/rest-api/security/put-app-privileges.asciidoc b/docs/reference/rest-api/security/put-app-privileges.asciidoc index 28be4c08c4e7..cf903d99a724 100644 --- a/docs/reference/rest-api/security/put-app-privileges.asciidoc +++ b/docs/reference/rest-api/security/put-app-privileges.asciidoc @@ -5,6 +5,12 @@ Create or update application privileges ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Adds or updates <>. [[security-api-put-privileges-request]] diff --git a/docs/reference/rest-api/security/query-api-key.asciidoc b/docs/reference/rest-api/security/query-api-key.asciidoc index 513cb99a55a4..1dec37c166f8 100644 --- a/docs/reference/rest-api/security/query-api-key.asciidoc +++ b/docs/reference/rest-api/security/query-api-key.asciidoc @@ -1,11 +1,16 @@ [role="xpack"] [[security-api-query-api-key]] === Query API key information API - ++++ Query API key information ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + //// [source,console] ---- diff --git a/docs/reference/rest-api/security/query-role.asciidoc b/docs/reference/rest-api/security/query-role.asciidoc index 937bd263140f..acdfbb45b84f 100644 --- a/docs/reference/rest-api/security/query-role.asciidoc +++ b/docs/reference/rest-api/security/query-role.asciidoc @@ -6,6 +6,12 @@ Query Role ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Retrieves roles with <> in a <> fashion. [[security-api-query-role-request]] diff --git a/docs/reference/rest-api/security/saml-authenticate-api.asciidoc b/docs/reference/rest-api/security/saml-authenticate-api.asciidoc index aa556a42d699..4c156df6e1bb 100644 --- a/docs/reference/rest-api/security/saml-authenticate-api.asciidoc +++ b/docs/reference/rest-api/security/saml-authenticate-api.asciidoc @@ -5,6 +5,12 @@ SAML authenticate ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. 
+-- + Submits a SAML `Response` message to {es} for consumption. NOTE: This API is intended for use by custom web applications other than {kib}. diff --git a/docs/reference/rest-api/security/saml-complete-logout-api.asciidoc b/docs/reference/rest-api/security/saml-complete-logout-api.asciidoc index 1fb4ab1581ab..d4847fb481cd 100644 --- a/docs/reference/rest-api/security/saml-complete-logout-api.asciidoc +++ b/docs/reference/rest-api/security/saml-complete-logout-api.asciidoc @@ -5,6 +5,12 @@ SAML complete logout ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Verifies the logout response sent from the SAML IdP. NOTE: This API is intended for use by custom web applications other than {kib}. diff --git a/docs/reference/rest-api/security/saml-invalidate-api.asciidoc b/docs/reference/rest-api/security/saml-invalidate-api.asciidoc index 21c10341c6fe..fb233f3903d1 100644 --- a/docs/reference/rest-api/security/saml-invalidate-api.asciidoc +++ b/docs/reference/rest-api/security/saml-invalidate-api.asciidoc @@ -5,6 +5,12 @@ SAML invalidate ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Submits a SAML LogoutRequest message to {es} for consumption. NOTE: This API is intended for use by custom web applications other than {kib}. diff --git a/docs/reference/rest-api/security/saml-logout-api.asciidoc b/docs/reference/rest-api/security/saml-logout-api.asciidoc index 71729365865d..560a713b5bf1 100644 --- a/docs/reference/rest-api/security/saml-logout-api.asciidoc +++ b/docs/reference/rest-api/security/saml-logout-api.asciidoc @@ -5,6 +5,12 @@ SAML logout ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Submits a request to invalidate an access token and refresh token. NOTE: This API is intended for use by custom web applications other than {kib}. diff --git a/docs/reference/rest-api/security/saml-prepare-authentication-api.asciidoc b/docs/reference/rest-api/security/saml-prepare-authentication-api.asciidoc index b62d3d2ac9f7..60b3451cc531 100644 --- a/docs/reference/rest-api/security/saml-prepare-authentication-api.asciidoc +++ b/docs/reference/rest-api/security/saml-prepare-authentication-api.asciidoc @@ -5,6 +5,12 @@ SAML prepare authentication ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Creates a SAML authentication request (`<AuthnRequest>`) as a URL string, based on the configuration of the respective SAML realm in {es}. NOTE: This API is intended for use by custom web applications other than {kib}. diff --git a/docs/reference/rest-api/security/saml-sp-metadata.asciidoc b/docs/reference/rest-api/security/saml-sp-metadata.asciidoc index deecbf5f0e66..0f66b7a9f76f 100644 --- a/docs/reference/rest-api/security/saml-sp-metadata.asciidoc +++ b/docs/reference/rest-api/security/saml-sp-metadata.asciidoc @@ -5,6 +5,12 @@ SAML service provider metadata ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Generates SAML metadata for a SAML 2.0 Service Provider.
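As a minimal sketch of the SAML prepare-authentication endpoint above (`saml1` is a placeholder realm name; the response contains the redirect URL for the IdP):

[source,console]
----
POST /_security/saml/prepare
{
  "realm" : "saml1"
}
----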
[[security-api-saml-sp-metadata-request]] diff --git a/docs/reference/rest-api/security/ssl.asciidoc b/docs/reference/rest-api/security/ssl.asciidoc index 3b8ba0eab688..78b9aee30109 100644 --- a/docs/reference/rest-api/security/ssl.asciidoc +++ b/docs/reference/rest-api/security/ssl.asciidoc @@ -5,6 +5,12 @@ SSL certificate ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + The `certificates` API enables you to retrieve information about the X.509 certificates that are used to encrypt communications in your {es} cluster. diff --git a/docs/reference/rest-api/security/suggest-user-profile.asciidoc b/docs/reference/rest-api/security/suggest-user-profile.asciidoc index ad01987a1e70..4ea04864e4a6 100644 --- a/docs/reference/rest-api/security/suggest-user-profile.asciidoc +++ b/docs/reference/rest-api/security/suggest-user-profile.asciidoc @@ -5,6 +5,12 @@ Suggest user profile ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + NOTE: The user profile feature is designed only for use by {kib} and Elastic’s {observability}, {ents}, and {elastic-sec} solutions. Individual users and external applications should not call this API directly. Elastic reserves diff --git a/docs/reference/rest-api/security/update-api-key.asciidoc b/docs/reference/rest-api/security/update-api-key.asciidoc index f297e3922a65..12dd883b5955 100644 --- a/docs/reference/rest-api/security/update-api-key.asciidoc +++ b/docs/reference/rest-api/security/update-api-key.asciidoc @@ -6,6 +6,12 @@ Update API key ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + [[security-api-update-api-key-request]] ==== {api-request-title} diff --git a/docs/reference/rest-api/security/update-cross-cluster-api-key.asciidoc b/docs/reference/rest-api/security/update-cross-cluster-api-key.asciidoc index b90cb6368eef..5c5f55b73597 100644 --- a/docs/reference/rest-api/security/update-cross-cluster-api-key.asciidoc +++ b/docs/reference/rest-api/security/update-cross-cluster-api-key.asciidoc @@ -6,6 +6,12 @@ Update Cross-Cluster API key ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Update an existing cross-cluster API Key that is used for <> access. diff --git a/docs/reference/rest-api/security/update-settings.asciidoc b/docs/reference/rest-api/security/update-settings.asciidoc index b227bb70b31d..3ec2ef98153c 100644 --- a/docs/reference/rest-api/security/update-settings.asciidoc +++ b/docs/reference/rest-api/security/update-settings.asciidoc @@ -5,6 +5,12 @@ Update Security settings ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Updates the settings of the security internal indices. diff --git a/docs/reference/rest-api/security/update-user-profile-data.asciidoc b/docs/reference/rest-api/security/update-user-profile-data.asciidoc index 01fa5e11d10e..c461c169a517 100644 --- a/docs/reference/rest-api/security/update-user-profile-data.asciidoc +++ b/docs/reference/rest-api/security/update-user-profile-data.asciidoc @@ -5,6 +5,12 @@ Update user profile data ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. 
+-- + NOTE: The user profile feature is designed only for use by {kib} and Elastic’s {observability}, {ents}, and {elastic-sec} solutions. Individual users and external applications should not call this API directly. Elastic reserves diff --git a/docs/reference/rest-api/usage.asciidoc b/docs/reference/rest-api/usage.asciidoc index b284e1eb00e7..cb9d80cc3a97 100644 --- a/docs/reference/rest-api/usage.asciidoc +++ b/docs/reference/rest-api/usage.asciidoc @@ -2,6 +2,12 @@ [[usage-api]] == Usage API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-xpack[Usage APIs]. +-- + Provides usage information about the installed {xpack} features. [discrete] diff --git a/docs/reference/rest-api/watcher.asciidoc b/docs/reference/rest-api/watcher.asciidoc index 4c4ce1ab7ee0..227eda660546 100644 --- a/docs/reference/rest-api/watcher.asciidoc +++ b/docs/reference/rest-api/watcher.asciidoc @@ -2,6 +2,12 @@ [[watcher-api]] == Watcher APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + * <> * <> * <> diff --git a/docs/reference/rest-api/watcher/ack-watch.asciidoc b/docs/reference/rest-api/watcher/ack-watch.asciidoc index 3c45b068a34c..b3ea9b5d6b41 100644 --- a/docs/reference/rest-api/watcher/ack-watch.asciidoc +++ b/docs/reference/rest-api/watcher/ack-watch.asciidoc @@ -5,6 +5,12 @@ Ack watch ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + <> enables you to manually throttle execution of the watch's actions. diff --git a/docs/reference/rest-api/watcher/activate-watch.asciidoc b/docs/reference/rest-api/watcher/activate-watch.asciidoc index d8af79854c83..c37d85cc5029 100644 --- a/docs/reference/rest-api/watcher/activate-watch.asciidoc +++ b/docs/reference/rest-api/watcher/activate-watch.asciidoc @@ -5,6 +5,12 @@ Activate watch ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + A watch can be either <>. This API enables you to activate a currently inactive watch. diff --git a/docs/reference/rest-api/watcher/deactivate-watch.asciidoc b/docs/reference/rest-api/watcher/deactivate-watch.asciidoc index ba4170174343..058ada195f97 100644 --- a/docs/reference/rest-api/watcher/deactivate-watch.asciidoc +++ b/docs/reference/rest-api/watcher/deactivate-watch.asciidoc @@ -5,6 +5,12 @@ Deactivate watch ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + A watch can be either <>. This API enables you to deactivate a currently active watch. diff --git a/docs/reference/rest-api/watcher/delete-watch.asciidoc b/docs/reference/rest-api/watcher/delete-watch.asciidoc index 3ffcb43ed65e..536ec293ab8f 100644 --- a/docs/reference/rest-api/watcher/delete-watch.asciidoc +++ b/docs/reference/rest-api/watcher/delete-watch.asciidoc @@ -5,6 +5,12 @@ Delete watch ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + Removes a watch from {watcher}. 
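Both the SSL certificate endpoint and the usage API covered above are simple parameterless GET requests:

[source,console]
----
GET /_ssl/certificates

GET /_xpack/usage
----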
[[watcher-api-delete-watch-request]] diff --git a/docs/reference/rest-api/watcher/execute-watch.asciidoc b/docs/reference/rest-api/watcher/execute-watch.asciidoc index 7acecf170903..eab15a152b15 100644 --- a/docs/reference/rest-api/watcher/execute-watch.asciidoc +++ b/docs/reference/rest-api/watcher/execute-watch.asciidoc @@ -5,6 +5,12 @@ Execute watch ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + Forces the execution of a stored watch. [[watcher-api-execute-watch-request]] diff --git a/docs/reference/rest-api/watcher/get-settings.asciidoc b/docs/reference/rest-api/watcher/get-settings.asciidoc index c5773e6ee32b..80d86cc455da 100644 --- a/docs/reference/rest-api/watcher/get-settings.asciidoc +++ b/docs/reference/rest-api/watcher/get-settings.asciidoc @@ -5,6 +5,12 @@ Get Watcher settings ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + This API allows a user to retrieve the user-configurable settings for the Watcher internal index (`.watches`). Only a subset of the index settings—those that are user-configurable—will be shown. This includes: - `index.auto_expand_replicas` diff --git a/docs/reference/rest-api/watcher/get-watch.asciidoc b/docs/reference/rest-api/watcher/get-watch.asciidoc index e80bfed88b6e..1b5dbe2d0e47 100644 --- a/docs/reference/rest-api/watcher/get-watch.asciidoc +++ b/docs/reference/rest-api/watcher/get-watch.asciidoc @@ -5,6 +5,12 @@ Get watch ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + Retrieves a watch by its ID. [[watcher-api-get-watch-request]] diff --git a/docs/reference/rest-api/watcher/put-watch.asciidoc b/docs/reference/rest-api/watcher/put-watch.asciidoc index deab44f106fb..134e8149fde7 100644 --- a/docs/reference/rest-api/watcher/put-watch.asciidoc +++ b/docs/reference/rest-api/watcher/put-watch.asciidoc @@ -5,6 +5,12 @@ Create or update watch ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + Either registers a new watch in {watcher} or updates an existing one. [[watcher-api-put-watch-request]] diff --git a/docs/reference/rest-api/watcher/query-watches.asciidoc b/docs/reference/rest-api/watcher/query-watches.asciidoc index 7a006243ed7f..b96261d4a47e 100644 --- a/docs/reference/rest-api/watcher/query-watches.asciidoc +++ b/docs/reference/rest-api/watcher/query-watches.asciidoc @@ -5,6 +5,12 @@ Query watches ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + Retrieves all registered watches. [[watcher-api-query-watches-request]] diff --git a/docs/reference/rest-api/watcher/start.asciidoc b/docs/reference/rest-api/watcher/start.asciidoc index b153410ed290..15606ba0a857 100644 --- a/docs/reference/rest-api/watcher/start.asciidoc +++ b/docs/reference/rest-api/watcher/start.asciidoc @@ -5,6 +5,12 @@ Start watch service ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + Starts the {watcher} service if it is not already running. 
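A minimal sketch of the execute-watch endpoint above; `my_watch` is a placeholder watch ID, and with no request body the watch runs with default execution options:

[source,console]
----
POST /_watcher/watch/my_watch/_execute
----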
[[watcher-api-start-request]] diff --git a/docs/reference/rest-api/watcher/stats.asciidoc b/docs/reference/rest-api/watcher/stats.asciidoc index 2dbca69a6761..e0bdc1b5a889 100644 --- a/docs/reference/rest-api/watcher/stats.asciidoc +++ b/docs/reference/rest-api/watcher/stats.asciidoc @@ -6,6 +6,12 @@ Get {watcher} stats ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + Retrieves the current {watcher} metrics. [[watcher-api-stats-request]] diff --git a/docs/reference/rest-api/watcher/stop.asciidoc b/docs/reference/rest-api/watcher/stop.asciidoc index 50acd6e9eb2d..272899a2cfa3 100644 --- a/docs/reference/rest-api/watcher/stop.asciidoc +++ b/docs/reference/rest-api/watcher/stop.asciidoc @@ -5,6 +5,12 @@ Stop watch service ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + Stops the {watcher} service if it is running. [[watcher-api-stop-request]] diff --git a/docs/reference/rest-api/watcher/update-settings.asciidoc b/docs/reference/rest-api/watcher/update-settings.asciidoc index 8602c6776997..9ad38064e34a 100644 --- a/docs/reference/rest-api/watcher/update-settings.asciidoc +++ b/docs/reference/rest-api/watcher/update-settings.asciidoc @@ -5,6 +5,12 @@ Update Watcher settings ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + This API allows a user to modify the settings for the Watcher internal index (`.watches`). Only a subset of settings are allowed to be modified. This includes: - `index.auto_expand_replicas` diff --git a/docs/reference/rollup/apis/delete-job.asciidoc b/docs/reference/rollup/apis/delete-job.asciidoc index c563e705039e..03f5349e15d4 100644 --- a/docs/reference/rollup/apis/delete-job.asciidoc +++ b/docs/reference/rollup/apis/delete-job.asciidoc @@ -8,6 +8,12 @@ deprecated::[8.11.0,"Rollups will be removed in a future version. Use <> instead."] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-rollup[Rollup APIs]. +-- + Deletes an existing {rollup-job}. [[rollup-delete-job-request]] diff --git a/docs/reference/rollup/apis/get-job.asciidoc b/docs/reference/rollup/apis/get-job.asciidoc index fcafbbe95159..9fff4d665f5f 100644 --- a/docs/reference/rollup/apis/get-job.asciidoc +++ b/docs/reference/rollup/apis/get-job.asciidoc @@ -7,6 +7,12 @@ deprecated::[8.11.0,"Rollups will be removed in a future version. Use <> instead."] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-rollup[Rollup APIs]. +-- + Retrieves the configuration, stats, and status of {rollup-jobs}. [[rollup-get-job-request]] diff --git a/docs/reference/rollup/apis/put-job.asciidoc b/docs/reference/rollup/apis/put-job.asciidoc index c52e7a042e0c..a60f20a3de5b 100644 --- a/docs/reference/rollup/apis/put-job.asciidoc +++ b/docs/reference/rollup/apis/put-job.asciidoc @@ -12,6 +12,12 @@ WARNING: From 8.15.0 invoking this API in a cluster with no rollup usage will fa deprecation and planned removal. A cluster either needs to contain a rollup job or a rollup index in order for this API to be allowed to execute. +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-rollup[Rollup APIs]. +-- + Creates a {rollup-job}.
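For the read-only endpoints above, minimal requests look like this (`sensor` is a placeholder {rollup-job} ID):

[source,console]
----
GET /_watcher/stats

GET /_rollup/job/sensor
----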
[[rollup-put-job-api-request]] diff --git a/docs/reference/rollup/apis/rollup-caps.asciidoc b/docs/reference/rollup/apis/rollup-caps.asciidoc index 95f652f6d441..be1c3ed171a2 100644 --- a/docs/reference/rollup/apis/rollup-caps.asciidoc +++ b/docs/reference/rollup/apis/rollup-caps.asciidoc @@ -7,6 +7,12 @@ deprecated::[8.11.0,"Rollups will be removed in a future version. Use <> instead."] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-rollup[Rollup APIs]. +-- + Returns the capabilities of any {rollup-jobs} that have been configured for a specific index or index pattern. diff --git a/docs/reference/rollup/apis/rollup-index-caps.asciidoc b/docs/reference/rollup/apis/rollup-index-caps.asciidoc index c5b729f2e52e..830cc332e8f4 100644 --- a/docs/reference/rollup/apis/rollup-index-caps.asciidoc +++ b/docs/reference/rollup/apis/rollup-index-caps.asciidoc @@ -7,6 +7,12 @@ deprecated::[8.11.0,"Rollups will be removed in a future version. Use <> instead."] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-rollup[Rollup APIs]. +-- + Returns the rollup capabilities of all jobs inside of a rollup index (e.g. the index where rollup data is stored). diff --git a/docs/reference/rollup/apis/rollup-search.asciidoc b/docs/reference/rollup/apis/rollup-search.asciidoc index 491dcc6c38ae..088a74973806 100644 --- a/docs/reference/rollup/apis/rollup-search.asciidoc +++ b/docs/reference/rollup/apis/rollup-search.asciidoc @@ -7,6 +7,12 @@ deprecated::[8.11.0,"Rollups will be removed in a future version. Use <> instead."] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-rollup[Rollup APIs]. +-- + Enables searching rolled-up data using the standard Query DSL. [[rollup-search-request]] diff --git a/docs/reference/rollup/apis/start-job.asciidoc b/docs/reference/rollup/apis/start-job.asciidoc index c102c26ea5d8..dbeed8b09d1c 100644 --- a/docs/reference/rollup/apis/start-job.asciidoc +++ b/docs/reference/rollup/apis/start-job.asciidoc @@ -8,6 +8,12 @@ deprecated::[8.11.0,"Rollups will be removed in a future version. Use <> instead."] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-rollup[Rollup APIs]. +-- + Starts an existing, stopped {rollup-job}. [[rollup-start-job-request]] diff --git a/docs/reference/rollup/apis/stop-job.asciidoc b/docs/reference/rollup/apis/stop-job.asciidoc index 61e561b4ceac..8c0fd6ab2f3a 100644 --- a/docs/reference/rollup/apis/stop-job.asciidoc +++ b/docs/reference/rollup/apis/stop-job.asciidoc @@ -8,6 +8,12 @@ deprecated::[8.11.0,"Rollups will be removed in a future version. Use <> instead."] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-rollup[Rollup APIs]. +-- + Stops an existing, started {rollup-job}. [[rollup-stop-job-request]] diff --git a/docs/reference/rollup/rollup-apis.asciidoc b/docs/reference/rollup/rollup-apis.asciidoc index 44833a0846c2..71922f0932a0 100644 --- a/docs/reference/rollup/rollup-apis.asciidoc +++ b/docs/reference/rollup/rollup-apis.asciidoc @@ -4,6 +4,12 @@ deprecated::[8.11.0,"Rollups will be removed in a future version. Please <> to <> instead."] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-rollup[Rollup APIs]. 
+-- + [discrete] [[rollup-jobs-endpoint]] === Jobs diff --git a/docs/reference/scripting/apis/create-stored-script-api.asciidoc b/docs/reference/scripting/apis/create-stored-script-api.asciidoc index dab1314e65dc..5636e212180b 100644 --- a/docs/reference/scripting/apis/create-stored-script-api.asciidoc +++ b/docs/reference/scripting/apis/create-stored-script-api.asciidoc @@ -4,6 +4,12 @@ Create or update stored script ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-script[Script APIs]. +-- + Creates or updates a <> or <>. diff --git a/docs/reference/scripting/apis/delete-stored-script-api.asciidoc b/docs/reference/scripting/apis/delete-stored-script-api.asciidoc index e233922c9a7d..c6e570f2013e 100644 --- a/docs/reference/scripting/apis/delete-stored-script-api.asciidoc +++ b/docs/reference/scripting/apis/delete-stored-script-api.asciidoc @@ -4,6 +4,12 @@ Delete stored script ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-script[Script APIs]. +-- + Deletes a <> or <>. diff --git a/docs/reference/scripting/apis/get-script-contexts-api.asciidoc b/docs/reference/scripting/apis/get-script-contexts-api.asciidoc index ca24c97e494e..0ef6eccf947a 100644 --- a/docs/reference/scripting/apis/get-script-contexts-api.asciidoc +++ b/docs/reference/scripting/apis/get-script-contexts-api.asciidoc @@ -4,6 +4,12 @@ Get script contexts ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-script[Script APIs]. +-- + Retrieves a list of supported script contexts and their methods. [source,console] diff --git a/docs/reference/scripting/apis/get-script-languages-api.asciidoc b/docs/reference/scripting/apis/get-script-languages-api.asciidoc index dd5935bc4dcd..a35c979b2fcc 100644 --- a/docs/reference/scripting/apis/get-script-languages-api.asciidoc +++ b/docs/reference/scripting/apis/get-script-languages-api.asciidoc @@ -4,6 +4,12 @@ Get script languages ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-script[Script APIs]. +-- + Retrieves a list of supported <> and their contexts. diff --git a/docs/reference/scripting/apis/get-stored-script-api.asciidoc b/docs/reference/scripting/apis/get-stored-script-api.asciidoc index fffeb24e0331..d2e5a7beedad 100644 --- a/docs/reference/scripting/apis/get-stored-script-api.asciidoc +++ b/docs/reference/scripting/apis/get-stored-script-api.asciidoc @@ -4,6 +4,12 @@ Get stored script ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-script[Script APIs]. +-- + Retrieves a <> or <>. diff --git a/docs/reference/scripting/apis/script-apis.asciidoc b/docs/reference/scripting/apis/script-apis.asciidoc index e344cb00ee6f..f5499ade8458 100644 --- a/docs/reference/scripting/apis/script-apis.asciidoc +++ b/docs/reference/scripting/apis/script-apis.asciidoc @@ -1,6 +1,12 @@ [[script-apis]] == Script APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-script[Script APIs]. +-- + Use the following APIs to manage, store, and test your <>. 
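To make the create-stored-script endpoint above concrete, a sketch with placeholder script ID, field, and parameter names:

[source,console]
----
PUT /_scripts/my-stored-script
{
  "script": {
    "lang": "painless",
    "source": "doc['my_field'].value * params['factor']"
  }
}
----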
diff --git a/docs/reference/search-application/apis/delete-search-application.asciidoc b/docs/reference/search-application/apis/delete-search-application.asciidoc index 4043942b0950..52a32247a79e 100644 --- a/docs/reference/search-application/apis/delete-search-application.asciidoc +++ b/docs/reference/search-application/apis/delete-search-application.asciidoc @@ -1,13 +1,18 @@ [role="xpack"] [[delete-search-application]] === Delete Search Application - -beta::[] - ++++ Delete Search Application ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search_application[Search application APIs]. +-- + +beta::[] + Removes a Search Application and its associated alias. Indices attached to the Search Application are not removed. diff --git a/docs/reference/search-application/apis/get-search-application.asciidoc b/docs/reference/search-application/apis/get-search-application.asciidoc index f0c107011eb4..adeb84bdbe0a 100644 --- a/docs/reference/search-application/apis/get-search-application.asciidoc +++ b/docs/reference/search-application/apis/get-search-application.asciidoc @@ -1,13 +1,18 @@ [role="xpack"] [[get-search-application]] === Get Search Application - -beta::[] - ++++ Get Search Application ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search_application[Search application APIs]. +-- + +beta::[] + Retrieves information about a search application. [[get-search-application-request]] diff --git a/docs/reference/search-application/apis/index.asciidoc b/docs/reference/search-application/apis/index.asciidoc index 1df38f6a841c..a01b93a1ee4a 100644 --- a/docs/reference/search-application/apis/index.asciidoc +++ b/docs/reference/search-application/apis/index.asciidoc @@ -9,6 +9,12 @@ beta::[] --- +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search_application[Search application APIs]. +-- + Use Search Application APIs to manage tasks and resources related to Search Applications. * <> diff --git a/docs/reference/search-application/apis/list-search-applications.asciidoc b/docs/reference/search-application/apis/list-search-applications.asciidoc index 3cc077bf682d..33bd8ddee009 100644 --- a/docs/reference/search-application/apis/list-search-applications.asciidoc +++ b/docs/reference/search-application/apis/list-search-applications.asciidoc @@ -1,13 +1,18 @@ [role="xpack"] [[list-search-applications]] === List Search Applications - -beta::[] - ++++ List Search Applications ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search_application[Search application APIs]. +-- + +beta::[] + Returns information about Search Applications. [[list-search-applications-request]] diff --git a/docs/reference/search-application/apis/put-search-application.asciidoc b/docs/reference/search-application/apis/put-search-application.asciidoc index dc5e20ec40b7..bb8edb536804 100644 --- a/docs/reference/search-application/apis/put-search-application.asciidoc +++ b/docs/reference/search-application/apis/put-search-application.asciidoc @@ -1,13 +1,18 @@ [role="xpack"] [[put-search-application]] === Put Search Application - -beta::[] - ++++ Put Search Application ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search_application[Search application APIs]. +-- + +beta::[] + Creates or updates a Search Application. 
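As a sketch of the put-search-application endpoint above, assuming a single placeholder index and a trivial query template (`my-app`, `my-index`, and the `query_string` parameter are illustrative only):

[source,console]
----
PUT /_application/search_application/my-app
{
  "indices": [ "my-index" ],
  "template": {
    "script": {
      "source": {
        "query": {
          "query_string": {
            "query": "{{query_string}}"
          }
        }
      },
      "params": {
        "query_string": "*"
      }
    }
  }
}
----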
[[put-search-application-request]] diff --git a/docs/reference/search-application/apis/search-application-render-query.asciidoc b/docs/reference/search-application/apis/search-application-render-query.asciidoc index 687176b4fb07..42586c7186bc 100644 --- a/docs/reference/search-application/apis/search-application-render-query.asciidoc +++ b/docs/reference/search-application/apis/search-application-render-query.asciidoc @@ -1,13 +1,18 @@ [role="xpack"] [[search-application-render-query]] === Render Search Application Query - -preview::[] - ++++ Render Search Application Query ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search_application[Search application APIs]. +-- + +preview::[] + Given specified query parameters, generates an {es} query using the search template associated with the search application or a default template if none is specified. Unspecified template parameters will be assigned their default values (if applicable). diff --git a/docs/reference/search-application/apis/search-application-search.asciidoc b/docs/reference/search-application/apis/search-application-search.asciidoc index 2d13ed5f1103..1aab4ddf8476 100644 --- a/docs/reference/search-application/apis/search-application-search.asciidoc +++ b/docs/reference/search-application/apis/search-application-search.asciidoc @@ -1,13 +1,18 @@ [role="xpack"] [[search-application-search]] === Search Application Search - -beta::[] - ++++ Search Application Search ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search_application[Search application APIs]. +-- + +beta::[] + Given specified query parameters, generates and executes an {es} query using the search template associated with the search application or a default template if none is specified. Unspecified template parameters will be assigned their default values (if applicable). diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index 70ffe02e44d9..7db9b8a304be 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -1,6 +1,12 @@ [[search]] == Search APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Search APIs are used to search and aggregate data stored in {es} indices and data streams. For an overview and related tutorials, see <>. diff --git a/docs/reference/search/async-search.asciidoc b/docs/reference/search/async-search.asciidoc index 786cfaee8024..9a9e9ca45e81 100644 --- a/docs/reference/search/async-search.asciidoc +++ b/docs/reference/search/async-search.asciidoc @@ -2,6 +2,12 @@ [[async-search]] === Async search +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + The async search API lets you asynchronously execute a search request, monitor its progress, and retrieve partial results as they become available. diff --git a/docs/reference/search/clear-scroll-api.asciidoc b/docs/reference/search/clear-scroll-api.asciidoc index a005babfd1be..a6a2cd4b3cab 100644 --- a/docs/reference/search/clear-scroll-api.asciidoc +++ b/docs/reference/search/clear-scroll-api.asciidoc @@ -4,6 +4,12 @@ Clear scroll ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Clears the search context and results for a <>.
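The async search API above is easiest to see with a small request; a sketch against a placeholder index, waiting briefly for the search to complete before returning:

[source,console]
----
POST /my-index/_async_search?wait_for_completion_timeout=2s
{
  "query": {
    "match_all": {}
  }
}
----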
diff --git a/docs/reference/search/count.asciidoc b/docs/reference/search/count.asciidoc index 399545adf8d1..e0e86dd13129 100644 --- a/docs/reference/search/count.asciidoc +++ b/docs/reference/search/count.asciidoc @@ -4,6 +4,12 @@ Count ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Gets the number of matches for a search query. [source,console] diff --git a/docs/reference/search/explain.asciidoc b/docs/reference/search/explain.asciidoc index 77e2d5bd63ef..01c7bb4611a1 100644 --- a/docs/reference/search/explain.asciidoc +++ b/docs/reference/search/explain.asciidoc @@ -4,6 +4,12 @@ Explain ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Returns information about why a specific document matches (or doesn't match) a query. diff --git a/docs/reference/search/field-caps.asciidoc b/docs/reference/search/field-caps.asciidoc index 2ff2b8d18604..f4d9146b8ea9 100644 --- a/docs/reference/search/field-caps.asciidoc +++ b/docs/reference/search/field-caps.asciidoc @@ -4,6 +4,11 @@ Field capabilities ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- Allows you to retrieve the capabilities of fields among multiple indices. For data streams, the API returns field capabilities among the stream's backing diff --git a/docs/reference/search/multi-search-template-api.asciidoc b/docs/reference/search/multi-search-template-api.asciidoc index b1c9518b1f2b..010320c6b05e 100644 --- a/docs/reference/search/multi-search-template-api.asciidoc +++ b/docs/reference/search/multi-search-template-api.asciidoc @@ -4,6 +4,12 @@ Multi search template ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Runs multiple <> with a single request. diff --git a/docs/reference/search/multi-search.asciidoc b/docs/reference/search/multi-search.asciidoc index 9cafa756f035..ea2dd5977933 100644 --- a/docs/reference/search/multi-search.asciidoc +++ b/docs/reference/search/multi-search.asciidoc @@ -4,6 +4,12 @@ Multi search ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Executes several searches with a single API request. [source,console] diff --git a/docs/reference/search/point-in-time-api.asciidoc b/docs/reference/search/point-in-time-api.asciidoc index 9cd91626c760..bc7cbd01ebb3 100644 --- a/docs/reference/search/point-in-time-api.asciidoc +++ b/docs/reference/search/point-in-time-api.asciidoc @@ -4,6 +4,12 @@ Point in time ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + A search request by default executes against the most recent visible data of the target indices, which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the state of the data as it existed when initiated. diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc index 5f1a0ccfdd6b..4fbe5ea1bb9f 100644 --- a/docs/reference/search/profile.asciidoc +++ b/docs/reference/search/profile.asciidoc @@ -24,6 +24,11 @@ The output from the Profile API is *very* verbose, especially for complicated requests executed across many shards. 
Pretty-printing the response is recommended to help understand the output. +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- [[search-profile-api-example]] ==== {api-examples-title} diff --git a/docs/reference/search/rank-eval.asciidoc b/docs/reference/search/rank-eval.asciidoc index 05862ebbbcca..4a03371c4da3 100644 --- a/docs/reference/search/rank-eval.asciidoc +++ b/docs/reference/search/rank-eval.asciidoc @@ -4,6 +4,12 @@ Ranking evaluation ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Allows you to evaluate the quality of ranked search results over a set of typical search queries. diff --git a/docs/reference/search/render-search-template-api.asciidoc b/docs/reference/search/render-search-template-api.asciidoc index 0c782f26068e..42e82b6e352b 100644 --- a/docs/reference/search/render-search-template-api.asciidoc +++ b/docs/reference/search/render-search-template-api.asciidoc @@ -4,6 +4,12 @@ Render search template ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Renders a <> as a <>. diff --git a/docs/reference/search/retriever.asciidoc b/docs/reference/search/retriever.asciidoc index c7df40ff5e07..7e98297b780e 100644 --- a/docs/reference/search/retriever.asciidoc +++ b/docs/reference/search/retriever.asciidoc @@ -14,6 +14,12 @@ Refer to <> for a high level overview of the retrievers abs Refer to <> for additional examples. ==== +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + The following retrievers are available: `standard`:: diff --git a/docs/reference/search/rrf.asciidoc b/docs/reference/search/rrf.asciidoc index a942c0162a80..842bd7049e3b 100644 --- a/docs/reference/search/rrf.asciidoc +++ b/docs/reference/search/rrf.asciidoc @@ -27,6 +27,12 @@ return score [[rrf-api]] ==== Reciprocal rank fusion API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + You can use RRF as part of a <> to combine and rank documents using separate sets of top documents (result sets) from a combination of <> using an <>. A minimum of *two* child retrievers is required for ranking. diff --git a/docs/reference/search/scroll-api.asciidoc b/docs/reference/search/scroll-api.asciidoc index e3b4123ddff6..0f89df877ba7 100644 --- a/docs/reference/search/scroll-api.asciidoc +++ b/docs/reference/search/scroll-api.asciidoc @@ -8,6 +8,12 @@ IMPORTANT: We no longer recommend using the scroll API for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the <> parameter with a point in time (PIT). +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Retrieves the next batch of results for a <>. diff --git a/docs/reference/search/search-shards.asciidoc b/docs/reference/search/search-shards.asciidoc index 49045acf4c48..13f9ae877285 100644 --- a/docs/reference/search/search-shards.asciidoc +++ b/docs/reference/search/search-shards.asciidoc @@ -4,6 +4,12 @@ Search shards ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. 
+-- + Returns the indices and shards that a search request would be executed against. [source,console] diff --git a/docs/reference/search/search-template-api.asciidoc b/docs/reference/search/search-template-api.asciidoc index c60b5281c05e..2094ee892401 100644 --- a/docs/reference/search/search-template-api.asciidoc +++ b/docs/reference/search/search-template-api.asciidoc @@ -4,6 +4,12 @@ Search template ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Runs a search with a <>. //// diff --git a/docs/reference/search/search-vector-tile-api.asciidoc b/docs/reference/search/search-vector-tile-api.asciidoc index 2cdc29918a69..f63abda6fcb4 100644 --- a/docs/reference/search/search-vector-tile-api.asciidoc +++ b/docs/reference/search/search-vector-tile-api.asciidoc @@ -4,6 +4,11 @@ Vector tile search ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- Searches a vector tile for geospatial values. Returns results as a binary https://docs.mapbox.com/vector-tiles/specification[Mapbox vector tile]. diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc index 2ad407b4ae1e..d022605db22b 100644 --- a/docs/reference/search/search.asciidoc +++ b/docs/reference/search/search.asciidoc @@ -4,6 +4,12 @@ Search ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Returns search hits that match the query defined in the request. [source,console] diff --git a/docs/reference/search/suggesters.asciidoc b/docs/reference/search/suggesters.asciidoc index c5cdbc3bd666..22f427477b90 100644 --- a/docs/reference/search/suggesters.asciidoc +++ b/docs/reference/search/suggesters.asciidoc @@ -3,6 +3,12 @@ Suggests similar looking terms based on a provided text by using a suggester. +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + [source,console] -------------------------------------------------- POST my-index-000001/_search diff --git a/docs/reference/search/terms-enum.asciidoc b/docs/reference/search/terms-enum.asciidoc index 4f34deb985ab..46f6d3560ecd 100644 --- a/docs/reference/search/terms-enum.asciidoc +++ b/docs/reference/search/terms-enum.asciidoc @@ -4,6 +4,12 @@ Terms enum ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + The terms enum API can be used to discover terms in the index that match a partial string. Supported field types are <>, <>, <>, diff --git a/docs/reference/search/validate.asciidoc b/docs/reference/search/validate.asciidoc index ce682e485cd2..ab943cf72b46 100644 --- a/docs/reference/search/validate.asciidoc +++ b/docs/reference/search/validate.asciidoc @@ -4,6 +4,12 @@ Validate ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/operation/operation-indices-validate-query[Validate a query]. +-- + Validates a potentially expensive query without executing it. 
[source,console] diff --git a/docs/reference/searchable-snapshots/apis/clear-cache.asciidoc b/docs/reference/searchable-snapshots/apis/clear-cache.asciidoc index ff67be02e6d0..ac357e29d1d1 100644 --- a/docs/reference/searchable-snapshots/apis/clear-cache.asciidoc +++ b/docs/reference/searchable-snapshots/apis/clear-cache.asciidoc @@ -5,6 +5,12 @@ Clear cache ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-searchable_snapshots[Searchable snapshots APIs]. +-- + experimental::[] Clears indices and data streams from the shared cache for diff --git a/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc b/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc index b47bc2370ab1..f1613ea62492 100644 --- a/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc +++ b/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc @@ -5,6 +5,12 @@ Mount snapshot ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-searchable_snapshots[Searchable snapshots APIs]. +-- + Mount a snapshot as a searchable snapshot index. [[searchable-snapshots-api-mount-request]] diff --git a/docs/reference/searchable-snapshots/apis/node-cache-stats.asciidoc b/docs/reference/searchable-snapshots/apis/node-cache-stats.asciidoc index 62faceb99d4f..d42ba0287694 100644 --- a/docs/reference/searchable-snapshots/apis/node-cache-stats.asciidoc +++ b/docs/reference/searchable-snapshots/apis/node-cache-stats.asciidoc @@ -5,6 +5,12 @@ Cache stats ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-searchable_snapshots[Searchable snapshots APIs]. +-- + Retrieves statistics about the shared cache for <>. diff --git a/docs/reference/searchable-snapshots/apis/searchable-snapshots-apis.asciidoc b/docs/reference/searchable-snapshots/apis/searchable-snapshots-apis.asciidoc index f90e27ea6322..1a1856198d28 100644 --- a/docs/reference/searchable-snapshots/apis/searchable-snapshots-apis.asciidoc +++ b/docs/reference/searchable-snapshots/apis/searchable-snapshots-apis.asciidoc @@ -2,6 +2,12 @@ [[searchable-snapshots-apis]] == Searchable snapshots APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-searchable_snapshots[Searchable snapshots APIs]. +-- + You can use the following APIs to perform searchable snapshots operations. * <> diff --git a/docs/reference/searchable-snapshots/apis/shard-stats.asciidoc b/docs/reference/searchable-snapshots/apis/shard-stats.asciidoc index 369d19da9ae5..f1eb9ac8d92b 100644 --- a/docs/reference/searchable-snapshots/apis/shard-stats.asciidoc +++ b/docs/reference/searchable-snapshots/apis/shard-stats.asciidoc @@ -5,6 +5,12 @@ Searchable snapshot statistics ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-searchable_snapshots[Searchable snapshots APIs]. +-- + experimental::[] Retrieves statistics about searchable snapshots. 
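Of the searchable snapshot endpoints above, the mount API takes the most parameters. A rough sketch of a mount request, assuming a local unsecured cluster; the repository, snapshot, and index names are hypothetical:

[source,java]
----
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class MountSnapshotExample {
    public static void main(String[] args) throws Exception {
        // POST /_snapshot/<repository>/<snapshot>/_mount mounts one index from the
        // snapshot as a searchable snapshot index; all names here are hypothetical.
        String body = """
            {"index": "my-index", "renamed_index": "mounted-my-index"}""";
        HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:9200/_snapshot/my_repository/my_snapshot/_mount?wait_for_completion=true"))
            .header("Content-Type", "application/json")
            .POST(HttpRequest.BodyPublishers.ofString(body))
            .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());
    }
}
----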
diff --git a/docs/reference/setup.asciidoc b/docs/reference/setup.asciidoc index a284e563917c..80828fdbfbb0 100644 --- a/docs/reference/setup.asciidoc +++ b/docs/reference/setup.asciidoc @@ -83,8 +83,6 @@ include::modules/indices/search-settings.asciidoc[] include::settings/security-settings.asciidoc[] -include::modules/shard-ops.asciidoc[] - include::modules/indices/request_cache.asciidoc[] include::settings/snapshot-settings.asciidoc[] diff --git a/docs/reference/shutdown/apis/shutdown-api.asciidoc b/docs/reference/shutdown/apis/shutdown-api.asciidoc index 24cbca720d16..b950cd3d19c5 100644 --- a/docs/reference/shutdown/apis/shutdown-api.asciidoc +++ b/docs/reference/shutdown/apis/shutdown-api.asciidoc @@ -4,6 +4,12 @@ NOTE: {cloud-only} +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-shutdown[Node lifecycle APIs]. +-- + You use the shutdown APIs to prepare nodes for temporary or permanent shutdown, monitor the shutdown status, and enable a previously shut-down node to resume normal operations. [discrete] diff --git a/docs/reference/shutdown/apis/shutdown-delete.asciidoc b/docs/reference/shutdown/apis/shutdown-delete.asciidoc index 4d7f30c3a1e4..225d88c63a01 100644 --- a/docs/reference/shutdown/apis/shutdown-delete.asciidoc +++ b/docs/reference/shutdown/apis/shutdown-delete.asciidoc @@ -3,6 +3,12 @@ NOTE: {cloud-only} +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-shutdown[Node lifecycle APIs]. +-- + Cancels shutdown preparations or clears a shutdown request so a node can resume normal operations. diff --git a/docs/reference/shutdown/apis/shutdown-get.asciidoc b/docs/reference/shutdown/apis/shutdown-get.asciidoc index 97755b3263d7..b0097eb0caf7 100644 --- a/docs/reference/shutdown/apis/shutdown-get.asciidoc +++ b/docs/reference/shutdown/apis/shutdown-get.asciidoc @@ -3,6 +3,12 @@ NOTE: {cloud-only} +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-shutdown[Node lifecycle APIs]. +-- + Retrieves the status of a node that's being prepared for shutdown. [[get-shutdown-api-request]] diff --git a/docs/reference/shutdown/apis/shutdown-put.asciidoc b/docs/reference/shutdown/apis/shutdown-put.asciidoc index 344dd8fa3671..5eef4763a9c3 100644 --- a/docs/reference/shutdown/apis/shutdown-put.asciidoc +++ b/docs/reference/shutdown/apis/shutdown-put.asciidoc @@ -3,6 +3,12 @@ NOTE: {cloud-only} +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-shutdown[Node lifecycle APIs]. +-- + Prepares a node to be shut down. [[put-shutdown-api-request]] diff --git a/docs/reference/slm/apis/slm-api.asciidoc b/docs/reference/slm/apis/slm-api.asciidoc index d061ff6b0aaf..ee624a70b00f 100644 --- a/docs/reference/slm/apis/slm-api.asciidoc +++ b/docs/reference/slm/apis/slm-api.asciidoc @@ -2,6 +2,12 @@ [[snapshot-lifecycle-management-api]] == {slm-cap} APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-slm[{slm-cap} APIs]. +-- + You use the following APIs to set up policies to automatically take snapshots and control how long they are retained. 
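Before the per-endpoint pages that follow, a concrete sketch of registering such a policy may help. The policy ID, repository, schedule, and retention below are illustrative only, and the cluster is assumed to be local and unsecured:

[source,java]
----
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class CreateSlmPolicyExample {
    public static void main(String[] args) throws Exception {
        // PUT /_slm/policy/<policy_id>: snapshot nightly at 01:30, keep snapshots 30 days.
        String policy = """
            {
              "schedule": "0 30 1 * * ?",
              "name": "<nightly-snap-{now/d}>",
              "repository": "my_repository",
              "config": { "indices": ["*"] },
              "retention": { "expire_after": "30d" }
            }""";
        HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:9200/_slm/policy/nightly-snapshots"))
            .header("Content-Type", "application/json")
            .PUT(HttpRequest.BodyPublishers.ofString(policy))
            .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body());
    }
}
----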
diff --git a/docs/reference/slm/apis/slm-delete.asciidoc b/docs/reference/slm/apis/slm-delete.asciidoc index 650ee68e24fb..9c63d7326421 100644 --- a/docs/reference/slm/apis/slm-delete.asciidoc +++ b/docs/reference/slm/apis/slm-delete.asciidoc @@ -5,6 +5,12 @@ Delete policy ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-slm[{slm-cap} APIs]. +-- + Deletes an existing snapshot lifecycle policy. [[slm-api-delete-lifecycle-request]] diff --git a/docs/reference/slm/apis/slm-execute-retention.asciidoc b/docs/reference/slm/apis/slm-execute-retention.asciidoc index 75bcdb314366..ad8d7b15a43d 100644 --- a/docs/reference/slm/apis/slm-execute-retention.asciidoc +++ b/docs/reference/slm/apis/slm-execute-retention.asciidoc @@ -5,6 +5,12 @@ Execute snapshot retention policy ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-slm[{slm-cap} APIs]. +-- + Deletes any snapshots that are expired according to the policy's retention rules. [[slm-api-execute-retention-request]] diff --git a/docs/reference/slm/apis/slm-execute.asciidoc b/docs/reference/slm/apis/slm-execute.asciidoc index f3977d6aed2f..9ea35602de4e 100644 --- a/docs/reference/slm/apis/slm-execute.asciidoc +++ b/docs/reference/slm/apis/slm-execute.asciidoc @@ -5,6 +5,12 @@ Execute snapshot lifecycle policy ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-slm[{slm-cap} APIs]. +-- + Immediately creates a snapshot according to the lifecycle policy, without waiting for the scheduled time. diff --git a/docs/reference/slm/apis/slm-get-status.asciidoc b/docs/reference/slm/apis/slm-get-status.asciidoc index d4afbaddb1be..181927ac35a1 100644 --- a/docs/reference/slm/apis/slm-get-status.asciidoc +++ b/docs/reference/slm/apis/slm-get-status.asciidoc @@ -7,6 +7,12 @@ Get {slm} status ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-slm[{slm-cap} APIs]. +-- + Retrieves the status of {slm} ({slm-init}). [[slm-api-get-status-request]] diff --git a/docs/reference/slm/apis/slm-get.asciidoc b/docs/reference/slm/apis/slm-get.asciidoc index f7c847d06dc4..723d8b374b91 100644 --- a/docs/reference/slm/apis/slm-get.asciidoc +++ b/docs/reference/slm/apis/slm-get.asciidoc @@ -5,6 +5,12 @@ Get policy ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-slm[{slm-cap} APIs]. +-- + Retrieves one or more snapshot lifecycle policy definitions and information about the latest snapshot attempts. diff --git a/docs/reference/slm/apis/slm-put.asciidoc b/docs/reference/slm/apis/slm-put.asciidoc index 51ad571ee12e..a6ee29bfd1bc 100644 --- a/docs/reference/slm/apis/slm-put.asciidoc +++ b/docs/reference/slm/apis/slm-put.asciidoc @@ -4,8 +4,13 @@ Create or update policy ++++ -Creates or updates a snapshot lifecycle policy. +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-slm[{slm-cap} APIs]. +-- +Creates or updates a snapshot lifecycle policy. 
[[slm-api-put-request]] ==== {api-request-title} diff --git a/docs/reference/slm/apis/slm-start.asciidoc b/docs/reference/slm/apis/slm-start.asciidoc index 9d9b8108cb57..87c19ec600b6 100644 --- a/docs/reference/slm/apis/slm-start.asciidoc +++ b/docs/reference/slm/apis/slm-start.asciidoc @@ -1,12 +1,17 @@ [role="xpack"] [[slm-api-start]] === Start {slm} API - [subs="attributes"] ++++ Start {slm} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-slm[{slm-cap} APIs]. +-- + Turns on {slm} ({slm-init}). [[slm-api-start-request]] diff --git a/docs/reference/slm/apis/slm-stats.asciidoc b/docs/reference/slm/apis/slm-stats.asciidoc index 340631e9c560..d6b9cd960e45 100644 --- a/docs/reference/slm/apis/slm-stats.asciidoc +++ b/docs/reference/slm/apis/slm-stats.asciidoc @@ -5,6 +5,12 @@ Get snapshot lifecycle stats ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-slm[{slm-cap} APIs]. +-- + Returns global and policy-level statistics about actions taken by {slm}. [[slm-api-stats-request]] diff --git a/docs/reference/slm/apis/slm-stop.asciidoc b/docs/reference/slm/apis/slm-stop.asciidoc index 253abec7b4d1..2bfe9646bcff 100644 --- a/docs/reference/slm/apis/slm-stop.asciidoc +++ b/docs/reference/slm/apis/slm-stop.asciidoc @@ -1,12 +1,17 @@ [role="xpack"] [[slm-api-stop]] === Stop {slm} API - [subs="attributes"] ++++ Stop {slm} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-slm[{slm-cap} APIs]. +-- + Turn off {slm} ({slm-init}). [[slm-api-stop-request]] diff --git a/docs/reference/snapshot-restore/apis/clean-up-repo-api.asciidoc b/docs/reference/snapshot-restore/apis/clean-up-repo-api.asciidoc index 249e192c0c58..dbb754169349 100644 --- a/docs/reference/snapshot-restore/apis/clean-up-repo-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/clean-up-repo-api.asciidoc @@ -4,6 +4,12 @@ Clean up snapshot repository ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Triggers the review of a snapshot repository's contents and deletes any stale data that is not referenced by existing snapshots. See <>. diff --git a/docs/reference/snapshot-restore/apis/clone-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/clone-snapshot-api.asciidoc index 590bc7e7410f..cf432d807d9c 100644 --- a/docs/reference/snapshot-restore/apis/clone-snapshot-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/clone-snapshot-api.asciidoc @@ -4,6 +4,12 @@ Clone snapshot ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Clones part or all of a snapshot into a new snapshot. [source,console] diff --git a/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc index baa28bb7b0a5..a6c39abbda73 100644 --- a/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc @@ -4,6 +4,12 @@ Create snapshot ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + <> of a cluster or specified data streams and indices. 
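A minimal sketch of the create snapshot call from Java, again against a local unsecured cluster with a hypothetical registered repository; omitting the request body snapshots all indices:

[source,java]
----
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class CreateSnapshotExample {
    public static void main(String[] args) throws Exception {
        // Restrict the snapshot to selected indices; the index patterns are hypothetical.
        String body = """
            {"indices": "my-index,logs-*", "include_global_state": false}""";
        HttpRequest request = HttpRequest.newBuilder()
            // wait_for_completion=true blocks until the snapshot finishes.
            .uri(URI.create("http://localhost:9200/_snapshot/my_repository/snapshot_1?wait_for_completion=true"))
            .header("Content-Type", "application/json")
            .PUT(HttpRequest.BodyPublishers.ofString(body))
            .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());
    }
}
----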
diff --git a/docs/reference/snapshot-restore/apis/delete-repo-api.asciidoc b/docs/reference/snapshot-restore/apis/delete-repo-api.asciidoc index 4301fea64252..e0df427da745 100644 --- a/docs/reference/snapshot-restore/apis/delete-repo-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/delete-repo-api.asciidoc @@ -4,6 +4,12 @@ Delete snapshot repository ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Unregisters one or more <>. When a repository is unregistered, {es} only removes the reference to the diff --git a/docs/reference/snapshot-restore/apis/delete-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/delete-snapshot-api.asciidoc index 8824977d660e..74db60c1970c 100644 --- a/docs/reference/snapshot-restore/apis/delete-snapshot-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/delete-snapshot-api.asciidoc @@ -4,6 +4,12 @@ Delete snapshot ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Deletes a <>. //// diff --git a/docs/reference/snapshot-restore/apis/get-repo-api.asciidoc b/docs/reference/snapshot-restore/apis/get-repo-api.asciidoc index cf1b9813c519..5f6b6485c5ee 100644 --- a/docs/reference/snapshot-restore/apis/get-repo-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/get-repo-api.asciidoc @@ -4,6 +4,12 @@ Get snapshot repository ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Gets information about one or more registered <>. diff --git a/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc index 622e1ade024b..f9eb6a27df03 100644 --- a/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc @@ -4,6 +4,12 @@ Get snapshot ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Retrieves information about one or more snapshots. //// diff --git a/docs/reference/snapshot-restore/apis/get-snapshot-status-api.asciidoc b/docs/reference/snapshot-restore/apis/get-snapshot-status-api.asciidoc index e677408da3f2..dbbf547528f4 100644 --- a/docs/reference/snapshot-restore/apis/get-snapshot-status-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/get-snapshot-status-api.asciidoc @@ -4,6 +4,12 @@ Get snapshot status ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Retrieves a detailed description of the current state for each shard participating in the snapshot. Note that this API should only be used to obtain detailed shard-level information for ongoing snapshots. If this detail is not needed, or you want to obtain information about one or more existing snapshots, use the <>. 
//// diff --git a/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc b/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc index 0d3b5586da86..55b61cc321ed 100644 --- a/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc @@ -4,6 +4,12 @@ Create or update snapshot repository ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Registers or updates a <>. [source,console] diff --git a/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc b/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc index f18ef1ee6e82..ca46ba1fb2b5 100644 --- a/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc @@ -5,6 +5,12 @@ Repository analysis ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Analyzes a repository, reporting its performance characteristics and any incorrect behaviour found. diff --git a/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc index 9fe06d73f1a6..2f2c7fcd8ebd 100644 --- a/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc @@ -4,6 +4,12 @@ Restore snapshot ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Restores a <> of a cluster or specified data streams and indices. //// diff --git a/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc b/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc index b8bb6a2cd7d1..715687f02ede 100644 --- a/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc +++ b/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc @@ -1,6 +1,12 @@ [[snapshot-restore-apis]] == Snapshot and restore APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + You can use the following APIs to set up snapshot repositories, manage snapshot backups, and restore snapshots to a running cluster. diff --git a/docs/reference/snapshot-restore/apis/verify-repo-api.asciidoc b/docs/reference/snapshot-restore/apis/verify-repo-api.asciidoc index dd845663be8d..333a12ff49ac 100644 --- a/docs/reference/snapshot-restore/apis/verify-repo-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/verify-repo-api.asciidoc @@ -4,6 +4,12 @@ Verify snapshot repository ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Checks for common misconfigurations in a snapshot repository. See <>. 
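Verification is a bodiless POST; a short sketch, with the repository name hypothetical:

[source,java]
----
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class VerifyRepositoryExample {
    public static void main(String[] args) throws Exception {
        // POST /_snapshot/<repository>/_verify checks each data node can write to the repository.
        HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:9200/_snapshot/my_repository/_verify"))
            .POST(HttpRequest.BodyPublishers.noBody())
            .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body()); // lists the nodes that verified the repository
    }
}
----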
diff --git a/docs/reference/snapshot-restore/apis/verify-repo-integrity-api.asciidoc b/docs/reference/snapshot-restore/apis/verify-repo-integrity-api.asciidoc index 99ae126b401f..22a4051a546c 100644 --- a/docs/reference/snapshot-restore/apis/verify-repo-integrity-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/verify-repo-integrity-api.asciidoc @@ -5,6 +5,12 @@ Verify repository integrity ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Verifies the integrity of the contents of a snapshot repository. //// diff --git a/docs/reference/sql/apis/clear-sql-cursor-api.asciidoc b/docs/reference/sql/apis/clear-sql-cursor-api.asciidoc index 48663ca0d75a..a16d31c4b12d 100644 --- a/docs/reference/sql/apis/clear-sql-cursor-api.asciidoc +++ b/docs/reference/sql/apis/clear-sql-cursor-api.asciidoc @@ -5,6 +5,12 @@ Clear SQL cursor ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-sql[SQL APIs]. +-- + Clears an <>. //// diff --git a/docs/reference/sql/apis/delete-async-sql-search-api.asciidoc b/docs/reference/sql/apis/delete-async-sql-search-api.asciidoc index 1737a39401db..e8e4ff68cef0 100644 --- a/docs/reference/sql/apis/delete-async-sql-search-api.asciidoc +++ b/docs/reference/sql/apis/delete-async-sql-search-api.asciidoc @@ -5,6 +5,12 @@ Delete async SQL search ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-sql[SQL APIs]. +-- + Deletes an <> or a <>. If the search is still running, the API cancels it. diff --git a/docs/reference/sql/apis/get-async-sql-search-api.asciidoc b/docs/reference/sql/apis/get-async-sql-search-api.asciidoc index 8ae575176dd3..10ca4c648eba 100644 --- a/docs/reference/sql/apis/get-async-sql-search-api.asciidoc +++ b/docs/reference/sql/apis/get-async-sql-search-api.asciidoc @@ -5,6 +5,12 @@ Get async SQL search ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-sql[SQL APIs]. +-- + Returns results for an <> or a <>. diff --git a/docs/reference/sql/apis/get-async-sql-search-status-api.asciidoc b/docs/reference/sql/apis/get-async-sql-search-status-api.asciidoc index 61505bab7c45..7a0d68cd120c 100644 --- a/docs/reference/sql/apis/get-async-sql-search-status-api.asciidoc +++ b/docs/reference/sql/apis/get-async-sql-search-status-api.asciidoc @@ -5,6 +5,12 @@ Get async SQL search status ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-sql[SQL APIs]. +-- + Returns the current status of an <> or a <>. diff --git a/docs/reference/sql/apis/sql-apis.asciidoc b/docs/reference/sql/apis/sql-apis.asciidoc index 08300522c328..60f3c85c55d6 100644 --- a/docs/reference/sql/apis/sql-apis.asciidoc +++ b/docs/reference/sql/apis/sql-apis.asciidoc @@ -2,6 +2,12 @@ [[sql-apis]] == SQL APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-sql[SQL APIs]. +-- + {es}'s SQL APIs let you run SQL queries on {es} indices and data streams. For an overview of {es}'s SQL features and related tutorials, see <>. 
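As an illustration of the SQL search endpoint these pages document, a sketch that runs a query and requests results as a plain-text table; the index name `library` and the column `page_count` are placeholders:

[source,java]
----
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class SqlSearchExample {
    public static void main(String[] args) throws Exception {
        // POST /_sql?format=txt returns results as a human-readable text table.
        String body = """
            {"query": "SELECT * FROM library ORDER BY page_count DESC LIMIT 5"}""";
        HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:9200/_sql?format=txt"))
            .header("Content-Type", "application/json")
            .POST(HttpRequest.BodyPublishers.ofString(body))
            .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());
    }
}
----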
diff --git a/docs/reference/sql/apis/sql-search-api.asciidoc b/docs/reference/sql/apis/sql-search-api.asciidoc index 118d7975aefd..b1d002d343fb 100644 --- a/docs/reference/sql/apis/sql-search-api.asciidoc +++ b/docs/reference/sql/apis/sql-search-api.asciidoc @@ -5,6 +5,12 @@ SQL search ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-sql[SQL APIs]. +-- + Returns results for an <>. [source,console] diff --git a/docs/reference/sql/apis/sql-translate-api.asciidoc b/docs/reference/sql/apis/sql-translate-api.asciidoc index 15e52d118800..9183423eefd0 100644 --- a/docs/reference/sql/apis/sql-translate-api.asciidoc +++ b/docs/reference/sql/apis/sql-translate-api.asciidoc @@ -5,6 +5,12 @@ SQL translate ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-sql[SQL APIs]. +-- + Translates an <> into a <> request containing <>. See <>. diff --git a/docs/reference/synonyms/apis/delete-synonym-rule.asciidoc b/docs/reference/synonyms/apis/delete-synonym-rule.asciidoc index 74cbab8c0b4a..11f0708bafcd 100644 --- a/docs/reference/synonyms/apis/delete-synonym-rule.asciidoc +++ b/docs/reference/synonyms/apis/delete-synonym-rule.asciidoc @@ -1,10 +1,15 @@ [[delete-synonym-rule]] === Delete synonym rule - ++++ Delete synonym rule ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-synonyms[Synonyms APIs]. +-- + Deletes an individual synonym rule from a synonyms set. [[delete-synonym-rule-request]] diff --git a/docs/reference/synonyms/apis/delete-synonyms-set.asciidoc b/docs/reference/synonyms/apis/delete-synonyms-set.asciidoc index 9ba33ff3a5c7..62162e5c4567 100644 --- a/docs/reference/synonyms/apis/delete-synonyms-set.asciidoc +++ b/docs/reference/synonyms/apis/delete-synonyms-set.asciidoc @@ -1,10 +1,15 @@ [[delete-synonyms-set]] === Delete synonyms set - ++++ Delete synonyms set ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-synonyms[Synonyms APIs]. +-- + Deletes a synonyms set. [[delete-synonyms-set-request]] diff --git a/docs/reference/synonyms/apis/get-synonym-rule.asciidoc b/docs/reference/synonyms/apis/get-synonym-rule.asciidoc index c6c35e0efecc..3f0ee3f17324 100644 --- a/docs/reference/synonyms/apis/get-synonym-rule.asciidoc +++ b/docs/reference/synonyms/apis/get-synonym-rule.asciidoc @@ -1,10 +1,15 @@ [[get-synonym-rule]] === Get synonym rule - ++++ Get synonym rule ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-synonyms[Synonyms APIs]. +-- + Retrieves a synonym rule from a synonyms set. [[get-synonym-rule-request]] diff --git a/docs/reference/synonyms/apis/get-synonyms-set.asciidoc b/docs/reference/synonyms/apis/get-synonyms-set.asciidoc index 70bb5fb69526..1bb31081712e 100644 --- a/docs/reference/synonyms/apis/get-synonyms-set.asciidoc +++ b/docs/reference/synonyms/apis/get-synonyms-set.asciidoc @@ -5,6 +5,12 @@ Get synonyms set ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-synonyms[Synonyms APIs]. +-- + Retrieves a synonyms set. 
[[get-synonyms-set-request]] diff --git a/docs/reference/synonyms/apis/list-synonyms-sets.asciidoc b/docs/reference/synonyms/apis/list-synonyms-sets.asciidoc index 705a24c809e9..33ef220036b7 100644 --- a/docs/reference/synonyms/apis/list-synonyms-sets.asciidoc +++ b/docs/reference/synonyms/apis/list-synonyms-sets.asciidoc @@ -5,6 +5,12 @@ List synonyms sets ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-synonyms[Synonyms APIs]. +-- + Retrieves a summary of all defined synonyms sets. This API allows you to retrieve the total number of synonyms sets defined. diff --git a/docs/reference/synonyms/apis/put-synonym-rule.asciidoc b/docs/reference/synonyms/apis/put-synonym-rule.asciidoc index de2865632d55..5bb561f0f923 100644 --- a/docs/reference/synonyms/apis/put-synonym-rule.asciidoc +++ b/docs/reference/synonyms/apis/put-synonym-rule.asciidoc @@ -5,6 +5,12 @@ Create or update synonym rule ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-synonyms[Synonyms APIs]. +-- + Creates or updates a synonym rule for a synonym set. [[put-synonym-rule-request]] diff --git a/docs/reference/synonyms/apis/put-synonyms-set.asciidoc b/docs/reference/synonyms/apis/put-synonyms-set.asciidoc index ca7588592145..3af85638d022 100644 --- a/docs/reference/synonyms/apis/put-synonyms-set.asciidoc +++ b/docs/reference/synonyms/apis/put-synonyms-set.asciidoc @@ -5,6 +5,12 @@ Create or update synonyms set ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-synonyms[Synonyms APIs]. +-- + Creates or updates a synonyms set. NOTE: Synonyms sets are limited to a maximum of 10,000 synonym rules per set. diff --git a/docs/reference/synonyms/apis/synonyms-apis.asciidoc b/docs/reference/synonyms/apis/synonyms-apis.asciidoc index dbbc26c36d3d..95fc0aae8c14 100644 --- a/docs/reference/synonyms/apis/synonyms-apis.asciidoc +++ b/docs/reference/synonyms/apis/synonyms-apis.asciidoc @@ -7,6 +7,12 @@ --- +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-synonyms[Synonyms APIs]. +-- + The synonyms management API provides a convenient way to define and manage synonyms in an internal system index. Related synonyms can be grouped in a "synonyms set". Create as many synonym sets as you need. diff --git a/docs/reference/text-structure/apis/find-field-structure.asciidoc b/docs/reference/text-structure/apis/find-field-structure.asciidoc index 4fa108e92d4c..c4b289e6c30a 100644 --- a/docs/reference/text-structure/apis/find-field-structure.asciidoc +++ b/docs/reference/text-structure/apis/find-field-structure.asciidoc @@ -2,6 +2,12 @@ [[find-field-structure]] = Find field structure API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-text_structure[Text structure APIs]. +-- + Finds the structure of a field in an Elasticsearch index.
[discrete] diff --git a/docs/reference/text-structure/apis/find-message-structure.asciidoc b/docs/reference/text-structure/apis/find-message-structure.asciidoc index 6c1bf5089bed..18b85069559e 100644 --- a/docs/reference/text-structure/apis/find-message-structure.asciidoc +++ b/docs/reference/text-structure/apis/find-message-structure.asciidoc @@ -2,6 +2,12 @@ [[find-message-structure]] = Find messages structure API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-text_structure[Text structure APIs]. +-- + Finds the structure of a list of text messages. [discrete] diff --git a/docs/reference/text-structure/apis/find-structure.asciidoc b/docs/reference/text-structure/apis/find-structure.asciidoc index 361560bace4e..fef0584222e1 100644 --- a/docs/reference/text-structure/apis/find-structure.asciidoc +++ b/docs/reference/text-structure/apis/find-structure.asciidoc @@ -2,6 +2,12 @@ [[find-structure]] = Find text structure API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-text_structure[Text structure APIs]. +-- + Finds the structure of text. The text must contain data that is suitable to be ingested into the {stack}. diff --git a/docs/reference/text-structure/apis/index.asciidoc b/docs/reference/text-structure/apis/index.asciidoc index 9f4af120690f..68607ddf708b 100644 --- a/docs/reference/text-structure/apis/index.asciidoc +++ b/docs/reference/text-structure/apis/index.asciidoc @@ -2,6 +2,12 @@ [[text-structure-apis]] == Text structure APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-text_structure[Text structure APIs]. +-- + You can use the following APIs to find text structures: * <> diff --git a/docs/reference/text-structure/apis/test-grok-pattern.asciidoc b/docs/reference/text-structure/apis/test-grok-pattern.asciidoc index 4034a24cf0a1..7c1cfb41608e 100644 --- a/docs/reference/text-structure/apis/test-grok-pattern.asciidoc +++ b/docs/reference/text-structure/apis/test-grok-pattern.asciidoc @@ -6,6 +6,12 @@ Test Grok pattern ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-text_structure[Text structure APIs]. +-- + Tests a Grok pattern on lines of text, see also <>. [discrete] diff --git a/docs/reference/transform/apis/delete-transform.asciidoc b/docs/reference/transform/apis/delete-transform.asciidoc index 111dda23690b..0b386f946d6c 100644 --- a/docs/reference/transform/apis/delete-transform.asciidoc +++ b/docs/reference/transform/apis/delete-transform.asciidoc @@ -7,6 +7,12 @@ Delete {transform} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + Deletes an existing {transform}. [[delete-transform-request]] diff --git a/docs/reference/transform/apis/get-transform-stats.asciidoc b/docs/reference/transform/apis/get-transform-stats.asciidoc index 273b1d094979..e8d12a994faa 100644 --- a/docs/reference/transform/apis/get-transform-stats.asciidoc +++ b/docs/reference/transform/apis/get-transform-stats.asciidoc @@ -7,6 +7,12 @@ Get {transform} statistics ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + Retrieves usage information for {transforms}. 
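A sketch of fetching these statistics, with a hypothetical transform ID:

[source,java]
----
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class TransformStatsExample {
    public static void main(String[] args) throws Exception {
        // GET /_transform/<transform_id>/_stats; use _all in place of an ID for every transform.
        HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:9200/_transform/ecommerce-transform/_stats"))
            .GET()
            .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());
    }
}
----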
diff --git a/docs/reference/transform/apis/get-transform.asciidoc b/docs/reference/transform/apis/get-transform.asciidoc index ece59138e289..3eabf4ba2698 100644 --- a/docs/reference/transform/apis/get-transform.asciidoc +++ b/docs/reference/transform/apis/get-transform.asciidoc @@ -6,6 +6,12 @@ Get {transforms} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + Retrieves configuration information for {transforms}. [[get-transform-request]] diff --git a/docs/reference/transform/apis/preview-transform.asciidoc b/docs/reference/transform/apis/preview-transform.asciidoc index fa9ad0c0fc8f..c280bef52e0e 100644 --- a/docs/reference/transform/apis/preview-transform.asciidoc +++ b/docs/reference/transform/apis/preview-transform.asciidoc @@ -7,6 +7,12 @@ Preview {transform} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + Previews a {transform}. [[preview-transform-request]] diff --git a/docs/reference/transform/apis/put-transform.asciidoc b/docs/reference/transform/apis/put-transform.asciidoc index ed2ceba0a7a5..fc9abc8c6470 100644 --- a/docs/reference/transform/apis/put-transform.asciidoc +++ b/docs/reference/transform/apis/put-transform.asciidoc @@ -7,6 +7,12 @@ Create {transform} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + Instantiates a {transform}. [[put-transform-request]] diff --git a/docs/reference/transform/apis/reset-transform.asciidoc b/docs/reference/transform/apis/reset-transform.asciidoc index 1194d3589275..3d9fd5db180b 100644 --- a/docs/reference/transform/apis/reset-transform.asciidoc +++ b/docs/reference/transform/apis/reset-transform.asciidoc @@ -8,6 +8,12 @@ Reset {transform} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + Resets a {transform}. [[reset-transform-request]] diff --git a/docs/reference/transform/apis/schedule-now-transform.asciidoc b/docs/reference/transform/apis/schedule-now-transform.asciidoc index 7a276edf0881..3349e14b02ca 100644 --- a/docs/reference/transform/apis/schedule-now-transform.asciidoc +++ b/docs/reference/transform/apis/schedule-now-transform.asciidoc @@ -8,6 +8,12 @@ Schedule now {transform} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + Instantly runs a {transform} to process data. [[schedule-now-transform-request]] diff --git a/docs/reference/transform/apis/start-transform.asciidoc b/docs/reference/transform/apis/start-transform.asciidoc index f4f99f0f3457..4bcb951f4c6b 100644 --- a/docs/reference/transform/apis/start-transform.asciidoc +++ b/docs/reference/transform/apis/start-transform.asciidoc @@ -7,6 +7,12 @@ Start {transform} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + Starts a {transform}. 
[[start-transform-request]] diff --git a/docs/reference/transform/apis/stop-transform.asciidoc b/docs/reference/transform/apis/stop-transform.asciidoc index e99fcbd413eb..d87784e036ae 100644 --- a/docs/reference/transform/apis/stop-transform.asciidoc +++ b/docs/reference/transform/apis/stop-transform.asciidoc @@ -7,6 +7,12 @@ Stop {transforms} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + Stops one or more {transforms}. diff --git a/docs/reference/transform/apis/transform-apis.asciidoc b/docs/reference/transform/apis/transform-apis.asciidoc index 20e5960e5bb1..45131f0214f9 100644 --- a/docs/reference/transform/apis/transform-apis.asciidoc +++ b/docs/reference/transform/apis/transform-apis.asciidoc @@ -2,6 +2,12 @@ [[transform-apis]] = {transform-cap} APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + * <> * <> * <> diff --git a/docs/reference/transform/apis/update-transform.asciidoc b/docs/reference/transform/apis/update-transform.asciidoc index 1ac7d6d5410d..c473ca0f83b8 100644 --- a/docs/reference/transform/apis/update-transform.asciidoc +++ b/docs/reference/transform/apis/update-transform.asciidoc @@ -7,6 +7,12 @@ Update {transform} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + Updates certain properties of a {transform}. [[update-transform-request]] diff --git a/docs/reference/transform/apis/upgrade-transforms.asciidoc b/docs/reference/transform/apis/upgrade-transforms.asciidoc index a1b01a6fd146..826243938a9f 100644 --- a/docs/reference/transform/apis/upgrade-transforms.asciidoc +++ b/docs/reference/transform/apis/upgrade-transforms.asciidoc @@ -7,6 +7,12 @@ Upgrade {transforms} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + Upgrades all {transforms}. 
[[upgrade-transforms-request]] diff --git a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java index d44b4667f682..67d006868b48 100644 --- a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java +++ b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java @@ -13,6 +13,11 @@ import java.net.URL; import java.net.URLStreamHandlerFactory; import java.util.List; +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLSocketFactory; + @SuppressWarnings("unused") // Called from instrumentation code inserted by the Entitlements agent public interface EntitlementChecker { @@ -21,7 +26,21 @@ public interface EntitlementChecker { void check$java_lang_Runtime$halt(Class<?> callerClass, Runtime runtime, int status); - // URLClassLoader ctor + // ClassLoader ctor + void check$java_lang_ClassLoader$(Class<?> callerClass); + + void check$java_lang_ClassLoader$(Class<?> callerClass, ClassLoader parent); + + void check$java_lang_ClassLoader$(Class<?> callerClass, String name, ClassLoader parent); + + // SecureClassLoader ctor + void check$java_security_SecureClassLoader$(Class<?> callerClass); + + void check$java_security_SecureClassLoader$(Class<?> callerClass, ClassLoader parent); + + void check$java_security_SecureClassLoader$(Class<?> callerClass, String name, ClassLoader parent); + + // URLClassLoader constructors void check$java_net_URLClassLoader$(Class<?> callerClass, URL[] urls); void check$java_net_URLClassLoader$(Class<?> callerClass, URL[] urls, ClassLoader parent); @@ -32,6 +51,15 @@ public interface EntitlementChecker { void check$java_net_URLClassLoader$(Class<?> callerClass, String name, URL[] urls, ClassLoader parent, URLStreamHandlerFactory factory); + // "setFactory" methods + void check$javax_net_ssl_HttpsURLConnection$setSSLSocketFactory(Class<?> callerClass, HttpsURLConnection conn, SSLSocketFactory sf); + + void check$javax_net_ssl_HttpsURLConnection$$setDefaultSSLSocketFactory(Class<?> callerClass, SSLSocketFactory sf); + + void check$javax_net_ssl_HttpsURLConnection$$setDefaultHostnameVerifier(Class<?> callerClass, HostnameVerifier hv); + + void check$javax_net_ssl_SSLContext$$setDefault(Class<?> callerClass, SSLContext context); + // Process creation void check$java_lang_ProcessBuilder$start(Class<?> callerClass, ProcessBuilder that); diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java index be2ace7c1752..4afceedbe3f0 100644 --- a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java +++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java @@ -23,12 +23,17 @@ import java.io.IOException; import java.io.UncheckedIOException; import java.net.URL; import java.net.URLClassLoader; +import java.security.NoSuchAlgorithmException; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; +import javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.SSLContext; + import static java.util.Map.entry; +import static
org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction.CheckAction.alwaysDenied; import static org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction.CheckAction.deniedToPlugins; import static org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction.CheckAction.forPlugins; import static org.elasticsearch.rest.RestRequest.Method.GET; @@ -49,6 +54,10 @@ public class RestEntitlementsCheckAction extends BaseRestHandler { static CheckAction forPlugins(Runnable action) { return new CheckAction(action, false); } + + static CheckAction alwaysDenied(Runnable action) { + return new CheckAction(action, true); + } } private static final Map<String, CheckAction> checkActions = Map.ofEntries( @@ -56,9 +65,32 @@ public class RestEntitlementsCheckAction extends BaseRestHandler { entry("runtime_halt", deniedToPlugins(RestEntitlementsCheckAction::runtimeHalt)), entry("create_classloader", forPlugins(RestEntitlementsCheckAction::createClassLoader)), entry("processBuilder_start", deniedToPlugins(RestEntitlementsCheckAction::processBuilder_start)), - entry("processBuilder_startPipeline", deniedToPlugins(RestEntitlementsCheckAction::processBuilder_startPipeline)) + entry("processBuilder_startPipeline", deniedToPlugins(RestEntitlementsCheckAction::processBuilder_startPipeline)), + entry("set_https_connection_properties", forPlugins(RestEntitlementsCheckAction::setHttpsConnectionProperties)), + entry("set_default_ssl_socket_factory", alwaysDenied(RestEntitlementsCheckAction::setDefaultSSLSocketFactory)), + entry("set_default_hostname_verifier", alwaysDenied(RestEntitlementsCheckAction::setDefaultHostnameVerifier)), + entry("set_default_ssl_context", alwaysDenied(RestEntitlementsCheckAction::setDefaultSSLContext)) ); + private static void setDefaultSSLContext() { + logger.info("Calling SSLContext.setDefault"); + try { + SSLContext.setDefault(SSLContext.getDefault()); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } + } + + private static void setDefaultHostnameVerifier() { + logger.info("Calling HttpsURLConnection.setDefaultHostnameVerifier"); + HttpsURLConnection.setDefaultHostnameVerifier((hostname, session) -> false); + } + + private static void setDefaultSSLSocketFactory() { + logger.info("Calling HttpsURLConnection.setDefaultSSLSocketFactory"); + HttpsURLConnection.setDefaultSSLSocketFactory(new TestSSLSocketFactory()); + } + @SuppressForbidden(reason = "Specifically testing Runtime.exit") private static void runtimeExit() { Runtime.getRuntime().exit(123); @@ -93,11 +125,17 @@ public class RestEntitlementsCheckAction extends BaseRestHandler { } } + private static void setHttpsConnectionProperties() { + logger.info("Calling setSSLSocketFactory"); + var connection = new TestHttpsURLConnection(); + connection.setSSLSocketFactory(new TestSSLSocketFactory()); + } + public RestEntitlementsCheckAction(String prefix) { this.prefix = prefix; } - public static Set<String> getServerAndPluginsCheckActions() { + public static Set<String> getCheckActionsAllowedInPlugins() { return checkActions.entrySet() .stream() .filter(kv -> kv.getValue().isAlwaysDeniedToPlugins() == false) diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/TestHttpsURLConnection.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/TestHttpsURLConnection.java new file mode 100644 index 000000000000..5a96e582db02 --- /dev/null +++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/TestHttpsURLConnection.java @@ -0,0
+1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.entitlement.qa.common; + +import java.io.IOException; +import java.security.cert.Certificate; + +import javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.SSLPeerUnverifiedException; + +class TestHttpsURLConnection extends HttpsURLConnection { + TestHttpsURLConnection() { + super(null); + } + + @Override + public void connect() throws IOException {} + + @Override + public void disconnect() {} + + @Override + public boolean usingProxy() { + return false; + } + + @Override + public String getCipherSuite() { + return ""; + } + + @Override + public Certificate[] getLocalCertificates() { + return new Certificate[0]; + } + + @Override + public Certificate[] getServerCertificates() throws SSLPeerUnverifiedException { + return new Certificate[0]; + } +} diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/TestSSLSocketFactory.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/TestSSLSocketFactory.java new file mode 100644 index 000000000000..feb19df78017 --- /dev/null +++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/TestSSLSocketFactory.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.entitlement.qa.common; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.Socket; +import java.net.UnknownHostException; + +import javax.net.ssl.SSLSocketFactory; + +class TestSSLSocketFactory extends SSLSocketFactory { + @Override + public Socket createSocket(String host, int port) throws IOException, UnknownHostException { + return null; + } + + @Override + public Socket createSocket(String host, int port, InetAddress localHost, int localPort) { + return null; + } + + @Override + public Socket createSocket(InetAddress host, int port) throws IOException { + return null; + } + + @Override + public Socket createSocket(InetAddress address, int port, InetAddress localAddress, int localPort) throws IOException { + return null; + } + + @Override + public String[] getDefaultCipherSuites() { + return new String[0]; + } + + @Override + public String[] getSupportedCipherSuites() { + return new String[0]; + } + + @Override + public Socket createSocket(Socket s, String host, int port, boolean autoClose) throws IOException { + return null; + } +} diff --git a/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml b/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml index 45d4e57f6652..30fc9f0abeec 100644 --- a/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml +++ b/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml @@ -1,2 +1,3 @@ ALL-UNNAMED: - create_class_loader + - set_https_connection_properties diff --git a/libs/entitlement/qa/entitlement-allowed/src/main/plugin-metadata/entitlement-policy.yaml b/libs/entitlement/qa/entitlement-allowed/src/main/plugin-metadata/entitlement-policy.yaml index 7b5e848f414b..0a25570a9f62 100644 --- a/libs/entitlement/qa/entitlement-allowed/src/main/plugin-metadata/entitlement-policy.yaml +++ b/libs/entitlement/qa/entitlement-allowed/src/main/plugin-metadata/entitlement-policy.yaml @@ -1,2 +1,3 @@ org.elasticsearch.entitlement.qa.common: - create_class_loader + - set_https_connection_properties diff --git a/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsAllowedIT.java b/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsAllowedIT.java index 2fd4472f5cc6..c38e8b3f35ef 100644 --- a/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsAllowedIT.java +++ b/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsAllowedIT.java @@ -46,7 +46,7 @@ public class EntitlementsAllowedIT extends ESRestTestCase { public static Iterable<Object[]> data() { return Stream.of("allowed", "allowed_nonmodular") .flatMap( - path -> RestEntitlementsCheckAction.getServerAndPluginsCheckActions().stream().map(action -> new Object[] { path, action }) + path -> RestEntitlementsCheckAction.getCheckActionsAllowedInPlugins().stream().map(action -> new Object[] { path, action }) ) .toList(); } diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/bootstrap/EntitlementBootstrap.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/bootstrap/EntitlementBootstrap.java index 2abfb11964a9..257d13030258 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/bootstrap/EntitlementBootstrap.java +++
b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/bootstrap/EntitlementBootstrap.java @@ -16,6 +16,7 @@ import com.sun.tools.attach.VirtualMachine; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.entitlement.initialization.EntitlementInitialization; +import org.elasticsearch.entitlement.runtime.api.NotEntitledException; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; @@ -23,14 +24,24 @@ import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.Collection; -import java.util.Objects; import java.util.function.Function; +import static java.util.Objects.requireNonNull; + public class EntitlementBootstrap { - public record PluginData(Path pluginPath, boolean isModular, boolean isExternalPlugin) {} + public record BootstrapArgs(Collection pluginData, Function, String> pluginResolver) { + public BootstrapArgs { + requireNonNull(pluginData); + requireNonNull(pluginResolver); + } + } - public record BootstrapArgs(Collection pluginData, Function, String> pluginResolver) {} + public record PluginData(Path pluginPath, boolean isModular, boolean isExternalPlugin) { + public PluginData { + requireNonNull(pluginPath); + } + } private static BootstrapArgs bootstrapArgs; @@ -50,9 +61,10 @@ public class EntitlementBootstrap { if (EntitlementBootstrap.bootstrapArgs != null) { throw new IllegalStateException("plugin data is already set"); } - EntitlementBootstrap.bootstrapArgs = new BootstrapArgs(Objects.requireNonNull(pluginData), Objects.requireNonNull(pluginResolver)); + EntitlementBootstrap.bootstrapArgs = new BootstrapArgs(pluginData, pluginResolver); exportInitializationToAgent(); loadAgent(findAgentJar()); + selfTest(); } @SuppressForbidden(reason = "The VirtualMachine API is the only way to attach a java agent dynamically") @@ -98,5 +110,63 @@ public class EntitlementBootstrap { } } + /** + * Attempt a few sensitive operations to ensure that some are permitted and some are forbidden. + *

+ * <p> + * This serves two purposes: + * <ol> + * <li> + * a smoke test to make sure the entitlements system is not completely broken, and + * </li> + * <li> + * an early test of certain important operations so they don't fail later on at an awkward time. + * </li> + * </ol>
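+ * <p>
+ * (Editorial sketch, hedged: the list above boils down to a probe pattern - attempt an operation and treat
+ * {@code NotEntitledException} as success when the probe is supposed to be forbidden; {@code forbiddenOperation()}
+ * below is hypothetical, not a real method:)
+ * <pre>{@code
+ * try {
+ *     forbiddenOperation();
+ * } catch (NotEntitledException e) {
+ *     return; // correctly blocked
+ * }
+ * throw new IllegalStateException("entitlements failed to block the operation");
+ * }</pre>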
+ * + * @throws IllegalStateException if the entitlements system can't prevent an unauthorized action of our choosing + */ + private static void selfTest() { + ensureCannotStartProcess(); + ensureCanCreateTempFile(); + } + + private static void ensureCannotStartProcess() { + try { + // The command doesn't matter; it doesn't even need to exist + new ProcessBuilder("").start(); + } catch (NotEntitledException e) { + logger.debug("Success: Entitlement protection correctly prevented process creation"); + return; + } catch (IOException e) { + throw new IllegalStateException("Failed entitlement protection self-test", e); + } + throw new IllegalStateException("Entitlement protection self-test was incorrectly permitted"); + } + + /** + * Originally {@code Security.selfTest}. + */ + @SuppressForbidden(reason = "accesses jvm default tempdir as a self-test") + private static void ensureCanCreateTempFile() { + try { + Path p = Files.createTempFile(null, null); + p.toFile().deleteOnExit(); + + // Make an effort to clean up the file immediately; also, deleteOnExit leaves the file if the JVM exits abnormally. + try { + Files.delete(p); + } catch (IOException ignored) { + // Can be caused by virus scanner + } + } catch (NotEntitledException e) { + throw new IllegalStateException("Entitlement protection self-test was incorrectly forbidden", e); + } catch (Exception e) { + throw new IllegalStateException("Unable to perform entitlement protection self-test", e); + } + logger.debug("Success: Entitlement protection correctly permitted temp file creation"); + } + private static final Logger logger = LogManager.getLogger(EntitlementBootstrap.class); } diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java index c2ee935e0e5f..ba5ccbafa70a 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java @@ -9,6 +9,7 @@ package org.elasticsearch.entitlement.initialization; +import org.elasticsearch.core.Strings; import org.elasticsearch.core.internal.provider.ProviderLocator; import org.elasticsearch.entitlement.bootstrap.EntitlementBootstrap; import org.elasticsearch.entitlement.bridge.EntitlementChecker; @@ -19,6 +20,7 @@ import org.elasticsearch.entitlement.instrumentation.MethodKey; import org.elasticsearch.entitlement.instrumentation.Transformer; import org.elasticsearch.entitlement.runtime.api.ElasticsearchEntitlementChecker; import org.elasticsearch.entitlement.runtime.policy.CreateClassLoaderEntitlement; +import org.elasticsearch.entitlement.runtime.policy.Entitlement; import org.elasticsearch.entitlement.runtime.policy.ExitVMEntitlement; import org.elasticsearch.entitlement.runtime.policy.Policy; import org.elasticsearch.entitlement.runtime.policy.PolicyManager; @@ -92,9 +94,17 @@ public class EntitlementInitialization { // TODO(ES-10031): Decide what goes in the elasticsearch default policy and extend it var serverPolicy = new Policy( "server", - List.of(new Scope("org.elasticsearch.server", List.of(new ExitVMEntitlement(), new CreateClassLoaderEntitlement()))) + List.of( + new Scope("org.elasticsearch.base", List.of(new CreateClassLoaderEntitlement())), + new Scope("org.elasticsearch.xcontent", List.of(new CreateClassLoaderEntitlement())), + new 
Scope("org.elasticsearch.server", List.of(new ExitVMEntitlement(), new CreateClassLoaderEntitlement())) + ) ); - return new PolicyManager(serverPolicy, pluginPolicies, EntitlementBootstrap.bootstrapArgs().pluginResolver(), ENTITLEMENTS_MODULE); + // agents run without a module, so this is a special hack for the apm agent + // this should be removed once https://github.com/elastic/elasticsearch/issues/109335 is completed + List agentEntitlements = List.of(new CreateClassLoaderEntitlement()); + var resolver = EntitlementBootstrap.bootstrapArgs().pluginResolver(); + return new PolicyManager(serverPolicy, agentEntitlements, pluginPolicies, resolver, ENTITLEMENTS_MODULE); } private static Map createPluginPolicies(Collection pluginData) throws IOException { @@ -118,9 +128,17 @@ public class EntitlementInitialization { final Policy policy = parsePolicyIfExists(pluginName, policyFile, isExternalPlugin); // TODO: should this check actually be part of the parser? - for (Scope scope : policy.scopes) { - if (moduleNames.contains(scope.name) == false) { - throw new IllegalStateException("policy [" + policyFile + "] contains invalid module [" + scope.name + "]"); + for (Scope scope : policy.scopes()) { + if (moduleNames.contains(scope.moduleName()) == false) { + throw new IllegalStateException( + Strings.format( + "Invalid module name in policy: plugin [%s] does not have module [%s]; available modules [%s]; policy file [%s]", + pluginName, + scope.moduleName(), + String.join(", ", moduleNames), + policyFile + ) + ); } } return policy; diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java index 7ae7bc423845..450786ee57d8 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java @@ -16,12 +16,18 @@ import java.net.URL; import java.net.URLStreamHandlerFactory; import java.util.List; +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLSocketFactory; + /** * Implementation of the {@link EntitlementChecker} interface, providing additional * API methods for managing the checks. * The trampoline module loads this object via SPI. 
*/ public class ElasticsearchEntitlementChecker implements EntitlementChecker { + private final PolicyManager policyManager; public ElasticsearchEntitlementChecker(PolicyManager policyManager) { @@ -38,6 +44,36 @@ public class ElasticsearchEntitlementChecker implements EntitlementChecker { policyManager.checkExitVM(callerClass); } + @Override + public void check$java_lang_ClassLoader$(Class callerClass) { + policyManager.checkCreateClassLoader(callerClass); + } + + @Override + public void check$java_lang_ClassLoader$(Class callerClass, ClassLoader parent) { + policyManager.checkCreateClassLoader(callerClass); + } + + @Override + public void check$java_lang_ClassLoader$(Class callerClass, String name, ClassLoader parent) { + policyManager.checkCreateClassLoader(callerClass); + } + + @Override + public void check$java_security_SecureClassLoader$(Class callerClass) { + policyManager.checkCreateClassLoader(callerClass); + } + + @Override + public void check$java_security_SecureClassLoader$(Class callerClass, ClassLoader parent) { + policyManager.checkCreateClassLoader(callerClass); + } + + @Override + public void check$java_security_SecureClassLoader$(Class callerClass, String name, ClassLoader parent) { + policyManager.checkCreateClassLoader(callerClass); + } + @Override public void check$java_net_URLClassLoader$(Class callerClass, URL[] urls) { policyManager.checkCreateClassLoader(callerClass); @@ -78,4 +114,28 @@ public class ElasticsearchEntitlementChecker implements EntitlementChecker { public void check$java_lang_ProcessBuilder$$startPipeline(Class callerClass, List builders) { policyManager.checkStartProcess(callerClass); } + + @Override + public void check$javax_net_ssl_HttpsURLConnection$setSSLSocketFactory( + Class callerClass, + HttpsURLConnection connection, + SSLSocketFactory sf + ) { + policyManager.checkSetHttpsConnectionProperties(callerClass); + } + + @Override + public void check$javax_net_ssl_HttpsURLConnection$$setDefaultSSLSocketFactory(Class callerClass, SSLSocketFactory sf) { + policyManager.checkSetGlobalHttpsConnectionProperties(callerClass); + } + + @Override + public void check$javax_net_ssl_HttpsURLConnection$$setDefaultHostnameVerifier(Class callerClass, HostnameVerifier hv) { + policyManager.checkSetGlobalHttpsConnectionProperties(callerClass); + } + + @Override + public void check$javax_net_ssl_SSLContext$$setDefault(Class callerClass, SSLContext context) { + policyManager.checkSetGlobalHttpsConnectionProperties(callerClass); + } } diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java index 138515be9ffc..55e4b6659564 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java @@ -9,7 +9,7 @@ package org.elasticsearch.entitlement.runtime.policy; -public class CreateClassLoaderEntitlement implements Entitlement { +public record CreateClassLoaderEntitlement() implements Entitlement { @ExternalEntitlement - public CreateClassLoaderEntitlement() {} + public CreateClassLoaderEntitlement {} } diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExitVMEntitlement.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExitVMEntitlement.java index 
c4a8fc683358..e5c836ea22b2 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExitVMEntitlement.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExitVMEntitlement.java @@ -12,4 +12,4 @@ package org.elasticsearch.entitlement.runtime.policy; /** * Internal policy type (not-parseable -- not available to plugins). */ -public class ExitVMEntitlement implements Entitlement {} +public record ExitVMEntitlement() implements Entitlement {} diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Policy.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Policy.java index e8bd7a3fff35..3546472f485f 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Policy.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Policy.java @@ -9,38 +9,15 @@ package org.elasticsearch.entitlement.runtime.policy; -import java.util.Collections; import java.util.List; import java.util.Objects; /** * A holder for scoped entitlements. */ -public class Policy { - - public final String name; - public final List scopes; - +public record Policy(String name, List scopes) { public Policy(String name, List scopes) { this.name = Objects.requireNonNull(name); - this.scopes = Collections.unmodifiableList(Objects.requireNonNull(scopes)); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - Policy policy = (Policy) o; - return Objects.equals(name, policy.name) && Objects.equals(scopes, policy.scopes); - } - - @Override - public int hashCode() { - return Objects.hash(name, scopes); - } - - @Override - public String toString() { - return "Policy{" + "name='" + name + '\'' + ", scopes=" + scopes + '}'; + this.scopes = List.copyOf(scopes); } } diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java index 527a9472a7ce..188ce1d747db 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java @@ -17,34 +17,31 @@ import org.elasticsearch.logging.Logger; import java.lang.StackWalker.StackFrame; import java.lang.module.ModuleFinder; import java.lang.module.ModuleReference; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.IdentityHashMap; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; import static java.lang.StackWalker.Option.RETAIN_CLASS_REFERENCE; import static java.util.Objects.requireNonNull; +import static java.util.stream.Collectors.groupingBy; public class PolicyManager { private static final Logger logger = LogManager.getLogger(PolicyManager.class); - static class ModuleEntitlements { - public static final ModuleEntitlements NONE = new ModuleEntitlements(List.of()); - private final IdentityHashMap, List> entitlementsByType; + record ModuleEntitlements(Map, List> entitlementsByType) { + public static final ModuleEntitlements NONE = new ModuleEntitlements(Map.of()); - ModuleEntitlements(List entitlements) 
{ - this.entitlementsByType = entitlements.stream() - .collect(Collectors.toMap(Entitlement::getClass, e -> new ArrayList<>(List.of(e)), (a, b) -> { - a.addAll(b); - return a; - }, IdentityHashMap::new)); + ModuleEntitlements { + entitlementsByType = Map.copyOf(entitlementsByType); + } + + public static ModuleEntitlements from(List entitlements) { + return new ModuleEntitlements(entitlements.stream().collect(groupingBy(Entitlement::getClass))); } public boolean hasEntitlement(Class entitlementClass) { @@ -56,9 +53,10 @@ public class PolicyManager { } } - final Map moduleEntitlementsMap = new HashMap<>(); + final Map moduleEntitlementsMap = new ConcurrentHashMap<>(); protected final Map> serverEntitlements; + protected final List agentEntitlements; protected final Map>> pluginsEntitlements; private final Function, String> pluginResolver; @@ -85,12 +83,14 @@ public class PolicyManager { private final Module entitlementsModule; public PolicyManager( - Policy defaultPolicy, + Policy serverPolicy, + List agentEntitlements, Map pluginPolicies, Function, String> pluginResolver, Module entitlementsModule ) { - this.serverEntitlements = buildScopeEntitlementsMap(requireNonNull(defaultPolicy)); + this.serverEntitlements = buildScopeEntitlementsMap(requireNonNull(serverPolicy)); + this.agentEntitlements = agentEntitlements; this.pluginsEntitlements = requireNonNull(pluginPolicies).entrySet() .stream() .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, e -> buildScopeEntitlementsMap(e.getValue()))); @@ -99,7 +99,7 @@ public class PolicyManager { } private static Map> buildScopeEntitlementsMap(Policy policy) { - return policy.scopes.stream().collect(Collectors.toUnmodifiableMap(scope -> scope.name, scope -> scope.entitlements)); + return policy.scopes().stream().collect(Collectors.toUnmodifiableMap(scope -> scope.moduleName(), scope -> scope.entitlements())); } public void checkStartProcess(Class callerClass) { @@ -107,7 +107,7 @@ public class PolicyManager { } private void neverEntitled(Class callerClass, String operationDescription) { - var requestingModule = requestingModule(callerClass); + var requestingModule = requestingClass(callerClass); if (isTriviallyAllowed(requestingModule)) { return; } @@ -130,19 +130,27 @@ public class PolicyManager { checkEntitlementPresent(callerClass, CreateClassLoaderEntitlement.class); } + public void checkSetHttpsConnectionProperties(Class callerClass) { + checkEntitlementPresent(callerClass, SetHttpsConnectionPropertiesEntitlement.class); + } + + public void checkSetGlobalHttpsConnectionProperties(Class callerClass) { + neverEntitled(callerClass, "set global https connection properties"); + } + private void checkEntitlementPresent(Class callerClass, Class entitlementClass) { - var requestingModule = requestingModule(callerClass); - if (isTriviallyAllowed(requestingModule)) { + var requestingClass = requestingClass(callerClass); + if (isTriviallyAllowed(requestingClass)) { return; } - ModuleEntitlements entitlements = getEntitlementsOrThrow(callerClass, requestingModule); + ModuleEntitlements entitlements = getEntitlements(requestingClass); if (entitlements.hasEntitlement(entitlementClass)) { logger.debug( () -> Strings.format( - "Entitled: caller [%s], module [%s], type [%s]", - callerClass, - requestingModule.getName(), + "Entitled: class [%s], module [%s], entitlement [%s]", + requestingClass, + requestingClass.getModule().getName(), entitlementClass.getSimpleName() ) ); @@ -150,30 +158,26 @@ public class PolicyManager { } throw new NotEntitledException( 
Strings.format( - "Missing entitlement: caller [%s], module [%s], type [%s]", - callerClass, - requestingModule.getName(), + "Missing entitlement: class [%s], module [%s], entitlement [%s]", + requestingClass, + requestingClass.getModule().getName(), entitlementClass.getSimpleName() ) ); } - ModuleEntitlements getEntitlementsOrThrow(Class callerClass, Module requestingModule) { - ModuleEntitlements cachedEntitlement = moduleEntitlementsMap.get(requestingModule); - if (cachedEntitlement != null) { - if (cachedEntitlement == ModuleEntitlements.NONE) { - throw new NotEntitledException(buildModuleNoPolicyMessage(callerClass, requestingModule) + "[CACHED]"); - } - return cachedEntitlement; - } + ModuleEntitlements getEntitlements(Class requestingClass) { + return moduleEntitlementsMap.computeIfAbsent(requestingClass.getModule(), m -> computeEntitlements(requestingClass)); + } + private ModuleEntitlements computeEntitlements(Class requestingClass) { + Module requestingModule = requestingClass.getModule(); if (isServerModule(requestingModule)) { - var scopeName = requestingModule.getName(); - return getModuleEntitlementsOrThrow(callerClass, requestingModule, serverEntitlements, scopeName); + return getModuleScopeEntitlements(requestingClass, serverEntitlements, requestingModule.getName()); } // plugins - var pluginName = pluginResolver.apply(callerClass); + var pluginName = pluginResolver.apply(requestingClass); if (pluginName != null) { var pluginEntitlements = pluginsEntitlements.get(pluginName); if (pluginEntitlements != null) { @@ -183,34 +187,30 @@ public class PolicyManager { } else { scopeName = requestingModule.getName(); } - return getModuleEntitlementsOrThrow(callerClass, requestingModule, pluginEntitlements, scopeName); + return getModuleScopeEntitlements(requestingClass, pluginEntitlements, scopeName); } } - moduleEntitlementsMap.put(requestingModule, ModuleEntitlements.NONE); - throw new NotEntitledException(buildModuleNoPolicyMessage(callerClass, requestingModule)); + if (requestingModule.isNamed() == false) { + // agents are the only thing running non-modular + return ModuleEntitlements.from(agentEntitlements); + } + + logger.warn("No applicable entitlement policy for class [{}]", requestingClass.getName()); + return ModuleEntitlements.NONE; } - private static String buildModuleNoPolicyMessage(Class callerClass, Module requestingModule) { - return Strings.format("Missing entitlement policy: caller [%s], module [%s]", callerClass, requestingModule.getName()); - } - - private ModuleEntitlements getModuleEntitlementsOrThrow( + private ModuleEntitlements getModuleScopeEntitlements( Class callerClass, - Module module, Map> scopeEntitlements, String moduleName ) { var entitlements = scopeEntitlements.get(moduleName); if (entitlements == null) { - // Module without entitlements - remember we don't have any - moduleEntitlementsMap.put(module, ModuleEntitlements.NONE); - throw new NotEntitledException(buildModuleNoPolicyMessage(callerClass, module)); + logger.warn("No applicable entitlement policy for module [{}], class [{}]", moduleName, callerClass); + return ModuleEntitlements.NONE; } - // We have a policy for this module - var classEntitlements = new ModuleEntitlements(entitlements); - moduleEntitlementsMap.put(module, classEntitlements); - return classEntitlements; + return ModuleEntitlements.from(entitlements); } private static boolean isServerModule(Module requestingModule) { @@ -218,25 +218,22 @@ public class PolicyManager { } /** - * Walks the stack to determine which module's 
entitlements should be checked. + * Walks the stack to determine which class should be checked for entitlements. * - * @param callerClass when non-null will be used if its module is suitable; + * @param callerClass when non-null will be returned; * this is a fast-path check that can avoid the stack walk * in cases where the caller class is available. - * @return the requesting module, or {@code null} if the entire call stack + * @return the requesting class, or {@code null} if the entire call stack * comes from the entitlement library itself. */ - Module requestingModule(Class callerClass) { + Class requestingClass(Class callerClass) { if (callerClass != null) { - var callerModule = callerClass.getModule(); - if (callerModule != null && entitlementsModule.equals(callerModule) == false) { - // fast path - return callerModule; - } + // fast path + return callerClass; } - Optional module = StackWalker.getInstance(RETAIN_CLASS_REFERENCE) - .walk(frames -> findRequestingModule(frames.map(StackFrame::getDeclaringClass))); - return module.orElse(null); + Optional> result = StackWalker.getInstance(RETAIN_CLASS_REFERENCE) + .walk(frames -> findRequestingClass(frames.map(StackFrame::getDeclaringClass))); + return result.orElse(null); } /** @@ -245,33 +242,25 @@ public class PolicyManager { * * @throws NullPointerException if the requesting module is {@code null} */ - Optional findRequestingModule(Stream> classes) { - return classes.map(Objects::requireNonNull) - .map(PolicyManager::moduleOf) - .filter(m -> m != entitlementsModule) // Ignore the entitlements library itself entirely - .skip(1) // Skip the sensitive method itself + Optional> findRequestingClass(Stream> classes) { + return classes.filter(c -> c.getModule() != entitlementsModule) // Ignore the entitlements library + .skip(1) // Skip the sensitive caller method .findFirst(); } - private static Module moduleOf(Class c) { - var result = c.getModule(); - if (result == null) { - throw new NullPointerException("Entitlements system does not support non-modular class [" + c.getName() + "]"); - } else { - return result; - } - } - - private static boolean isTriviallyAllowed(Module requestingModule) { + /** + * @return true if permission is granted regardless of the entitlement + */ + private static boolean isTriviallyAllowed(Class requestingClass) { if (logger.isTraceEnabled()) { logger.trace("Stack trace for upcoming trivially-allowed check", new Exception()); } - if (requestingModule == null) { + if (requestingClass == null) { logger.debug("Entitlement trivially allowed: no caller frames outside the entitlement library"); return true; } - if (systemModules.contains(requestingModule)) { - logger.debug("Entitlement trivially allowed from system module [{}]", requestingModule.getName()); + if (systemModules.contains(requestingClass.getModule())) { + logger.debug("Entitlement trivially allowed from system module [{}]", requestingClass.getModule().getName()); return true; } logger.trace("Entitlement not trivially allowed"); diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java index fb63d5ffbeb4..013acf8f22fa 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java @@ -34,8 +34,11 @@ import java.util.stream.Stream; */ public class PolicyParser { - private 
static final Map> EXTERNAL_ENTITLEMENTS = Stream.of(FileEntitlement.class, CreateClassLoaderEntitlement.class) - .collect(Collectors.toUnmodifiableMap(PolicyParser::getEntitlementTypeName, Function.identity())); + private static final Map> EXTERNAL_ENTITLEMENTS = Stream.of( + FileEntitlement.class, + CreateClassLoaderEntitlement.class, + SetHttpsConnectionPropertiesEntitlement.class + ).collect(Collectors.toUnmodifiableMap(PolicyParser::getEntitlementTypeName, Function.identity())); protected final XContentParser policyParser; protected final String policyName; diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Scope.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Scope.java index 0fe63eb8da1b..55e257797d60 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Scope.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Scope.java @@ -9,38 +9,17 @@ package org.elasticsearch.entitlement.runtime.policy; -import java.util.Collections; import java.util.List; import java.util.Objects; /** * A holder for entitlements within a single scope. */ -public class Scope { +public record Scope(String moduleName, List entitlements) { - public final String name; - public final List entitlements; - - public Scope(String name, List entitlements) { - this.name = Objects.requireNonNull(name); - this.entitlements = Collections.unmodifiableList(Objects.requireNonNull(entitlements)); + public Scope(String moduleName, List entitlements) { + this.moduleName = Objects.requireNonNull(moduleName); + this.entitlements = List.copyOf(entitlements); } - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - Scope scope = (Scope) o; - return Objects.equals(name, scope.name) && Objects.equals(entitlements, scope.entitlements); - } - - @Override - public int hashCode() { - return Objects.hash(name, entitlements); - } - - @Override - public String toString() { - return "Scope{" + "name='" + name + '\'' + ", entitlements=" + entitlements + '}'; - } } diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/SetHttpsConnectionPropertiesEntitlement.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/SetHttpsConnectionPropertiesEntitlement.java new file mode 100644 index 000000000000..bb2f65def9e1 --- /dev/null +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/SetHttpsConnectionPropertiesEntitlement.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.entitlement.runtime.policy; + +/** + * An Entitlement that allows setting properties on a single HTTPS connection after it has been created + */ +public record SetHttpsConnectionPropertiesEntitlement() implements Entitlement { + @ExternalEntitlement(esModulesOnly = false) + public SetHttpsConnectionPropertiesEntitlement {} +} diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java index 31e3e62f56bf..d22c2f598e34 100644 --- a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java +++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java @@ -9,7 +9,7 @@ package org.elasticsearch.entitlement.runtime.policy; -import org.elasticsearch.entitlement.runtime.api.NotEntitledException; +import org.elasticsearch.entitlement.runtime.policy.PolicyManager.ModuleEntitlements; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.compiler.InMemoryJavaCompiler; import org.elasticsearch.test.jar.JarUtils; @@ -31,8 +31,6 @@ import static org.elasticsearch.test.LambdaMatchers.transformedMatch; import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.endsWith; -import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.sameInstance; @@ -58,6 +56,7 @@ public class PolicyManagerTests extends ESTestCase { public void testGetEntitlementsThrowsOnMissingPluginUnnamedModule() { var policyManager = new PolicyManager( createEmptyTestServerPolicy(), + List.of(), Map.of("plugin1", createPluginPolicy("plugin.module")), c -> "plugin1", NO_ENTITLEMENTS_MODULE @@ -67,60 +66,44 @@ var callerClass = this.getClass(); var requestingModule = callerClass.getModule(); - var ex = assertThrows( - "No policy for the unnamed module", - NotEntitledException.class, - () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule) - ); + assertEquals("No policy for the unnamed module", ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass)); - assertEquals( - "Missing
entitlement policy: caller [class org.elasticsearch.entitlement.runtime.policy.PolicyManagerTests], module [null]", - ex.getMessage() - ); - assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE)); + assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap); } public void testGetEntitlementsFailureIsCached() { - var policyManager = new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "plugin1", NO_ENTITLEMENTS_MODULE); + var policyManager = new PolicyManager(createEmptyTestServerPolicy(), List.of(), Map.of(), c -> "plugin1", NO_ENTITLEMENTS_MODULE); // Any class from the current module (unnamed) will do var callerClass = this.getClass(); var requestingModule = callerClass.getModule(); - assertThrows(NotEntitledException.class, () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule)); - assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE)); + assertEquals(ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass)); + assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap); // A second time - var ex = assertThrows(NotEntitledException.class, () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule)); + assertEquals(ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass)); - assertThat(ex.getMessage(), endsWith("[CACHED]")); // Nothing new in the map - assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1)); + assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap); } public void testGetEntitlementsReturnsEntitlementsForPluginUnnamedModule() { var policyManager = new PolicyManager( createEmptyTestServerPolicy(), + List.of(), Map.ofEntries(entry("plugin2", createPluginPolicy(ALL_UNNAMED))), c -> "plugin2", NO_ENTITLEMENTS_MODULE @@ -128,14 +111,13 @@ public class PolicyManagerTests extends ESTestCase { // Any class from the current module (unnamed) will do var callerClass = this.getClass(); - var requestingModule = callerClass.getModule(); - var entitlements = policyManager.getEntitlementsOrThrow(callerClass, requestingModule); + var entitlements = policyManager.getEntitlements(callerClass); assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); } public void testGetEntitlementsThrowsOnMissingPolicyForServer() throws ClassNotFoundException { - var policyManager = new PolicyManager(createTestServerPolicy("example"), Map.of(), c -> null, NO_ENTITLEMENTS_MODULE); + var policyManager = new PolicyManager(createTestServerPolicy("example"), List.of(), Map.of(), c -> null, NO_ENTITLEMENTS_MODULE); // Tests do not run modular, so we cannot use a server class. // But we know that in production code the server module and its classes are in the boot layer. 
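A minimal, self-contained sketch of the lookup behavior these tests now pin down (names here are hypothetical, not the real PolicyManager API): entitlements are memoized per module with ConcurrentHashMap.computeIfAbsent, and a module with no applicable policy is cached as a NONE sentinel instead of throwing:

    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    class EntitlementsCacheSketch {
        record Entitlements(List<String> names) {
            static final Entitlements NONE = new Entitlements(List.of());
        }

        private final Map<Module, Entitlements> cache = new ConcurrentHashMap<>();

        Entitlements getEntitlements(Class<?> requestingClass) {
            // Compute at most once per module; NONE results are cached too,
            // so repeated lookups for policy-less modules stay cheap.
            return cache.computeIfAbsent(requestingClass.getModule(), m -> compute(requestingClass));
        }

        private Entitlements compute(Class<?> requestingClass) {
            // Placeholder: the real manager consults server and plugin policies here.
            return Entitlements.NONE;
        }
    }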
@@ -144,21 +126,19 @@ public class PolicyManagerTests extends ESTestCase { var mockServerClass = ModuleLayer.boot().findLoader("jdk.httpserver").loadClass("com.sun.net.httpserver.HttpServer"); var requestingModule = mockServerClass.getModule(); - var ex = assertThrows( - "No policy for this module in server", - NotEntitledException.class, - () -> policyManager.getEntitlementsOrThrow(mockServerClass, requestingModule) - ); + assertEquals("No policy for this module in server", ModuleEntitlements.NONE, policyManager.getEntitlements(mockServerClass)); - assertEquals( - "Missing entitlement policy: caller [class com.sun.net.httpserver.HttpServer], module [jdk.httpserver]", - ex.getMessage() - ); - assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE)); + assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap); } public void testGetEntitlementsReturnsEntitlementsForServerModule() throws ClassNotFoundException { - var policyManager = new PolicyManager(createTestServerPolicy("jdk.httpserver"), Map.of(), c -> null, NO_ENTITLEMENTS_MODULE); + var policyManager = new PolicyManager( + createTestServerPolicy("jdk.httpserver"), + List.of(), + Map.of(), + c -> null, + NO_ENTITLEMENTS_MODULE + ); // Tests do not run modular, so we cannot use a server class. // But we know that in production code the server module and its classes are in the boot layer. @@ -167,7 +147,7 @@ public class PolicyManagerTests extends ESTestCase { var mockServerClass = ModuleLayer.boot().findLoader("jdk.httpserver").loadClass("com.sun.net.httpserver.HttpServer"); var requestingModule = mockServerClass.getModule(); - var entitlements = policyManager.getEntitlementsOrThrow(mockServerClass, requestingModule); + var entitlements = policyManager.getEntitlements(mockServerClass); assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); assertThat(entitlements.hasEntitlement(ExitVMEntitlement.class), is(true)); } @@ -179,6 +159,7 @@ public class PolicyManagerTests extends ESTestCase { var policyManager = new PolicyManager( createEmptyTestServerPolicy(), + List.of(), Map.of("mock-plugin", createPluginPolicy("org.example.plugin")), c -> "mock-plugin", NO_ENTITLEMENTS_MODULE @@ -188,7 +169,7 @@ public class PolicyManagerTests extends ESTestCase { var mockPluginClass = layer.findLoader("org.example.plugin").loadClass("q.B"); var requestingModule = mockPluginClass.getModule(); - var entitlements = policyManager.getEntitlementsOrThrow(mockPluginClass, requestingModule); + var entitlements = policyManager.getEntitlements(mockPluginClass); assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); assertThat( entitlements.getEntitlements(FileEntitlement.class).toList(), @@ -199,6 +180,7 @@ public class PolicyManagerTests extends ESTestCase { public void testGetEntitlementsResultIsCached() { var policyManager = new PolicyManager( createEmptyTestServerPolicy(), + List.of(), Map.ofEntries(entry("plugin2", createPluginPolicy(ALL_UNNAMED))), c -> "plugin2", NO_ENTITLEMENTS_MODULE @@ -206,22 +188,21 @@ public class PolicyManagerTests extends ESTestCase { // Any class from the current module (unnamed) will do var callerClass = this.getClass(); - var requestingModule = callerClass.getModule(); - var entitlements = policyManager.getEntitlementsOrThrow(callerClass, requestingModule); + var entitlements = policyManager.getEntitlements(callerClass); 
assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1)); var cachedResult = policyManager.moduleEntitlementsMap.values().stream().findFirst().get(); - var entitlementsAgain = policyManager.getEntitlementsOrThrow(callerClass, requestingModule); + var entitlementsAgain = policyManager.getEntitlements(callerClass); // Nothing new in the map assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1)); assertThat(entitlementsAgain, sameInstance(cachedResult)); } - public void testRequestingModuleFastPath() throws IOException, ClassNotFoundException { + public void testRequestingClassFastPath() throws IOException, ClassNotFoundException { var callerClass = makeClassInItsOwnModule(); - assertEquals(callerClass.getModule(), policyManagerWithEntitlementsModule(NO_ENTITLEMENTS_MODULE).requestingModule(callerClass)); + assertEquals(callerClass, policyManagerWithEntitlementsModule(NO_ENTITLEMENTS_MODULE).requestingClass(callerClass)); } public void testRequestingModuleWithStackWalk() throws IOException, ClassNotFoundException { @@ -232,24 +213,21 @@ public class PolicyManagerTests extends ESTestCase { var policyManager = policyManagerWithEntitlementsModule(entitlementsClass.getModule()); - var requestingModule = requestingClass.getModule(); - assertEquals( "Skip entitlement library and the instrumented method", - requestingModule, - policyManager.findRequestingModule(Stream.of(entitlementsClass, instrumentedClass, requestingClass, ignorableClass)) - .orElse(null) + requestingClass, + policyManager.findRequestingClass(Stream.of(entitlementsClass, instrumentedClass, requestingClass, ignorableClass)).orElse(null) ); assertEquals( "Skip multiple library frames", - requestingModule, - policyManager.findRequestingModule(Stream.of(entitlementsClass, entitlementsClass, instrumentedClass, requestingClass)) + requestingClass, + policyManager.findRequestingClass(Stream.of(entitlementsClass, entitlementsClass, instrumentedClass, requestingClass)) .orElse(null) ); assertThrows( "Non-modular caller frames are not supported", NullPointerException.class, - () -> policyManager.findRequestingModule(Stream.of(entitlementsClass, null)) + () -> policyManager.findRequestingClass(Stream.of(entitlementsClass, null)) ); } @@ -261,7 +239,7 @@ public class PolicyManagerTests extends ESTestCase { } private static PolicyManager policyManagerWithEntitlementsModule(Module entitlementsModule) { - return new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "test", entitlementsModule); + return new PolicyManager(createEmptyTestServerPolicy(), List.of(), Map.of(), c -> "test", entitlementsModule); } private static Policy createEmptyTestServerPolicy() { diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java index 633c76cb8c04..4d17fc92e157 100644 --- a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java +++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java @@ -16,11 +16,7 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.List; -import static org.elasticsearch.test.LambdaMatchers.transformedMatch; -import static org.hamcrest.Matchers.both; -import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; -import 
static org.hamcrest.Matchers.instanceOf; public class PolicyParserTests extends ESTestCase { @@ -39,21 +35,21 @@ public class PolicyParserTests extends ESTestCase { public void testPolicyBuilder() throws IOException { Policy parsedPolicy = new PolicyParser(PolicyParserTests.class.getResourceAsStream("test-policy.yaml"), "test-policy.yaml", false) .parsePolicy(); - Policy builtPolicy = new Policy( + Policy expected = new Policy( "test-policy.yaml", List.of(new Scope("entitlement-module-name", List.of(new FileEntitlement("test/path/to/file", List.of("read", "write"))))) ); - assertEquals(parsedPolicy, builtPolicy); + assertEquals(expected, parsedPolicy); } public void testPolicyBuilderOnExternalPlugin() throws IOException { Policy parsedPolicy = new PolicyParser(PolicyParserTests.class.getResourceAsStream("test-policy.yaml"), "test-policy.yaml", true) .parsePolicy(); - Policy builtPolicy = new Policy( + Policy expected = new Policy( "test-policy.yaml", List.of(new Scope("entitlement-module-name", List.of(new FileEntitlement("test/path/to/file", List.of("read", "write"))))) ); - assertEquals(parsedPolicy, builtPolicy); + assertEquals(expected, parsedPolicy); } public void testParseCreateClassloader() throws IOException { @@ -61,17 +57,22 @@ public class PolicyParserTests extends ESTestCase { entitlement-module-name: - create_class_loader """.getBytes(StandardCharsets.UTF_8)), "test-policy.yaml", false).parsePolicy(); - Policy builtPolicy = new Policy( + Policy expected = new Policy( "test-policy.yaml", List.of(new Scope("entitlement-module-name", List.of(new CreateClassLoaderEntitlement()))) ); - assertThat( - parsedPolicy.scopes, - contains( - both(transformedMatch((Scope scope) -> scope.name, equalTo("entitlement-module-name"))).and( - transformedMatch(scope -> scope.entitlements, contains(instanceOf(CreateClassLoaderEntitlement.class))) - ) - ) + assertEquals(expected, parsedPolicy); + } + + public void testParseSetHttpsConnectionProperties() throws IOException { + Policy parsedPolicy = new PolicyParser(new ByteArrayInputStream(""" + entitlement-module-name: + - set_https_connection_properties + """.getBytes(StandardCharsets.UTF_8)), "test-policy.yaml", true).parsePolicy(); + Policy expected = new Policy( + "test-policy.yaml", + List.of(new Scope("entitlement-module-name", List.of(new SetHttpsConnectionPropertiesEntitlement()))) ); + assertEquals(expected, parsedPolicy); } } diff --git a/modules/apm/src/main/plugin-metadata/entitlement-policy.yaml b/modules/apm/src/main/plugin-metadata/entitlement-policy.yaml new file mode 100644 index 000000000000..9c10bafca42f --- /dev/null +++ b/modules/apm/src/main/plugin-metadata/entitlement-policy.yaml @@ -0,0 +1,4 @@ +org.elasticsearch.telemetry.apm: + - create_class_loader +elastic.apm.agent: + - set_https_connection_properties diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java index 574ea44d6235..e3d767c92a68 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java @@ -244,7 +244,7 @@ public class DataStreamGetWriteIndexTests extends ESTestCase { new MetadataFieldMapper[] { dtfm }, Collections.emptyMap() ); - MappingLookup mappingLookup = MappingLookup.fromMappers(mapping, List.of(dtfm, dateFieldMapper), List.of(), null); + 
MappingLookup mappingLookup = MappingLookup.fromMappers(mapping, List.of(dtfm, dateFieldMapper), List.of()); indicesService = DataStreamTestHelper.mockIndicesServices(mappingLookup); } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationAction.java index 1970883e91b3..68b3ce279a89 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationAction.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.Map; import java.util.Objects; import static org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationMetadata.DATABASE; @@ -91,6 +92,11 @@ public class GetDatabaseConfigurationAction extends ActionType { this.databases = in.readCollectionAsList(DatabaseConfigurationMetadata::new); } + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeCollection(databases); + } + @Override protected List readNodesFrom(StreamInput in) throws IOException { return in.readCollectionAsList(NodeResponse::new); @@ -122,6 +128,63 @@ public class GetDatabaseConfigurationAction extends ActionType { builder.endObject(); return builder; } + + /* + * This implementation of equals exists solely for testing the serialization of this object. + */ + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return Objects.equals(databases, response.databases) + && Objects.equals(getClusterName(), response.getClusterName()) + && Objects.equals(equalsHashCodeFailures(), response.equalsHashCodeFailures()) + && Objects.equals(getNodes(), response.getNodes()) + && Objects.equals(equalsHashCodeNodesMap(), response.equalsHashCodeNodesMap()); + } + + /* + * This implementation of hashCode exists solely for testing the serialization of this object. + */ + @Override + public int hashCode() { + return Objects.hash(databases, getClusterName(), equalsHashCodeFailures(), getNodes(), equalsHashCodeNodesMap()); + } + + /* + * FailedNodeException does not implement equals or hashCode, making it difficult to test the serialization of this class. This + * helper method wraps the failures() list with a class that does implement equals and hashCode. 
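+ * (Editorial aside, hedged: this wrapper is the usual "equality adapter" idiom for a type without value semantics,
+ * roughly:
+ *
+ *     record Adapter(FailedNodeException e) {
+ *         @Override
+ *         public boolean equals(Object o) {
+ *             return o instanceof Adapter a
+ *                 && Objects.equals(e.nodeId(), a.e.nodeId())
+ *                 && Objects.equals(e.getMessage(), a.e.getMessage());
+ *         }
+ *
+ *         @Override
+ *         public int hashCode() {
+ *             return Objects.hash(e.nodeId(), e.getMessage());
+ *         }
+ *     }
+ *
+ * which is what EqualsHashCodeFailedNodeException below implements.)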
+ */ + private List equalsHashCodeFailures() { + return failures().stream().map(EqualsHashCodeFailedNodeException::new).toList(); + } + + private record EqualsHashCodeFailedNodeException(FailedNodeException failedNodeException) { + @Override + public boolean equals(Object o) { + if (o == this) return true; + if (o == null || getClass() != o.getClass()) return false; + EqualsHashCodeFailedNodeException other = (EqualsHashCodeFailedNodeException) o; + return Objects.equals(failedNodeException.nodeId(), other.failedNodeException.nodeId()) + && Objects.equals(failedNodeException.getMessage(), other.failedNodeException.getMessage()); + } + + @Override + public int hashCode() { + return Objects.hash(failedNodeException.nodeId(), failedNodeException.getMessage()); + } + } + + /* + * The getNodesMap method changes the value of the nodesMap, causing failures when testing the concurrent serialization and + * deserialization of this class. Since this is a response object, we do not actually care about concurrency since it will not + * happen in practice. So this helper method synchronizes access to getNodesMap, which can be used from equals and hashCode for + * tests. + */ + private synchronized Map equalsHashCodeNodesMap() { + return getNodesMap(); + } } public static class NodeRequest extends TransportRequest { @@ -186,6 +249,7 @@ public class GetDatabaseConfigurationAction extends ActionType { @Override public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); out.writeCollection(databases); } diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationActionNodeResponseTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationActionNodeResponseTests.java new file mode 100644 index 000000000000..12fb08a5a1ab --- /dev/null +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationActionNodeResponseTests.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.ingest.geoip.direct; + +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.emptySet; + +public class GetDatabaseConfigurationActionNodeResponseTests extends AbstractWireSerializingTestCase< + GetDatabaseConfigurationAction.NodeResponse> { + @Override + protected Writeable.Reader instanceReader() { + return GetDatabaseConfigurationAction.NodeResponse::new; + } + + @Override + protected GetDatabaseConfigurationAction.NodeResponse createTestInstance() { + return getRandomDatabaseConfigurationActionNodeResponse(); + } + + static GetDatabaseConfigurationAction.NodeResponse getRandomDatabaseConfigurationActionNodeResponse() { + return new GetDatabaseConfigurationAction.NodeResponse(randomDiscoveryNode(), getRandomDatabaseConfigurationMetadata()); + } + + private static DiscoveryNode randomDiscoveryNode() { + return DiscoveryNodeUtils.builder(randomAlphaOfLength(6)).roles(emptySet()).build(); + } + + static List getRandomDatabaseConfigurationMetadata() { + return randomList( + 0, + 20, + () -> new DatabaseConfigurationMetadata( + new DatabaseConfiguration( + randomAlphaOfLength(20), + randomAlphaOfLength(20), + randomFrom( + List.of( + new DatabaseConfiguration.Local(randomAlphaOfLength(10)), + new DatabaseConfiguration.Web(), + new DatabaseConfiguration.Ipinfo(), + new DatabaseConfiguration.Maxmind(randomAlphaOfLength(10)) + ) + ) + ), + randomNonNegativeLong(), + randomNonNegativeLong() + ) + ); + } + + @Override + protected GetDatabaseConfigurationAction.NodeResponse mutateInstance(GetDatabaseConfigurationAction.NodeResponse instance) + throws IOException { + return null; + } + + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry( + List.of( + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Maxmind.NAME, + DatabaseConfiguration.Maxmind::new + ), + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Ipinfo.NAME, + DatabaseConfiguration.Ipinfo::new + ), + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Local.NAME, + DatabaseConfiguration.Local::new + ), + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Web.NAME, + DatabaseConfiguration.Web::new + ) + ) + ); + } +} diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationActionResponseTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationActionResponseTests.java new file mode 100644 index 000000000000..1b48a409d787 --- /dev/null +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationActionResponseTests.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.ingest.geoip.direct; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; +import java.util.List; + +public class GetDatabaseConfigurationActionResponseTests extends AbstractWireSerializingTestCase { + @Override + protected Writeable.Reader instanceReader() { + return GetDatabaseConfigurationAction.Response::new; + } + + @Override + protected GetDatabaseConfigurationAction.Response createTestInstance() { + return new GetDatabaseConfigurationAction.Response( + GetDatabaseConfigurationActionNodeResponseTests.getRandomDatabaseConfigurationMetadata(), + getTestClusterName(), + getTestNodeResponses(), + getTestFailedNodeExceptions() + ); + } + + @Override + protected GetDatabaseConfigurationAction.Response mutateInstance(GetDatabaseConfigurationAction.Response instance) throws IOException { + return null; + } + + private ClusterName getTestClusterName() { + return new ClusterName(randomAlphaOfLength(30)); + } + + private List getTestNodeResponses() { + return randomList(0, 20, GetDatabaseConfigurationActionNodeResponseTests::getRandomDatabaseConfigurationActionNodeResponse); + } + + private List getTestFailedNodeExceptions() { + return randomList( + 0, + 5, + () -> new FailedNodeException( + randomAlphaOfLength(10), + randomAlphaOfLength(20), + new ElasticsearchException(randomAlphaOfLength(10)) + ) + ); + } + + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry( + List.of( + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Maxmind.NAME, + DatabaseConfiguration.Maxmind::new + ), + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Ipinfo.NAME, + DatabaseConfiguration.Ipinfo::new + ), + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Local.NAME, + DatabaseConfiguration.Local::new + ), + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Web.NAME, + DatabaseConfiguration.Web::new + ) + ) + ); + } +} diff --git a/modules/lang-expression/src/main/plugin-metadata/entitlement-policy.yaml b/modules/lang-expression/src/main/plugin-metadata/entitlement-policy.yaml new file mode 100644 index 000000000000..b05e6e3a7bf7 --- /dev/null +++ b/modules/lang-expression/src/main/plugin-metadata/entitlement-policy.yaml @@ -0,0 +1,2 @@ +org.elasticsearch.script.expression: + - create_class_loader diff --git a/modules/lang-painless/src/main/plugin-metadata/entitlement-policy.yaml b/modules/lang-painless/src/main/plugin-metadata/entitlement-policy.yaml new file mode 100644 index 000000000000..d7e4ad872fc3 --- /dev/null +++ b/modules/lang-painless/src/main/plugin-metadata/entitlement-policy.yaml @@ -0,0 +1,2 @@ +org.elasticsearch.painless: + - create_class_loader diff --git 
a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java index a4b030e3c793..4f56b6d88b63 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java @@ -59,10 +59,10 @@ public class ReindexNodeShutdownIT extends ESIntegTestCase { final String dataNodeName = internalCluster().startDataOnlyNode(); /* Maximum time to wait for reindexing tasks to complete before shutdown */ - final Settings COORD_SETTINGS = Settings.builder() + final Settings coordSettings = Settings.builder() .put(MAXIMUM_REINDEXING_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(60)) .build(); - final String coordNodeName = internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); + final String coordNodeName = internalCluster().startCoordinatingOnlyNode(coordSettings); ensureStableCluster(3); diff --git a/modules/repository-gcs/src/main/plugin-metadata/entitlement-policy.yaml b/modules/repository-gcs/src/main/plugin-metadata/entitlement-policy.yaml new file mode 100644 index 000000000000..a1ff54f02d96 --- /dev/null +++ b/modules/repository-gcs/src/main/plugin-metadata/entitlement-policy.yaml @@ -0,0 +1,2 @@ +ALL-UNNAMED: + - set_https_connection_properties # required by google-http-client diff --git a/muted-tests.yml b/muted-tests.yml index d912f30584b8..d874a6ffd895 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -169,9 +169,6 @@ tests: - class: org.elasticsearch.xpack.restart.QueryBuilderBWCIT method: testQueryBuilderBWC {p0=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/116989 -- class: org.elasticsearch.index.reindex.ReindexNodeShutdownIT - method: testReindexWithShutdown - issue: https://github.com/elastic/elasticsearch/issues/118040 - class: org.elasticsearch.xpack.remotecluster.CrossClusterEsqlRCS2UnavailableRemotesIT method: testEsqlRcs2UnavailableRemoteScenarios issue: https://github.com/elastic/elasticsearch/issues/117419 @@ -229,43 +226,59 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/116777 - class: org.elasticsearch.xpack.security.authc.ldap.ActiveDirectoryRunAsIT issue: https://github.com/elastic/elasticsearch/issues/115727 -- class: org.elasticsearch.xpack.security.authc.kerberos.KerberosAuthenticationIT - issue: https://github.com/elastic/elasticsearch/issues/118414 - class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT method: test {yaml=reference/search/search-your-data/retrievers-examples/line_98} issue: https://github.com/elastic/elasticsearch/issues/119155 - class: org.elasticsearch.xpack.esql.action.EsqlNodeFailureIT method: testFailureLoadingFields issue: https://github.com/elastic/elasticsearch/issues/118000 -- class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT - method: test {yaml=indices.create/20_synthetic_source/create index with use_synthetic_source} - issue: https://github.com/elastic/elasticsearch/issues/119191 - class: org.elasticsearch.index.mapper.AbstractShapeGeometryFieldMapperTests method: testCartesianBoundsBlockLoader issue: https://github.com/elastic/elasticsearch/issues/119201 - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=ml/data_frame_analytics_cat_apis/Test cat data frame analytics all jobs with header} issue: 
https://github.com/elastic/elasticsearch/issues/119332 -- class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT - method: test {lookup-join.MvJoinKeyOnTheDataNode ASYNC} - issue: https://github.com/elastic/elasticsearch/issues/119179 - class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT issue: https://github.com/elastic/elasticsearch/issues/119191 -- class: org.elasticsearch.xpack.esql.EsqlSecurityIT - method: testLookupJoinIndexAllowed - issue: https://github.com/elastic/elasticsearch/issues/119268 -- class: org.elasticsearch.xpack.esql.EsqlSecurityIT - method: testLookupJoinIndexForbidden - issue: https://github.com/elastic/elasticsearch/issues/119269 -- class: org.elasticsearch.xpack.esql.EsqlAsyncSecurityIT - method: testLookupJoinIndexForbidden - issue: https://github.com/elastic/elasticsearch/issues/119270 -- class: org.elasticsearch.xpack.esql.EsqlAsyncSecurityIT - method: testLookupJoinIndexAllowed - issue: https://github.com/elastic/elasticsearch/issues/119271 - class: org.elasticsearch.xpack.logsdb.qa.LogsDbVersusLogsDbReindexedIntoStandardModeChallengeRestIT method: testEsqlTermsAggregationByMethod issue: https://github.com/elastic/elasticsearch/issues/119355 +- class: org.elasticsearch.xpack.logsdb.qa.LogsDbVersusLogsDbReindexedIntoStandardModeChallengeRestIT + method: testMatchAllQuery + issue: https://github.com/elastic/elasticsearch/issues/119432 +- class: org.elasticsearch.xpack.logsdb.qa.LogsDbVersusLogsDbReindexedIntoStandardModeChallengeRestIT + method: testTermsQuery + issue: https://github.com/elastic/elasticsearch/issues/119486 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=transform/transforms_start_stop/Test start/stop/start transform} + issue: https://github.com/elastic/elasticsearch/issues/119508 +- class: org.elasticsearch.xpack.logsdb.qa.LogsDbVersusLogsDbReindexedIntoStandardModeChallengeRestIT + method: testEsqlSource + issue: https://github.com/elastic/elasticsearch/issues/119510 +- class: org.elasticsearch.smoketest.MlWithSecurityIT + method: test {yaml=ml/sparse_vector_search/Test sparse_vector search with query vector and pruning config} + issue: https://github.com/elastic/elasticsearch/issues/119548 +- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT + method: testSearchableSnapshotUpgrade {p0=[9.0.0, 8.18.0, 8.18.0]} + issue: https://github.com/elastic/elasticsearch/issues/119549 +- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT + method: testMountSearchableSnapshot {p0=[9.0.0, 8.18.0, 8.18.0]} + issue: https://github.com/elastic/elasticsearch/issues/119550 +- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT + method: testMountSearchableSnapshot {p0=[9.0.0, 9.0.0, 8.18.0]} + issue: https://github.com/elastic/elasticsearch/issues/119551 +- class: org.elasticsearch.index.engine.LuceneSyntheticSourceChangesSnapshotTests + method: testSkipNonRootOfNestedDocuments + issue: https://github.com/elastic/elasticsearch/issues/119553 +- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT + method: testSearchableSnapshotUpgrade {p0=[9.0.0, 9.0.0, 8.18.0]} + issue: https://github.com/elastic/elasticsearch/issues/119560 +- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT + method: testMountSearchableSnapshot {p0=[9.0.0, 9.0.0, 9.0.0]} + issue: https://github.com/elastic/elasticsearch/issues/119561 +- class: 
org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT + method: testSearchableSnapshotUpgrade {p0=[9.0.0, 9.0.0, 9.0.0]} + issue: https://github.com/elastic/elasticsearch/issues/119562 # Examples: # diff --git a/plugins/discovery-gce/src/main/plugin-metadata/entitlement-policy.yaml b/plugins/discovery-gce/src/main/plugin-metadata/entitlement-policy.yaml new file mode 100644 index 000000000000..a1ff54f02d96 --- /dev/null +++ b/plugins/discovery-gce/src/main/plugin-metadata/entitlement-policy.yaml @@ -0,0 +1,2 @@ +ALL-UNNAMED: + - set_https_connection_properties # required by google-http-client diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractLuceneIndexCompatibilityTestCase.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractIndexCompatibilityTestCase.java similarity index 73% rename from qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractLuceneIndexCompatibilityTestCase.java rename to qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractIndexCompatibilityTestCase.java index 1865da06e20c..68375cec5171 100644 --- a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractLuceneIndexCompatibilityTestCase.java +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractIndexCompatibilityTestCase.java @@ -9,11 +9,6 @@ package org.elasticsearch.lucene; -import com.carrotsearch.randomizedtesting.TestMethodAndParams; -import com.carrotsearch.randomizedtesting.annotations.Name; -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; - import org.elasticsearch.client.Request; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; @@ -22,16 +17,18 @@ import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.cluster.util.Version; import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.After; import org.junit.Before; import org.junit.ClassRule; import org.junit.rules.RuleChain; import org.junit.rules.TemporaryFolder; import org.junit.rules.TestRule; -import java.util.Comparator; +import java.io.IOException; +import java.util.HashMap; import java.util.Locale; +import java.util.Map; import java.util.stream.IntStream; -import java.util.stream.Stream; import static org.elasticsearch.test.cluster.util.Version.CURRENT; import static org.elasticsearch.test.cluster.util.Version.fromString; @@ -41,24 +38,21 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; -/** - * Test suite for Lucene indices backward compatibility with N-2 versions. The test suite creates a cluster in N-2 version, then upgrades it - * to N-1 version and finally upgrades it to the current version. Test methods are executed after each upgrade. 
- */ -@TestCaseOrdering(AbstractLuceneIndexCompatibilityTestCase.TestCaseOrdering.class) -public abstract class AbstractLuceneIndexCompatibilityTestCase extends ESRestTestCase { +public abstract class AbstractIndexCompatibilityTestCase extends ESRestTestCase { protected static final Version VERSION_MINUS_2 = fromString(System.getProperty("tests.minimum.index.compatible")); protected static final Version VERSION_MINUS_1 = fromString(System.getProperty("tests.minimum.wire.compatible")); protected static final Version VERSION_CURRENT = CURRENT; - protected static TemporaryFolder REPOSITORY_PATH = new TemporaryFolder(); + protected static final int NODES = 3; + + private static TemporaryFolder REPOSITORY_PATH = new TemporaryFolder(); protected static LocalClusterConfigProvider clusterConfig = c -> {}; private static ElasticsearchCluster cluster = ElasticsearchCluster.local() .distribution(DistributionType.DEFAULT) .version(VERSION_MINUS_2) - .nodes(2) + .nodes(NODES) .setting("path.repo", () -> REPOSITORY_PATH.getRoot().getPath()) .setting("xpack.security.enabled", "false") .setting("xpack.ml.enabled", "false") @@ -71,15 +65,44 @@ public abstract class AbstractLuceneIndexCompatibilityTestCase extends ESRestTes private static boolean upgradeFailed = false; - private final Version clusterVersion; + @Before + public final void maybeUpgradeBeforeTest() throws Exception { + // We want to use this test suite for the V9 upgrade, but we are not fully committed to necessarily having N-2 support + // in V10, so we add a check here to ensure we'll revisit this decision once V10 exists. + assertThat("Explicit check that N-2 version is Elasticsearch 7", VERSION_MINUS_2.getMajor(), equalTo(7)); - public AbstractLuceneIndexCompatibilityTestCase(@Name("cluster") Version clusterVersion) { - this.clusterVersion = clusterVersion; + if (upgradeFailed == false) { + try { + maybeUpgrade(); + } catch (Exception e) { + upgradeFailed = true; + throw e; + } + } + + // Skip remaining tests if upgrade failed + assumeFalse("Cluster upgrade failed", upgradeFailed); } - @ParametersFactory - public static Iterable<Object[]> parameters() { - return Stream.of(VERSION_MINUS_2, VERSION_MINUS_1, CURRENT).map(v -> new Object[] { v }).toList(); + protected abstract void maybeUpgrade() throws Exception; + + @After + public final void deleteSnapshotBlobCache() throws IOException { + // TODO ES-10475: The .snapshot-blob-cache created in legacy version can block upgrades, we should probably delete it automatically + try { + var request = new Request("DELETE", "/.snapshot-blob-cache"); + request.setOptions( + expectWarnings( + "this request accesses system indices: [.snapshot-blob-cache], but in a future major version, " + + "direct access to system indices will be prevented by default" + ) + ); + adminClient().performRequest(request); + } catch (IOException e) { + if (isNotFoundResponseException(e) == false) { + throw e; + } + } } @Override @@ -92,26 +115,8 @@ public abstract class AbstractLuceneIndexCompatibilityTestCase extends ESRestTes return true; } - @Before - public void maybeUpgrade() throws Exception { - // We want to use this test suite for the V9 upgrade, but we are not fully committed to necessarily having N-2 support - // in V10, so we add a check here to ensure we'll revisit this decision once V10 exists.
- assertThat("Explicit check that N-2 version is Elasticsearch 7", VERSION_MINUS_2.getMajor(), equalTo(7)); - - var currentVersion = clusterVersion(); - if (currentVersion.before(clusterVersion)) { - try { - cluster.upgradeToVersion(clusterVersion); - closeClients(); - initClient(); - } catch (Exception e) { - upgradeFailed = true; - throw e; - } - } - - // Skip remaining tests if upgrade failed - assumeFalse("Cluster upgrade failed", upgradeFailed); + protected ElasticsearchCluster cluster() { + return cluster; } protected String suffix(String name) { @@ -124,12 +129,18 @@ public abstract class AbstractLuceneIndexCompatibilityTestCase extends ESRestTes .build(); } - protected static Version clusterVersion() throws Exception { - var response = assertOK(client().performRequest(new Request("GET", "/"))); - var responseBody = createFromResponse(response); - var version = Version.fromString(responseBody.evaluate("version.number").toString()); - assertThat("Failed to retrieve cluster version", version, notNullValue()); - return version; + protected static Map nodesVersions() throws Exception { + var nodesInfos = getNodesInfo(adminClient()); + assertThat(nodesInfos.size(), equalTo(NODES)); + var versions = new HashMap(); + for (var nodeInfos : nodesInfos.values()) { + versions.put((String) nodeInfos.get("name"), Version.fromString((String) nodeInfos.get("version"))); + } + return versions; + } + + protected static boolean isFullyUpgradedTo(Version version) throws Exception { + return nodesVersions().values().stream().allMatch(v -> v.equals(version)); } protected static Version indexVersion(String indexName) throws Exception { @@ -181,16 +192,4 @@ public abstract class AbstractLuceneIndexCompatibilityTestCase extends ESRestTes assertThat(responseBody.evaluate("snapshot.shards.total"), equalTo((int) responseBody.evaluate("snapshot.shards.failed"))); assertThat(responseBody.evaluate("snapshot.shards.successful"), equalTo(0)); } - - /** - * Execute the test suite with the parameters provided by the {@link #parameters()} in version order. - */ - public static class TestCaseOrdering implements Comparator { - @Override - public int compare(TestMethodAndParams o1, TestMethodAndParams o2) { - var version1 = (Version) o1.getInstanceArguments().get(0); - var version2 = (Version) o2.getInstanceArguments().get(0); - return version1.compareTo(version2); - } - } } diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartIndexCompatibilityTestCase.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartIndexCompatibilityTestCase.java new file mode 100644 index 000000000000..9ca7132493ae --- /dev/null +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartIndexCompatibilityTestCase.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartIndexCompatibilityTestCase.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartIndexCompatibilityTestCase.java new file mode 100644 index 000000000000..9ca7132493ae --- /dev/null +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartIndexCompatibilityTestCase.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.lucene; + +import com.carrotsearch.randomizedtesting.TestMethodAndParams; +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; + +import org.elasticsearch.test.cluster.util.Version; + +import java.util.Comparator; +import java.util.stream.Stream; + +import static org.elasticsearch.test.cluster.util.Version.CURRENT; +import static org.hamcrest.Matchers.equalTo; + +/** + * Test suite for Lucene indices backward compatibility with N-2 versions after full cluster restart upgrades. The test suite creates a + * cluster in N-2 version, then upgrades it to N-1 version and finally upgrades it to the current version. Test methods are executed after + * each upgrade. + */ +@TestCaseOrdering(FullClusterRestartIndexCompatibilityTestCase.TestCaseOrdering.class) +public abstract class FullClusterRestartIndexCompatibilityTestCase extends AbstractIndexCompatibilityTestCase { + + private final Version clusterVersion; + + public FullClusterRestartIndexCompatibilityTestCase(@Name("cluster") Version clusterVersion) { + this.clusterVersion = clusterVersion; + } + + @ParametersFactory + public static Iterable<Object[]> parameters() { + return Stream.of(VERSION_MINUS_2, VERSION_MINUS_1, CURRENT).map(v -> new Object[] { v }).toList(); + } + + @Override + protected void maybeUpgrade() throws Exception { + if (nodesVersions().values().stream().anyMatch(version -> version.before(clusterVersion))) { + cluster().upgradeToVersion(clusterVersion); + closeClients(); + initClient(); + } + assertThat(isFullyUpgradedTo(clusterVersion), equalTo(true)); + } + + /** + * Execute the test suite with the parameters provided by the {@link #parameters()} in version order.
+ */ + public static class TestCaseOrdering implements Comparator<TestMethodAndParams> { + @Override + public int compare(TestMethodAndParams o1, TestMethodAndParams o2) { + var version1 = (Version) o1.getInstanceArguments().get(0); + var version2 = (Version) o2.getInstanceArguments().get(0); + return version1.compareTo(version2); + } + } +} diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/LuceneCompatibilityIT.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartLuceneIndexCompatibilityIT.java similarity index 92% rename from qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/LuceneCompatibilityIT.java rename to qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartLuceneIndexCompatibilityIT.java index 655e30f069f1..15d41cc981ce 100644 --- a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/LuceneCompatibilityIT.java +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartLuceneIndexCompatibilityIT.java @@ -23,13 +23,13 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; -public class LuceneCompatibilityIT extends AbstractLuceneIndexCompatibilityTestCase { +public class FullClusterRestartLuceneIndexCompatibilityIT extends FullClusterRestartIndexCompatibilityTestCase { static { clusterConfig = config -> config.setting("xpack.license.self_generated.type", "trial"); } - public LuceneCompatibilityIT(Version version) { + public FullClusterRestartLuceneIndexCompatibilityIT(Version version) { super(version); } @@ -42,7 +42,7 @@ public class LuceneCompatibilityIT extends AbstractLuceneIndexCompatibilityTestC final String index = suffix("index"); final int numDocs = 1234; - if (VERSION_MINUS_2.equals(clusterVersion())) { + if (isFullyUpgradedTo(VERSION_MINUS_2)) { logger.debug("--> registering repository [{}]", repository); registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings()); @@ -65,7 +65,7 @@ public class LuceneCompatibilityIT extends AbstractLuceneIndexCompatibilityTestC return; } - if (VERSION_MINUS_1.equals(clusterVersion())) { + if (isFullyUpgradedTo(VERSION_MINUS_1)) { ensureGreen(index); assertThat(indexVersion(index), equalTo(VERSION_MINUS_2)); @@ -76,7 +76,7 @@ public class LuceneCompatibilityIT extends AbstractLuceneIndexCompatibilityTestC return; } - if (VERSION_CURRENT.equals(clusterVersion())) { + if (isFullyUpgradedTo(VERSION_CURRENT)) { var restoredIndex = suffix("index-restored"); logger.debug("--> restoring index [{}] as [{}]", index, restoredIndex); diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/SearchableSnapshotCompatibilityIT.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartSearchableSnapshotIndexCompatibilityIT.java similarity index 91% rename from qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/SearchableSnapshotCompatibilityIT.java rename to qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartSearchableSnapshotIndexCompatibilityIT.java index d5db17f257b0..4d59badcba7e 100644 --- a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/SearchableSnapshotCompatibilityIT.java +++
b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartSearchableSnapshotIndexCompatibilityIT.java @@ -17,7 +17,7 @@ import org.elasticsearch.test.cluster.util.Version; import static org.hamcrest.Matchers.equalTo; -public class SearchableSnapshotCompatibilityIT extends AbstractLuceneIndexCompatibilityTestCase { +public class FullClusterRestartSearchableSnapshotIndexCompatibilityIT extends FullClusterRestartIndexCompatibilityTestCase { static { clusterConfig = config -> config.setting("xpack.license.self_generated.type", "trial") @@ -25,7 +25,7 @@ public class SearchableSnapshotCompatibilityIT extends AbstractLuceneIndexCompat .setting("xpack.searchable.snapshot.shared_cache.region_size", "256KB"); } - public SearchableSnapshotCompatibilityIT(Version version) { + public FullClusterRestartSearchableSnapshotIndexCompatibilityIT(Version version) { super(version); } @@ -38,7 +38,7 @@ public class SearchableSnapshotCompatibilityIT extends AbstractLuceneIndexCompat final String index = suffix("index"); final int numDocs = 1234; - if (VERSION_MINUS_2.equals(clusterVersion())) { + if (isFullyUpgradedTo(VERSION_MINUS_2)) { logger.debug("--> registering repository [{}]", repository); registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings()); @@ -61,7 +61,7 @@ public class SearchableSnapshotCompatibilityIT extends AbstractLuceneIndexCompat return; } - if (VERSION_MINUS_1.equals(clusterVersion())) { + if (isFullyUpgradedTo(VERSION_MINUS_1)) { ensureGreen(index); assertThat(indexVersion(index), equalTo(VERSION_MINUS_2)); @@ -72,7 +72,7 @@ public class SearchableSnapshotCompatibilityIT extends AbstractLuceneIndexCompat return; } - if (VERSION_CURRENT.equals(clusterVersion())) { + if (isFullyUpgradedTo(VERSION_CURRENT)) { var mountedIndex = suffix("index-mounted"); logger.debug("--> mounting index [{}] as [{}]", index, mountedIndex); mountIndex(repository, snapshot, index, randomBoolean(), mountedIndex); @@ -98,7 +98,7 @@ public class SearchableSnapshotCompatibilityIT extends AbstractLuceneIndexCompat final String index = suffix("index"); final int numDocs = 4321; - if (VERSION_MINUS_2.equals(clusterVersion())) { + if (isFullyUpgradedTo(VERSION_MINUS_2)) { logger.debug("--> registering repository [{}]", repository); registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings()); @@ -124,7 +124,7 @@ public class SearchableSnapshotCompatibilityIT extends AbstractLuceneIndexCompat return; } - if (VERSION_MINUS_1.equals(clusterVersion())) { + if (isFullyUpgradedTo(VERSION_MINUS_1)) { logger.debug("--> mounting index [{}] as [{}]", index, mountedIndex); mountIndex(repository, snapshot, index, randomBoolean(), mountedIndex); @@ -135,7 +135,7 @@ public class SearchableSnapshotCompatibilityIT extends AbstractLuceneIndexCompat return; } - if (VERSION_CURRENT.equals(clusterVersion())) { + if (isFullyUpgradedTo(VERSION_CURRENT)) { ensureGreen(mountedIndex); assertThat(indexVersion(mountedIndex), equalTo(VERSION_MINUS_2)); diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeIndexCompatibilityTestCase.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeIndexCompatibilityTestCase.java new file mode 100644 index 000000000000..03b6a9292e35 --- /dev/null +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeIndexCompatibilityTestCase.java @@ -0,0 +1,104 @@ +/* + * Copyright Elasticsearch 
B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.lucene; + +import com.carrotsearch.randomizedtesting.TestMethodAndParams; +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; + +import org.elasticsearch.test.cluster.util.Version; + +import java.util.Comparator; +import java.util.List; +import java.util.stream.Stream; + +import static org.elasticsearch.test.cluster.util.Version.CURRENT; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.notNullValue; + +/** + * Test suite for Lucene indices backward compatibility with N-2 versions during rolling upgrades. The test suite creates a cluster in N-2 + * version, then upgrades each node sequentially to N-1 version and finally upgrades each node sequentially to the current version. Test + * methods are executed after each node upgrade. + */ +@TestCaseOrdering(RollingUpgradeIndexCompatibilityTestCase.TestCaseOrdering.class) +public abstract class RollingUpgradeIndexCompatibilityTestCase extends AbstractIndexCompatibilityTestCase { + + private final List<Version> nodesVersions; + + public RollingUpgradeIndexCompatibilityTestCase(@Name("cluster") List<Version> nodesVersions) { + this.nodesVersions = nodesVersions; + } + + @ParametersFactory + public static Iterable<Object[]> parameters() { + return Stream.of( + // Begin on N-2 + List.of(VERSION_MINUS_2, VERSION_MINUS_2, VERSION_MINUS_2), + // Rolling upgrade to VERSION_MINUS_1 + List.of(VERSION_MINUS_1, VERSION_MINUS_2, VERSION_MINUS_2), + List.of(VERSION_MINUS_1, VERSION_MINUS_1, VERSION_MINUS_2), + List.of(VERSION_MINUS_1, VERSION_MINUS_1, VERSION_MINUS_1), + // Rolling upgrade to CURRENT + List.of(CURRENT, VERSION_MINUS_1, VERSION_MINUS_1), + List.of(CURRENT, CURRENT, VERSION_MINUS_1), + List.of(CURRENT, CURRENT, CURRENT) + ).map(nodesVersion -> new Object[] { nodesVersion }).toList(); + } + + @Override + protected void maybeUpgrade() throws Exception { + assertThat(nodesVersions, hasSize(NODES)); + + for (int i = 0; i < NODES; i++) { + var nodeName = cluster().getName(i); + + var expectedNodeVersion = nodesVersions.get(i); + assertThat(expectedNodeVersion, notNullValue()); + + var currentNodeVersion = nodesVersions().get(nodeName); + assertThat(currentNodeVersion, notNullValue()); + assertThat(currentNodeVersion.onOrBefore(expectedNodeVersion), equalTo(true)); + + if (currentNodeVersion.equals(expectedNodeVersion) == false) { + closeClients(); + cluster().upgradeNodeToVersion(i, expectedNodeVersion); + initClient(); + } + + currentNodeVersion = nodesVersions().get(nodeName); + assertThat(currentNodeVersion, equalTo(expectedNodeVersion)); + } + }
+ + /** + * Execute the test suite with the parameters provided by the {@link #parameters()} in nodes versions order. + */ + public static class TestCaseOrdering implements Comparator<TestMethodAndParams> { + @Override + public int compare(TestMethodAndParams o1, TestMethodAndParams o2) { + List<?> nodesVersions1 = asInstanceOf(List.class, o1.getInstanceArguments().get(0)); + assertThat(nodesVersions1, hasSize(NODES)); + List<?> nodesVersions2 = asInstanceOf(List.class, o2.getInstanceArguments().get(0)); + assertThat(nodesVersions2, hasSize(NODES)); + for (int i = 0; i < NODES; i++) { + var nodeVersion1 = asInstanceOf(Version.class, nodesVersions1.get(i)); + var nodeVersion2 = asInstanceOf(Version.class, nodesVersions2.get(i)); + var result = nodeVersion1.compareTo(nodeVersion2); + if (result != 0) { + return result; + } + } + return 0; + } + } +}
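TestCaseOrdering above compares the two parameter lists element by element, so test instances execute in rolling-upgrade order: the first node position where the version tuples differ decides which instance runs first. A self-contained sketch of that lexicographic comparison, using integers as stand-ins for Version objects (the sample data is illustrative only):

import java.util.Comparator;
import java.util.List;

public class VersionListOrderSketch {
    // Lexicographic order: the first differing position decides; shorter lists sort first on ties.
    static <T extends Comparable<T>> Comparator<List<T>> lexicographic() {
        return (a, b) -> {
            for (int i = 0; i < Math.min(a.size(), b.size()); i++) {
                int cmp = a.get(i).compareTo(b.get(i));
                if (cmp != 0) {
                    return cmp;
                }
            }
            return Integer.compare(a.size(), b.size());
        };
    }

    public static void main(String[] args) {
        Comparator<List<Integer>> order = lexicographic();
        // Stand-ins for per-node versions, e.g. 1 = N-2, 2 = N-1, 3 = current.
        List<Integer> initial = List.of(1, 1, 1);
        List<Integer> partiallyUpgraded = List.of(2, 1, 1);
        System.out.println(order.compare(initial, partiallyUpgraded) < 0); // true: the fully N-2 state runs first
    }
}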
diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeSearchableSnapshotIndexCompatibilityIT.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeSearchableSnapshotIndexCompatibilityIT.java new file mode 100644 index 000000000000..89c159c15f70 --- /dev/null +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeSearchableSnapshotIndexCompatibilityIT.java @@ -0,0 +1,140 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.lucene; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.test.cluster.util.Version; + +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class RollingUpgradeSearchableSnapshotIndexCompatibilityIT extends RollingUpgradeIndexCompatibilityTestCase { + + static { + clusterConfig = config -> config.setting("xpack.license.self_generated.type", "trial") + .setting("xpack.searchable.snapshot.shared_cache.size", "16MB") + .setting("xpack.searchable.snapshot.shared_cache.region_size", "256KB"); + } + + public RollingUpgradeSearchableSnapshotIndexCompatibilityIT(List<Version> nodesVersion) { + super(nodesVersion); + } + + /** + * Creates an index and a snapshot on N-2, then mounts the snapshot during rolling upgrades. + */ + public void testMountSearchableSnapshot() throws Exception { + final String repository = suffix("repository"); + final String snapshot = suffix("snapshot"); + final String index = suffix("index-rolling-upgrade"); + final var mountedIndex = suffix("index-rolling-upgrade-mounted"); + final int numDocs = 3145; + + if (isFullyUpgradedTo(VERSION_MINUS_2)) { + logger.debug("--> registering repository [{}]", repository); + registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings()); + + logger.debug("--> creating index [{}]", index); + createIndex( + client(), + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build() + ); + + logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); + indexDocs(index, numDocs); + + logger.debug("--> creating snapshot [{}]", snapshot); + createSnapshot(client(), repository, snapshot, true); + + logger.debug("--> deleting index [{}]", index); + deleteIndex(index); + return; + } + + boolean success = false; + try { + logger.debug("--> mounting index [{}] as [{}]", index, mountedIndex); + mountIndex(repository, snapshot, index, randomBoolean(), mountedIndex); + + ensureGreen(mountedIndex); + + assertThat(indexVersion(mountedIndex), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), mountedIndex, numDocs); + + logger.debug("--> deleting mounted index [{}]", mountedIndex); + deleteIndex(mountedIndex); + + success = true; + } finally { + if (success == false) { + try { + client().performRequest(new Request("DELETE", "/" + mountedIndex)); + } catch (ResponseException e) { + logger.warn("Failed to delete mounted index [" + mountedIndex + ']', e); + } + } + } + } + + /** + * Creates an index and a snapshot on N-2, mounts the snapshot and ensures it remains searchable during rolling upgrades.
+ */ + public void testSearchableSnapshotUpgrade() throws Exception { + final String mountedIndex = suffix("index-rolling-upgraded-mounted"); + final String repository = suffix("repository"); + final String snapshot = suffix("snapshot"); + final String index = suffix("index-rolling-upgraded"); + final int numDocs = 2143; + + if (isFullyUpgradedTo(VERSION_MINUS_2)) { + logger.debug("--> registering repository [{}]", repository); + registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings()); + + logger.debug("--> creating index [{}]", index); + createIndex( + client(), + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build() + ); + + logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); + indexDocs(index, numDocs); + + logger.debug("--> creating snapshot [{}]", snapshot); + createSnapshot(client(), repository, snapshot, true); + + logger.debug("--> deleting index [{}]", index); + deleteIndex(index); + + logger.debug("--> mounting index [{}] as [{}]", index, mountedIndex); + mountIndex(repository, snapshot, index, randomBoolean(), mountedIndex); + } + + ensureGreen(mountedIndex); + + assertThat(indexVersion(mountedIndex), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), mountedIndex, numDocs); + } +} diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java index 0cd2823080b9..808aec92fb35 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java @@ -206,15 +206,34 @@ public class Docker { ps output: %s - stdout(): + Stdout: %s Stderr: + %s + + Thread dump: %s\ - """, psOutput, dockerLogs.stdout(), dockerLogs.stderr())); + """, psOutput, dockerLogs.stdout(), dockerLogs.stderr(), getThreadDump())); } } + /** + * @return output of jstack for currently running Java process + */ + private static String getThreadDump() { + try { + String pid = dockerShell.run("/usr/share/elasticsearch/jdk/bin/jps | grep -v 'Jps' | awk '{print $1}'").stdout(); + if (pid.isEmpty() == false) { + return dockerShell.run("/usr/share/elasticsearch/jdk/bin/jstack " + Integer.parseInt(pid)).stdout(); + } + } catch (Exception e) { + logger.error("Failed to get thread dump", e); + } + + return ""; + } + /** * Waits for the Elasticsearch container to exit. */ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/esql.async_query_delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/esql.async_query_delete.json new file mode 100644 index 000000000000..a6339559afd7 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/esql.async_query_delete.json @@ -0,0 +1,27 @@ +{ + "esql.async_query_delete": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-delete-api.html", + "description": "Delete an async query request given its ID." 
+ }, + "stability": "stable", + "visibility": "public", + "headers": { + "accept": ["application/json"] + }, + "url": { + "paths": [ + { + "path": "/_query/async/{id}", + "methods": ["DELETE"], + "parts": { + "id": { + "type": "string", + "description": "The async query ID" + } + } + } + ] + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_lifecycle_stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_lifecycle_stats.json new file mode 100644 index 000000000000..8c9e94790340 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_lifecycle_stats.json @@ -0,0 +1,21 @@ +{ + "indices.get_data_lifecycle_stats": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-get-lifecycle-stats.html", + "description": "Get data stream lifecycle statistics." + }, + "stability": "stable", + "visibility": "public", + "headers": { + "accept": ["application/json"] + }, + "url": { + "paths": [ + { + "path": "/_lifecycle/stats", + "methods": ["GET"] + } + ] + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.update.json new file mode 100644 index 000000000000..6c458ce080aa --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.update.json @@ -0,0 +1,45 @@ +{ + "inference.update": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/update-inference-api.html", + "description": "Update inference" + }, + "stability": "stable", + "visibility": "public", + "headers": { + "accept": ["application/json"], + "content_type": ["application/json"] + }, + "url": { + "paths": [ + { + "path": "/_inference/{inference_id}/_update", + "methods": ["POST"], + "parts": { + "inference_id": { + "type": "string", + "description": "The inference Id" + } + } + }, + { + "path": "/_inference/{task_type}/{inference_id}/_update", + "methods": ["POST"], + "parts": { + "task_type": { + "type": "string", + "description": "The task type" + }, + "inference_id": { + "type": "string", + "description": "The inference Id" + } + } + } + ] + }, + "body": { + "description": "The inference endpoint's task and service settings" + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/security.delegate_pki.json b/rest-api-spec/src/main/resources/rest-api-spec/api/security.delegate_pki.json new file mode 100644 index 000000000000..752ea35028b4 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/security.delegate_pki.json @@ -0,0 +1,26 @@ +{ + "security.delegate_pki": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delegate-pki-authentication.html", + "description": "Delegate PKI authentication." 
+ }, + "stability": "stable", + "visibility": "public", + "headers": { + "accept": ["application/json"] + }, + "url": { + "paths": [ + { + "path": "/_security/delegate_pki", + "methods": ["POST"] + } + ] + }, + "params": {}, + "body": { + "description":"The X509Certificate chain.", + "required":true + } + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index 1e16357a2441..8afdbc590649 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -779,17 +779,13 @@ public class TasksIT extends ESIntegTestCase { assertNoFailures(indicesAdmin().prepareRefresh(TaskResultsService.TASK_INDEX).get()); assertHitCount( + 1L, prepareSearch(TaskResultsService.TASK_INDEX).setSource( SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.action", taskInfo.action())) ), - 1L - ); - - assertHitCount( prepareSearch(TaskResultsService.TASK_INDEX).setSource( SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.node", taskInfo.taskId().getNodeId())) - ), - 1L + ) ); GetTaskResponse getResponse = expectFinishedTask(taskId); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java index ad5ac675359b..5bee2d27c3ef 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -113,8 +113,11 @@ public class ShrinkIndexIT extends ESIntegTestCase { .get(); } flushAndRefresh(); - assertHitCount(prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), 20); - assertHitCount(prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), 20); + assertHitCount( + 20, + prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), + prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")) + ); // relocate all shards to one node such that we can merge it. 
updateIndexSettings( @@ -145,9 +148,12 @@ public class ShrinkIndexIT extends ESIntegTestCase { .get(); } flushAndRefresh(); - assertHitCount(prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), 20); - assertHitCount(prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), 20); - assertHitCount(prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), 20); + assertHitCount( + 20, + prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), + prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), + prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")) + ); assertNoResizeSourceIndexSettings("first_shrink"); assertNoResizeSourceIndexSettings("second_shrink"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index a81864c9493e..b61cdf1806e3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -236,9 +236,12 @@ public class SplitIndexIT extends ESIntegTestCase { GetResponse getResponse = client().prepareGet("second_split", Integer.toString(i)).setRouting(routingValue[i]).get(); assertTrue(getResponse.isExists()); } - assertHitCount(prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), numDocs); - assertHitCount(prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), numDocs); - assertHitCount(prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), numDocs); + assertHitCount( + numDocs, + prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), + prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), + prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")) + ); if (useNested) { assertNested("source", numDocs); assertNested("first_split", numDocs); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java index 6dd8fc75df35..2dad8500a309 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java @@ -685,9 +685,12 @@ public class DynamicMappingIT extends ESIntegTestCase { BulkResponse bulkItemResponses = client().bulk(bulkRequest).actionGet(); assertFalse(bulkItemResponses.buildFailureMessage(), bulkItemResponses.hasFailures()); - assertHitCount(prepareSearch("test").setQuery(new MatchQueryBuilder("one", "one")), 1); - assertHitCount(prepareSearch("test").setQuery(new MatchQueryBuilder("one.two", 3.5)), 1); - assertHitCount(prepareSearch("test").setQuery(new MatchQueryBuilder("one.two.three", "1")), 1); + assertHitCount( + 1, + prepareSearch("test").setQuery(new MatchQueryBuilder("one", "one")), + prepareSearch("test").setQuery(new MatchQueryBuilder("one.two", 3.5)), + prepareSearch("test").setQuery(new MatchQueryBuilder("one.two.three", "1")) + ); } public void testDynamicRuntimeObjectFields() { @@ -724,10 +727,13 @@ public 
class DynamicMappingIT extends ESIntegTestCase { BulkResponse bulkItemResponses = client().bulk(bulkRequest).actionGet(); assertFalse(bulkItemResponses.buildFailureMessage(), bulkItemResponses.hasFailures()); - assertHitCount(prepareSearch("test").setQuery(new MatchQueryBuilder("obj.one", 1)), 1); - assertHitCount(prepareSearch("test").setQuery(new MatchQueryBuilder("anything", "anything")), 1); - assertHitCount(prepareSearch("test").setQuery(new MatchQueryBuilder("obj.runtime.one", "one")), 1); - assertHitCount(prepareSearch("test").setQuery(new MatchQueryBuilder("obj.runtime.one.two", "1")), 1); + assertHitCount( + 1, + prepareSearch("test").setQuery(new MatchQueryBuilder("obj.one", 1)), + prepareSearch("test").setQuery(new MatchQueryBuilder("anything", "anything")), + prepareSearch("test").setQuery(new MatchQueryBuilder("obj.runtime.one", "one")), + prepareSearch("test").setQuery(new MatchQueryBuilder("obj.runtime.one.two", "1")) + ); Exception exception = expectThrows(DocumentParsingException.class, prepareIndex("test").setSource("obj.runtime", "value")); assertThat( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java index fea5d256b199..4a7de4b0ebc2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java @@ -91,11 +91,11 @@ public class MultiFieldsIntegrationIT extends ESIntegTestCase { GeoPoint point = new GeoPoint(51, 19); prepareIndex("my-index").setId("1").setSource("a", point.toString()).setRefreshPolicy(IMMEDIATE).get(); assertHitCount( + 1L, prepareSearch("my-index").setSize(0) .setQuery(constantScoreQuery(geoDistanceQuery("a").point(51, 19).distance(50, DistanceUnit.KILOMETERS))), - 1L + prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", point.geohash())) ); - assertHitCount(prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", point.geohash())), 1L); } @SuppressWarnings("unchecked") diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasRoutingIT.java index 199c9a9fb4c8..e14c7c836891 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasRoutingIT.java @@ -123,19 +123,25 @@ public class AliasRoutingIT extends ESIntegTestCase { logger.info("--> search with wrong routing, should not find"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()), 0); - assertHitCount(prepareSearch().setSize(0).setRouting("1").setQuery(QueryBuilders.matchAllQuery()), 0); - assertHitCount(prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()), 0); - assertHitCount(prepareSearch("alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 0); + assertHitCount( + 0, + prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch().setSize(0).setRouting("1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> search with correct routing, should find"); for (int i = 0; i < 5; i++) { - 
assertHitCount(prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()), 1); - assertHitCount(prepareSearch().setSize(0).setRouting("0").setQuery(QueryBuilders.matchAllQuery()), 1); - assertHitCount(prepareSearch("alias0").setQuery(QueryBuilders.matchAllQuery()), 1); - assertHitCount(prepareSearch("alias0").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 1); + assertHitCount( + 1, + prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch().setSize(0).setRouting("0").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias0").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias0").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> indexing with id [2], and routing [1] using alias"); @@ -143,50 +149,71 @@ public class AliasRoutingIT extends ESIntegTestCase { logger.info("--> search with no routing, should fine two"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch().setQuery(QueryBuilders.matchAllQuery()), 2); - assertHitCount(prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()), 2); + assertHitCount( + 2, + prepareSearch().setQuery(QueryBuilders.matchAllQuery()), + prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> search with 0 routing, should find one"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()), 1); - assertHitCount(prepareSearch().setSize(0).setRouting("0").setQuery(QueryBuilders.matchAllQuery()), 1); - assertHitCount(prepareSearch("alias0").setQuery(QueryBuilders.matchAllQuery()), 1); - assertHitCount(prepareSearch("alias0").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 1); + assertHitCount( + 1, + prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch().setSize(0).setRouting("0").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias0").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias0").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> search with 1 routing, should find one"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()), 1); - assertHitCount(prepareSearch().setSize(0).setRouting("1").setQuery(QueryBuilders.matchAllQuery()), 1); - assertHitCount(prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()), 1); - assertHitCount(prepareSearch("alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 1); + assertHitCount( + 1, + prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch().setSize(0).setRouting("1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> search with 0,1 indexRoutings , should find two"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()), 2); - assertHitCount(prepareSearch().setSize(0).setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()), 2); - assertHitCount(prepareSearch("alias01").setQuery(QueryBuilders.matchAllQuery()), 2); - assertHitCount(prepareSearch("alias01").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 2); + assertHitCount( + 2, + prepareSearch().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch().setSize(0).setRouting("0", 
"1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias01").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias01").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> search with two routing aliases , should find two"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch("alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()), 2); - assertHitCount(prepareSearch("alias0", "alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 2); + assertHitCount( + 2, + prepareSearch("alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias0", "alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> search with alias0, alias1 and alias01, should find two"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch("alias0", "alias1", "alias01").setQuery(QueryBuilders.matchAllQuery()), 2); - assertHitCount(prepareSearch("alias0", "alias1", "alias01").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 2); + assertHitCount( + 2, + prepareSearch("alias0", "alias1", "alias01").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias0", "alias1", "alias01").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> search with test, alias0 and alias1, should find two"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch("test", "alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()), 2); - assertHitCount(prepareSearch("test", "alias0", "alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 2); + assertHitCount( + 2, + prepareSearch("test", "alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("test", "alias0", "alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } } @@ -236,20 +263,29 @@ public class AliasRoutingIT extends ESIntegTestCase { logger.info("--> search with alias-a1,alias-b0, should not find"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch("alias-a1", "alias-b0").setQuery(QueryBuilders.matchAllQuery()), 0); - assertHitCount(prepareSearch("alias-a1", "alias-b0").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 0); + assertHitCount( + 0, + prepareSearch("alias-a1", "alias-b0").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias-a1", "alias-b0").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> search with alias-ab, should find two"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch("alias-ab").setQuery(QueryBuilders.matchAllQuery()), 2); - assertHitCount(prepareSearch("alias-ab").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 2); + assertHitCount( + 2, + prepareSearch("alias-ab").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias-ab").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> search with alias-a0,alias-b1 should find two"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch("alias-a0", "alias-b1").setQuery(QueryBuilders.matchAllQuery()), 2); - assertHitCount(prepareSearch("alias-a0", "alias-b1").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 2); + assertHitCount( + 2, + prepareSearch("alias-a0", "alias-b1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias-a0", "alias-b1").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } } @@ -317,8 +353,11 @@ public class AliasRoutingIT extends ESIntegTestCase { logger.info("--> verifying get and search with routing, should find"); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", 
"0").setRouting("3").get().isExists(), equalTo(true)); - assertHitCount(prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()), 1); - assertHitCount(prepareSearch("alias").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 1); + assertHitCount( + 1, + prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> creating alias with routing [4]"); @@ -326,8 +365,11 @@ public class AliasRoutingIT extends ESIntegTestCase { logger.info("--> verifying search with wrong routing should not find"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()), 0); - assertHitCount(prepareSearch("alias").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 0); + assertHitCount( + 0, + prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> creating alias with search routing [3,4] and index routing 4"); @@ -344,8 +386,11 @@ public class AliasRoutingIT extends ESIntegTestCase { for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "0").setRouting("3").get().isExists(), equalTo(true)); assertThat(client().prepareGet("test", "1").setRouting("4").get().isExists(), equalTo(true)); - assertHitCount(prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()), 2); - assertHitCount(prepareSearch("alias").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 2); + assertHitCount( + 2, + prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java index fef2772283f6..5842e04a333b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java @@ -141,14 +141,20 @@ public class SimpleRoutingIT extends ESIntegTestCase { logger.info("--> search with wrong routing, should not find"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()), 0); - assertHitCount(prepareSearch().setSize(0).setRouting("1").setQuery(QueryBuilders.matchAllQuery()), 0); + assertHitCount( + 0, + prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch().setSize(0).setRouting("1").setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> search with correct routing, should find"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch().setRouting(routingValue).setQuery(QueryBuilders.matchAllQuery()), 1); - assertHitCount(prepareSearch().setSize(0).setRouting(routingValue).setQuery(QueryBuilders.matchAllQuery()), 1); + assertHitCount( + 1, + prepareSearch().setRouting(routingValue).setQuery(QueryBuilders.matchAllQuery()), + prepareSearch().setSize(0).setRouting(routingValue).setQuery(QueryBuilders.matchAllQuery()) + ); } String secondRoutingValue = "1"; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java index 9c1daccd2cc9..ab79fd7ba181 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java @@ -40,18 +40,12 @@ import org.elasticsearch.search.retriever.RetrieverBuilder; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.AbstractMultiClustersTestCase; import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.SkipUnavailableRule; +import org.elasticsearch.test.SkipUnavailableRule.NotSkipped; import org.elasticsearch.usage.UsageService; import org.junit.Assert; import org.junit.Rule; -import org.junit.rules.TestRule; -import org.junit.runner.Description; -import org.junit.runners.model.Statement; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -59,8 +53,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import java.util.function.Function; -import java.util.stream.Collectors; import static org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry.ASYNC_FEATURE; import static org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry.MRT_FEATURE; @@ -498,7 +490,7 @@ public class CCSUsageTelemetryIT extends AbstractMultiClustersTestCase { assertThat(perCluster.get(REMOTE2), equalTo(null)); } - @SkipOverride(aliases = { REMOTE1 }) + @NotSkipped(aliases = { REMOTE1 }) public void testRemoteTimesOutFailure() throws Exception { Map testClusterInfo = setupClusters(); String remoteIndex = (String) testClusterInfo.get("remote.index"); @@ -528,7 +520,7 @@ public class CCSUsageTelemetryIT extends AbstractMultiClustersTestCase { /** * Search when all the remotes failed and not skipped */ - @SkipOverride(aliases = { REMOTE1, REMOTE2 }) + @NotSkipped(aliases = { REMOTE1, REMOTE2 }) public void testFailedAllRemotesSearch() throws Exception { Map testClusterInfo = setupClusters(); String localIndex = (String) testClusterInfo.get("local.index"); @@ -577,7 +569,7 @@ public class CCSUsageTelemetryIT extends AbstractMultiClustersTestCase { /** * Test that we're still counting remote search even if remote cluster has no such index */ - @SkipOverride(aliases = { REMOTE1 }) + @NotSkipped(aliases = { REMOTE1 }) public void testRemoteHasNoIndexFailure() throws Exception { SearchRequest searchRequest = makeSearchRequest(REMOTE1 + ":no_such_index"); CCSTelemetrySnapshot telemetry = getTelemetryFromFailedSearch(searchRequest); @@ -695,40 +687,4 @@ public class CCSUsageTelemetryIT extends AbstractMultiClustersTestCase { bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).execute(listener.safeMap(r -> null)); } - /** - * Annotation to mark specific cluster in a test as not to be skipped when unavailable - */ - @Retention(RetentionPolicy.RUNTIME) - @Target(ElementType.METHOD) - @interface SkipOverride { - String[] aliases(); - } - - /** - * Test rule to process skip annotations - */ - static class SkipUnavailableRule implements TestRule { - private final Map skipMap; - - SkipUnavailableRule(String... 
clusterAliases) { - this.skipMap = Arrays.stream(clusterAliases).collect(Collectors.toMap(Function.identity(), alias -> true)); - } - - public Map getMap() { - return skipMap; - } - - @Override - public Statement apply(Statement base, Description description) { - // Check for annotation named "SkipOverride" and set the overrides accordingly - var aliases = description.getAnnotation(SkipOverride.class); - if (aliases != null) { - for (String alias : aliases.aliases()) { - skipMap.put(alias, false); - } - } - return base; - } - - } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java index f790cf30e1c0..6d1f2784a739 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -164,22 +164,19 @@ public class SearchQueryIT extends ESIntegTestCase { forceMerge(); refresh(); assertHitCount( + 3L, prepareSearch().setQuery(matchAllQuery()) .setPostFilter( boolQuery().must(matchAllQuery()) .must(boolQuery().mustNot(boolQuery().must(termQuery("field1", "value1")).must(termQuery("field1", "value2")))) ), - 3L - ); - assertHitCount( prepareSearch().setQuery( boolQuery().must( boolQuery().should(termQuery("field1", "value1")) .should(termQuery("field1", "value2")) .should(termQuery("field1", "value3")) ).filter(boolQuery().mustNot(boolQuery().must(termQuery("field1", "value1")).must(termQuery("field1", "value2")))) - ), - 3L + ) ); assertHitCount(prepareSearch().setQuery(matchAllQuery()).setPostFilter(boolQuery().mustNot(termQuery("field1", "value3"))), 2L); } @@ -309,11 +306,14 @@ public class SearchQueryIT extends ESIntegTestCase { prepareIndex("test").setId("1").setSource("field1", "value_1", "field2", "value_2").get(); refresh(); - assertHitCount(prepareSearch().setQuery(queryStringQuery("value*")), 1L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("*ue*")), 1L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("*ue_1")), 1L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("val*e_1")), 1L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("v?l*e?1")), 1L); + assertHitCount( + 1L, + prepareSearch().setQuery(queryStringQuery("value*")), + prepareSearch().setQuery(queryStringQuery("*ue*")), + prepareSearch().setQuery(queryStringQuery("*ue_1")), + prepareSearch().setQuery(queryStringQuery("val*e_1")), + prepareSearch().setQuery(queryStringQuery("v?l*e?1")) + ); } public void testLowercaseExpandedTerms() { @@ -322,10 +322,13 @@ public class SearchQueryIT extends ESIntegTestCase { prepareIndex("test").setId("1").setSource("field1", "value_1", "field2", "value_2").get(); refresh(); - assertHitCount(prepareSearch().setQuery(queryStringQuery("VALUE_3~1")), 1L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("ValUE_*")), 1L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("vAl*E_1")), 1L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("[VALUE_1 TO VALUE_3]")), 1L); + assertHitCount( + 1L, + prepareSearch().setQuery(queryStringQuery("VALUE_3~1")), + prepareSearch().setQuery(queryStringQuery("ValUE_*")), + prepareSearch().setQuery(queryStringQuery("vAl*E_1")), + prepareSearch().setQuery(queryStringQuery("[VALUE_1 TO VALUE_3]")) + ); } // Issue #3540 @@ -340,8 +343,11 @@ public class SearchQueryIT extends ESIntegTestCase { 
prepareIndex("test").setId("1").setSource("past", aMonthAgo, "future", aMonthFromNow).get(); refresh(); - assertHitCount(prepareSearch().setQuery(queryStringQuery("past:[now-2M/d TO now/d]")), 1L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("future:[now/d TO now+2M/d]")), 1L); + assertHitCount( + 1L, + prepareSearch().setQuery(queryStringQuery("past:[now-2M/d TO now/d]")), + prepareSearch().setQuery(queryStringQuery("future:[now/d TO now+2M/d]")) + ); SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, @@ -377,21 +383,17 @@ public class SearchQueryIT extends ESIntegTestCase { refresh(); // Timezone set with dates - assertHitCount(prepareSearch().setQuery(queryStringQuery("past:[2015-04-06T00:00:00+0200 TO 2015-04-06T23:00:00+0200]")), 2L); - - // Same timezone set with time_zone assertHitCount( - prepareSearch().setQuery(queryStringQuery("past:[2015-04-06T00:00:00 TO 2015-04-06T23:00:00]").timeZone("+0200")), - 2L + 2L, + prepareSearch().setQuery(queryStringQuery("past:[2015-04-06T00:00:00+0200 TO 2015-04-06T23:00:00+0200]")), + prepareSearch().setQuery(queryStringQuery("past:[2015-04-06T00:00:00 TO 2015-04-06T23:00:00]").timeZone("+0200")) ); // We set a timezone which will give no result - assertHitCount(prepareSearch().setQuery(queryStringQuery("past:[2015-04-06T00:00:00-0200 TO 2015-04-06T23:00:00-0200]")), 0L); - - // Same timezone set with time_zone but another timezone is set directly within dates which has the precedence assertHitCount( - prepareSearch().setQuery(queryStringQuery("past:[2015-04-06T00:00:00-0200 TO 2015-04-06T23:00:00-0200]").timeZone("+0200")), - 0L + 0L, + prepareSearch().setQuery(queryStringQuery("past:[2015-04-06T00:00:00-0200 TO 2015-04-06T23:00:00-0200]")), + prepareSearch().setQuery(queryStringQuery("past:[2015-04-06T00:00:00-0200 TO 2015-04-06T23:00:00-0200]").timeZone("+0200")) ); } @@ -502,13 +504,12 @@ public class SearchQueryIT extends ESIntegTestCase { prepareIndex("test").setId("1").setSource("field1", "value1_1", "field2", "value2_1").setRefreshPolicy(IMMEDIATE).get(); WrapperQueryBuilder wrapper = new WrapperQueryBuilder("{ \"term\" : { \"field1\" : \"value1_1\" } }"); - assertHitCount(prepareSearch().setQuery(wrapper), 1L); - - BoolQueryBuilder bool = boolQuery().must(wrapper).must(new TermQueryBuilder("field2", "value2_1")); - assertHitCount(prepareSearch().setQuery(bool), 1L); - - WrapperQueryBuilder wrapperFilter = wrapperQuery("{ \"term\" : { \"field1\" : \"value1_1\" } }"); - assertHitCount(prepareSearch().setPostFilter(wrapperFilter), 1L); + assertHitCount( + 1L, + prepareSearch().setQuery(wrapper), + prepareSearch().setQuery(boolQuery().must(wrapper).must(new TermQueryBuilder("field2", "value2_1"))), + prepareSearch().setPostFilter(wrapperQuery("{ \"term\" : { \"field1\" : \"value1_1\" } }")) + ); } public void testFiltersWithCustomCacheKey() throws Exception { @@ -516,10 +517,13 @@ public class SearchQueryIT extends ESIntegTestCase { prepareIndex("test").setId("1").setSource("field1", "value1").get(); refresh(); - assertHitCount(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))), 1L); - assertHitCount(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))), 1L); - assertHitCount(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))), 1L); - assertHitCount(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))), 1L); + assertHitCount( + 1L, + 
prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))), + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))), + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))), + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))) + ); } public void testMatchQueryNumeric() throws Exception { @@ -994,13 +998,14 @@ public class SearchQueryIT extends ESIntegTestCase { "4" ); - assertHitCount(prepareSearch("test").setQuery(termsLookupQuery("not_exists", new TermsLookup("lookup2", "3", "arr.term"))), 0L); - - // index "lookup" id "missing" document does not exist: ignore the lookup terms - assertHitCount(prepareSearch("test").setQuery(termsLookupQuery("term", new TermsLookup("lookup", "missing", "terms"))), 0L); - - // index "lookup3" has the source disabled: ignore the lookup terms - assertHitCount(prepareSearch("test").setQuery(termsLookupQuery("term", new TermsLookup("lookup3", "1", "terms"))), 0L); + assertHitCount( + 0L, + prepareSearch("test").setQuery(termsLookupQuery("not_exists", new TermsLookup("lookup2", "3", "arr.term"))), + // index "lookup" id "missing" document does not exist: ignore the lookup terms + prepareSearch("test").setQuery(termsLookupQuery("term", new TermsLookup("lookup", "missing", "terms"))), + // index "lookup3" has the source disabled: ignore the lookup terms + prepareSearch("test").setQuery(termsLookupQuery("term", new TermsLookup("lookup3", "1", "terms"))) + ); } public void testBasicQueryById() throws Exception { @@ -1120,18 +1125,14 @@ public class SearchQueryIT extends ESIntegTestCase { refresh(); assertHitCount( + 4L, prepareSearch("test").setPostFilter( boolQuery().should(rangeQuery("num_long").from(1).to(2)).should(rangeQuery("num_long").from(3).to(4)) ), - 4L - ); - - // This made 2826 fail! (only with bit based filters) - assertHitCount( + // This made 2826 fail! (only with bit based filters) prepareSearch("test").setPostFilter( boolQuery().should(rangeQuery("num_long").from(1).to(2)).should(rangeQuery("num_long").from(3).to(4)) - ), - 4L + ) ); // This made #2979 fail! 
@@ -1697,11 +1698,11 @@ public class SearchQueryIT extends ESIntegTestCase {
         refresh();
 
         {
-            WildcardQueryBuilder wildCardQuery = wildcardQuery("field1", "Bb*");
-            assertHitCount(prepareSearch().setQuery(wildCardQuery), 1L);
-
-            wildCardQuery = wildcardQuery("field1", "bb*");
-            assertHitCount(prepareSearch().setQuery(wildCardQuery), 1L);
+            assertHitCount(
+                1L,
+                prepareSearch().setQuery(wildcardQuery("field1", "Bb*")),
+                prepareSearch().setQuery(wildcardQuery("field1", "bb*"))
+            );
         }
     }
 
@@ -1725,12 +1726,12 @@ public class SearchQueryIT extends ESIntegTestCase {
             WildcardQueryBuilder wildCardQuery = wildcardQuery("field1", "Bb*");
             assertHitCount(prepareSearch().setQuery(wildCardQuery), 0L);
 
-            // the following works not because of normalization but because of the `case_insensitive` parameter
-            wildCardQuery = wildcardQuery("field1", "Bb*").caseInsensitive(true);
-            assertHitCount(prepareSearch().setQuery(wildCardQuery), 1L);
-
-            wildCardQuery = wildcardQuery("field1", "bb*");
-            assertHitCount(prepareSearch().setQuery(wildCardQuery), 1L);
+            assertHitCount(
+                1L,
+                // the following works not because of normalization but because of the `case_insensitive` parameter
+                prepareSearch().setQuery(wildcardQuery("field1", "Bb*").caseInsensitive(true)),
+                prepareSearch().setQuery(wildcardQuery("field1", "bb*"))
+            );
         }
     }
 
@@ -1751,11 +1752,11 @@ public class SearchQueryIT extends ESIntegTestCase {
         prepareIndex("test").setId("1").setSource("field", "label-1").get();
         refresh();
 
-        WildcardQueryBuilder wildCardQuery = wildcardQuery("field", "la*");
-        assertHitCount(prepareSearch().setQuery(wildCardQuery), 1L);
-
-        wildCardQuery = wildcardQuery("field", "la*el-?");
-        assertHitCount(prepareSearch().setQuery(wildCardQuery), 1L);
+        assertHitCount(
+            1L,
+            prepareSearch().setQuery(wildcardQuery("field", "la*")),
+            prepareSearch().setQuery(wildcardQuery("field", "la*el-?"))
+        );
     }
 
     public static class MockAnalysisPlugin extends Plugin implements AnalysisPlugin {
diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java
index 5e3947a3cd13..771059c9dbcd 100644
--- a/server/src/main/java/org/elasticsearch/TransportVersions.java
+++ b/server/src/main/java/org/elasticsearch/TransportVersions.java
@@ -149,6 +149,7 @@ public class TransportVersions {
     public static final TransportVersion SIMULATE_IGNORED_FIELDS = def(8_813_00_0);
     public static final TransportVersion TRANSFORMS_UPGRADE_MODE = def(8_814_00_0);
     public static final TransportVersion NODE_SHUTDOWN_EPHEMERAL_ID_ADDED = def(8_815_00_0);
+    public static final TransportVersion ESQL_CCS_TELEMETRY_STATS = def(8_816_00_0);
 
     /*
     * WARNING: DO NOT MERGE INTO MAIN!
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshot.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshot.java
index 3bbaa80ec200..8500302e4f75 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshot.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshot.java
@@ -41,7 +41,6 @@ import java.util.Objects;
  *
  */
 public final class CCSTelemetrySnapshot implements Writeable, ToXContentFragment {
-    public static final String CCS_TELEMETRY_FIELD_NAME = "_search";
     private long totalCount;
     private long successCount;
     private final Map<String, Long> failureReasons;
@@ -66,6 +65,9 @@ public final class CCSTelemetrySnapshot implements Writeable, ToXContentFragment
     private final Map<String, Long> clientCounts;
     private final Map<String, PerClusterCCSTelemetry> byRemoteCluster;
+    // Whether we should use per-MRT (minimize roundtrips) metrics.
+    // ES|QL does not have "minimize_roundtrips" option, so we don't collect those metrics for ES|QL usage.
+    private boolean useMRT = true;
 
     /**
     * Creates a new stats instance with the provided info.
@@ -191,6 +193,11 @@ public final class CCSTelemetrySnapshot implements Writeable, ToXContentFragment
         return Collections.unmodifiableMap(byRemoteCluster);
     }
 
+    public CCSTelemetrySnapshot setUseMRT(boolean useMRT) {
+        this.useMRT = useMRT;
+        return this;
+    }
+
     public static class PerClusterCCSTelemetry implements Writeable, ToXContentFragment {
         private long count;
         private long skippedCount;
@@ -270,6 +277,11 @@ public final class CCSTelemetrySnapshot implements Writeable, ToXContentFragment
         public int hashCode() {
             return Objects.hash(count, skippedCount, took);
         }
+
+        @Override
+        public String toString() {
+            return Strings.toString(this, true, true);
+        }
     }
 
     /**
@@ -291,8 +303,10 @@ public final class CCSTelemetrySnapshot implements Writeable, ToXContentFragment
         stats.featureCounts.forEach((k, v) -> featureCounts.merge(k, v, Long::sum));
         stats.clientCounts.forEach((k, v) -> clientCounts.merge(k, v, Long::sum));
         took.add(stats.took);
-        tookMrtTrue.add(stats.tookMrtTrue);
-        tookMrtFalse.add(stats.tookMrtFalse);
+        if (useMRT) {
+            tookMrtTrue.add(stats.tookMrtTrue);
+            tookMrtFalse.add(stats.tookMrtFalse);
+        }
         remotesPerSearchMax = Math.max(remotesPerSearchMax, stats.remotesPerSearchMax);
         if (totalCount > 0 && oldCount > 0) {
             // Weighted average
@@ -328,30 +342,28 @@ public final class CCSTelemetrySnapshot implements Writeable, ToXContentFragment
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.startObject(CCS_TELEMETRY_FIELD_NAME);
-        {
-            builder.field("total", totalCount);
-            builder.field("success", successCount);
-            builder.field("skipped", skippedRemotes);
-            publishLatency(builder, "took", took);
+        builder.field("total", totalCount);
+        builder.field("success", successCount);
+        builder.field("skipped", skippedRemotes);
+        publishLatency(builder, "took", took);
+        if (useMRT) {
             publishLatency(builder, "took_mrt_true", tookMrtTrue);
             publishLatency(builder, "took_mrt_false", tookMrtFalse);
-            builder.field("remotes_per_search_max", remotesPerSearchMax);
-            builder.field("remotes_per_search_avg", remotesPerSearchAvg);
-            builder.field("failure_reasons", failureReasons);
-            builder.field("features", featureCounts);
-            builder.field("clients", clientCounts);
-            builder.startObject("clusters");
-            {
-                for (var entry : byRemoteCluster.entrySet()) {
-                    String remoteName = entry.getKey();
-                    if (RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(remoteName)) {
-                        remoteName = SearchResponse.LOCAL_CLUSTER_NAME_REPRESENTATION;
-                    }
-                    builder.field(remoteName, entry.getValue());
+        }
+        builder.field("remotes_per_search_max", remotesPerSearchMax);
+        builder.field("remotes_per_search_avg", remotesPerSearchAvg);
+        builder.field("failure_reasons", failureReasons);
+        builder.field("features", featureCounts);
+        builder.field("clients", clientCounts);
+        builder.startObject("clusters");
+        {
+            for (var entry : byRemoteCluster.entrySet()) {
+                String remoteName = entry.getKey();
+                if (RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(remoteName)) {
+                    remoteName = SearchResponse.LOCAL_CLUSTER_NAME_REPRESENTATION;
                 }
+                builder.field(remoteName, entry.getValue());
             }
-            builder.endObject();
         }
         builder.endObject();
         return builder;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsage.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsage.java
index 9e58d6d8febe..29a7dcb5d07d 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsage.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsage.java
@@ -10,6 +10,7 @@ package org.elasticsearch.action.admin.cluster.stats;
 
 import org.elasticsearch.ElasticsearchSecurityException;
+import org.elasticsearch.ElasticsearchStatusException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.ResourceNotFoundException;
 import org.elasticsearch.action.ShardOperationFailedException;
@@ -20,6 +21,7 @@ import org.elasticsearch.action.search.TransportSearchAction;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.search.SearchShardTarget;
 import org.elasticsearch.search.query.SearchTimeoutException;
+import org.elasticsearch.tasks.Task;
 import org.elasticsearch.tasks.TaskCancelledException;
 
 import java.util.Arrays;
@@ -84,6 +86,15 @@ public class CCSUsage {
             return this;
         }
 
+        public Builder setClientFromTask(Task task) {
+            String client = task.getHeader(Task.X_ELASTIC_PRODUCT_ORIGIN_HTTP_HEADER);
+            if (client != null) {
+                return setClient(client);
+            } else {
+                return this;
+            }
+        }
+
         public Builder skippedRemote(String remote) {
             this.skippedRemotes.add(remote);
             return this;
@@ -133,6 +144,10 @@ public class CCSUsage {
         if (ExceptionsHelper.unwrapCorruption(e) != null) {
             return Result.CORRUPTION;
         }
+        ElasticsearchStatusException se = (ElasticsearchStatusException) ExceptionsHelper.unwrap(e, ElasticsearchStatusException.class);
+        if (se != null && se.getDetailedMessage().contains("license")) {
+            return Result.LICENSE;
+        }
         // This is kind of last resort check - if we still don't know the reason but all shard failures are remote,
         // we assume it's remote's fault somehow.
         if (e instanceof SearchPhaseExecutionException spe) {
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java
index 6c8178282d3c..3f04eceed7eb 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java
@@ -47,6 +47,7 @@ public class CCSUsageTelemetry {
         TIMEOUT("timeout"),
         CORRUPTION("corruption"),
         SECURITY("security"),
+        LICENSE("license"),
         // May be helpful if there's a lot of other reasons, and it may be hard to calculate the unknowns for some clients.
         UNKNOWN("other");
 
@@ -106,8 +107,14 @@ public class CCSUsageTelemetry {
     private final Map<String, LongAdder> clientCounts;
     private final Map<String, PerClusterCCSTelemetry> byRemoteCluster;
+    // Should we calculate separate metrics per MRT?
+    private final boolean useMRT;
 
     public CCSUsageTelemetry() {
+        this(true);
+    }
+
+    public CCSUsageTelemetry(boolean useMRT) {
         this.byRemoteCluster = new ConcurrentHashMap<>();
         totalCount = new LongAdder();
         successCount = new LongAdder();
@@ -119,6 +126,7 @@ public class CCSUsageTelemetry {
         skippedRemotes = new LongAdder();
         featureCounts = new ConcurrentHashMap<>();
         clientCounts = new ConcurrentHashMap<>();
+        this.useMRT = useMRT;
     }
 
     public void updateUsage(CCSUsage ccsUsage) {
@@ -134,10 +142,12 @@ public class CCSUsageTelemetry {
         if (isSuccess(ccsUsage)) {
             successCount.increment();
             took.record(searchTook);
-            if (isMRT(ccsUsage)) {
-                tookMrtTrue.record(searchTook);
-            } else {
-                tookMrtFalse.record(searchTook);
+            if (useMRT) {
+                if (isMRT(ccsUsage)) {
+                    tookMrtTrue.record(searchTook);
+                } else {
+                    tookMrtFalse.record(searchTook);
+                }
             }
             ccsUsage.getPerClusterUsage().forEach((r, u) -> byRemoteCluster.computeIfAbsent(r, PerClusterCCSTelemetry::new).update(u));
         } else {
@@ -243,6 +253,6 @@ public class CCSUsageTelemetry {
             Collections.unmodifiableMap(Maps.transformValues(featureCounts, LongAdder::longValue)),
             Collections.unmodifiableMap(Maps.transformValues(clientCounts, LongAdder::longValue)),
             Collections.unmodifiableMap(Maps.transformValues(byRemoteCluster, PerClusterCCSTelemetry::getSnapshot))
-        );
+        ).setUseMRT(useMRT);
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java
index abeb73e5d8c3..48b4e967742c 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java
@@ -31,7 +31,8 @@ public class ClusterStatsNodeResponse extends BaseNodeResponse {
     private final ClusterHealthStatus clusterStatus;
     private final SearchUsageStats searchUsageStats;
     private final RepositoryUsageStats repositoryUsageStats;
-    private final CCSTelemetrySnapshot ccsMetrics;
+    private final CCSTelemetrySnapshot searchCcsMetrics;
+    private final CCSTelemetrySnapshot esqlCcsMetrics;
 
     public ClusterStatsNodeResponse(StreamInput in) throws IOException {
         super(in);
@@ -46,10 +47,15 @@ public class ClusterStatsNodeResponse extends BaseNodeResponse {
         }
         if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             repositoryUsageStats = RepositoryUsageStats.readFrom(in);
-            ccsMetrics = new CCSTelemetrySnapshot(in);
+            searchCcsMetrics = new CCSTelemetrySnapshot(in);
         } else {
             repositoryUsageStats = RepositoryUsageStats.EMPTY;
-            ccsMetrics = new CCSTelemetrySnapshot();
+            searchCcsMetrics = new CCSTelemetrySnapshot();
+        }
+        if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_CCS_TELEMETRY_STATS)) {
+            esqlCcsMetrics = new CCSTelemetrySnapshot(in);
+        } else {
+            esqlCcsMetrics = new CCSTelemetrySnapshot();
         }
     }
 
@@ -61,7 +67,8 @@ public class ClusterStatsNodeResponse extends BaseNodeResponse {
         ShardStats[] shardsStats,
         SearchUsageStats searchUsageStats,
         RepositoryUsageStats repositoryUsageStats,
-        CCSTelemetrySnapshot ccsTelemetrySnapshot
+        CCSTelemetrySnapshot ccsTelemetrySnapshot,
+        CCSTelemetrySnapshot esqlTelemetrySnapshot
     ) {
         super(node);
         this.nodeInfo = nodeInfo;
@@ -70,7 +77,8 @@ public class ClusterStatsNodeResponse extends BaseNodeResponse {
         this.clusterStatus = clusterStatus;
         this.searchUsageStats = Objects.requireNonNull(searchUsageStats);
         this.repositoryUsageStats = Objects.requireNonNull(repositoryUsageStats);
-        this.ccsMetrics = ccsTelemetrySnapshot;
+        this.searchCcsMetrics = ccsTelemetrySnapshot;
+        this.esqlCcsMetrics = esqlTelemetrySnapshot;
     }
 
     public NodeInfo nodeInfo() {
@@ -101,8 +109,12 @@ public class ClusterStatsNodeResponse extends BaseNodeResponse {
         return repositoryUsageStats;
     }
 
-    public CCSTelemetrySnapshot getCcsMetrics() {
-        return ccsMetrics;
+    public CCSTelemetrySnapshot getSearchCcsMetrics() {
+        return searchCcsMetrics;
+    }
+
+    public CCSTelemetrySnapshot getEsqlCcsMetrics() {
+        return esqlCcsMetrics;
     }
 
     @Override
@@ -117,8 +129,11 @@ public class ClusterStatsNodeResponse extends BaseNodeResponse {
         }
         if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             repositoryUsageStats.writeTo(out);
-            ccsMetrics.writeTo(out);
+            searchCcsMetrics.writeTo(out);
         } // else just drop these stats, ok for bwc
+        if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_CCS_TELEMETRY_STATS)) {
+            esqlCcsMetrics.writeTo(out);
+        }
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java
index 5f7c45c5807a..ed8ca2f94a78 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java
@@ -36,10 +36,14 @@ public class ClusterStatsResponse extends BaseNodesResponse<ClusterStatsNodeRes
     final CCSTelemetrySnapshot ccsMetrics;
+    final CCSTelemetrySnapshot esqlMetrics;
     final long timestamp;
     final String clusterUUID;
     private final Map<String, RemoteClusterStats> remoteClustersStats;
 
+    public static final String CCS_TELEMETRY_FIELD_NAME = "_search";
+    public static final String ESQL_TELEMETRY_FIELD_NAME = "_esql";
+
     public ClusterStatsResponse(
         long timestamp,
         String clusterUUID,
@@ -58,6 +62,7 @@ public class ClusterStatsResponse extends BaseNodesResponse<ClusterStatsNodeRes
-        nodes.forEach(node -> ccsMetrics.add(node.getCcsMetrics()));
+        nodes.forEach(node -> {
+            ccsMetrics.add(node.getSearchCcsMetrics());
+            esqlMetrics.add(node.getEsqlCcsMetrics());
+        });
 
         this.status = status;
         this.clusterSnapshotStats = clusterSnapshotStats;
@@ -147,9 +155,18 @@ public class ClusterStatsResponse extends BaseNodesResponse<ClusterStatsNodeRes
-        ccsMetrics.toXContent(builder, params);
+        builder.startObject(CCS_TELEMETRY_FIELD_NAME);
+        ccsMetrics.toXContent(builder, params);
+        builder.endObject();
+
+        if (esqlMetrics.getTotalCount() > 0) {
+            builder.startObject(ESQL_TELEMETRY_FIELD_NAME);
+            esqlMetrics.toXContent(builder, params);
+            builder.endObject();
+        }
+
+        builder.endObject();
+        return builder;
     }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java
index c44db96e684e..0a63ce332c3e 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java
@@ -103,6 +103,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<
     private final RepositoriesService repositoriesService;
     private final SearchUsageHolder searchUsageHolder;
     private final CCSUsageTelemetry ccsUsageHolder;
+    private final CCSUsageTelemetry esqlUsageHolder;
 
     private final Executor clusterStateStatsExecutor;
     private final MetadataStatsCache<MappingStats> mappingStatsCache;
@@ -135,6 +136,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<
         this.repositoriesService = repositoriesService;
         this.searchUsageHolder = usageService.getSearchUsageHolder();
         this.ccsUsageHolder = usageService.getCcsUsageHolder();
+        this.esqlUsageHolder = usageService.getEsqlUsageHolder();
         this.clusterStateStatsExecutor = threadPool.executor(ThreadPool.Names.MANAGEMENT);
         this.mappingStatsCache = new MetadataStatsCache<>(threadPool.getThreadContext(), MappingStats::of);
         this.analysisStatsCache = new MetadataStatsCache<>(threadPool.getThreadContext(), AnalysisStats::of);
@@ -297,6 +299,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<
 
         final RepositoryUsageStats repositoryUsageStats = repositoriesService.getUsageStats();
         final CCSTelemetrySnapshot ccsTelemetry = ccsUsageHolder.getCCSTelemetrySnapshot();
+        final CCSTelemetrySnapshot esqlTelemetry = esqlUsageHolder.getCCSTelemetrySnapshot();
 
         return new ClusterStatsNodeResponse(
             nodeInfo.getNode(),
@@ -306,7 +309,8 @@ public class TransportClusterStatsAction extends TransportNodesAction<
             shardsStats.toArray(new ShardStats[shardsStats.size()]),
             searchUsageStats,
             repositoryUsageStats,
-            ccsTelemetry
+            ccsTelemetry,
+            esqlTelemetry
         );
     }
 
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java
index c30a2a44274a..e3e737595cac 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java
@@ -141,7 +141,7 @@ public class TransportResolveClusterAction extends HandledTransportAction
@@ -1928,8 +1926,8 @@ public class TransportSearchAction extends HandledTransportAction
                 new EntitlementBootstrap.PluginData(bundle.getDir(), bundle.pluginDescriptor().isModular(), true))
             ).toList();
+            var pluginsResolver = PluginsResolver.create(pluginsLoader);
+            EntitlementBootstrap.bootstrap(pluginData, pluginsResolver::resolveClassToPluginName);
         } else if (RuntimeVersionFeature.isSecurityManagerAvailable()) {
             // install SM after natives, shutdown hooks, etc.
diff --git a/server/src/main/java/org/elasticsearch/features/FeatureService.java b/server/src/main/java/org/elasticsearch/features/FeatureService.java
index c04fbae05ee2..da71b8f0ec2f 100644
--- a/server/src/main/java/org/elasticsearch/features/FeatureService.java
+++ b/server/src/main/java/org/elasticsearch/features/FeatureService.java
@@ -29,7 +29,7 @@ public class FeatureService {
     /**
     * A feature indicating that node features are supported.
     */
-    public static final NodeFeature FEATURES_SUPPORTED = new NodeFeature("features_supported");
+    public static final NodeFeature FEATURES_SUPPORTED = new NodeFeature("features_supported", true);
 
     public static final NodeFeature TEST_FEATURES_ENABLED = new NodeFeature("test_features_enabled");
 
     private static final Logger logger = LogManager.getLogger(FeatureService.class);
diff --git a/server/src/main/java/org/elasticsearch/features/NodeFeature.java b/server/src/main/java/org/elasticsearch/features/NodeFeature.java
index 961b386d6280..ad270540274b 100644
--- a/server/src/main/java/org/elasticsearch/features/NodeFeature.java
+++ b/server/src/main/java/org/elasticsearch/features/NodeFeature.java
@@ -17,7 +17,7 @@ import java.util.Objects;
  * @param id The feature id. Must be unique in the node.
  * @param assumedAfterNextCompatibilityBoundary
  *            {@code true} if this feature is removed at the next compatibility boundary (ie next major version),
- *            and so should be assumed to be true for all nodes after that boundary.
+ *            and so should be assumed to be met by all nodes after that boundary, even if they don't publish it.
  */
 public record NodeFeature(String id, boolean assumedAfterNextCompatibilityBoundary) {
diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java
index 7a5cd97e5a3a..8d6404e0530e 100644
--- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java
+++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java
@@ -133,6 +133,7 @@ public class IndexVersions {
     public static final IndexVersion V8_DEPRECATE_SOURCE_MODE_MAPPER = def(8_521_00_0, Version.LUCENE_9_12_0);
     public static final IndexVersion USE_SYNTHETIC_SOURCE_FOR_RECOVERY_BACKPORT = def(8_522_00_0, Version.LUCENE_9_12_0);
     public static final IndexVersion UPGRADE_TO_LUCENE_9_12_1 = def(8_523_00_0, parseUnchecked("9.12.1"));
+    public static final IndexVersion INFERENCE_METADATA_FIELDS_BACKPORT = def(8_524_00_0, parseUnchecked("9.12.1"));
     public static final IndexVersion UPGRADE_TO_LUCENE_10_0_0 = def(9_000_00_0, Version.LUCENE_10_0_0);
     public static final IndexVersion LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT = def(9_001_00_0, Version.LUCENE_10_0_0);
     public static final IndexVersion TIME_BASED_K_ORDERED_DOC_ID = def(9_002_00_0, Version.LUCENE_10_0_0);
diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
index 0a470e86ef85..8d3d1bde316e 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
@@ -661,7 +661,8 @@ public class InternalEngine extends Engine {
             translogDeletionPolicy,
             globalCheckpointSupplier,
             engineConfig.getPrimaryTermSupplier(),
-            persistedSequenceNumberConsumer
+            persistedSequenceNumberConsumer,
+            TranslogOperationAsserter.withEngineConfig(engineConfig)
         );
     }
diff --git a/server/src/main/java/org/elasticsearch/index/engine/NoOpEngine.java b/server/src/main/java/org/elasticsearch/index/engine/NoOpEngine.java
index 49e0ae058708..044e6f6712c7 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/NoOpEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/NoOpEngine.java
@@ -166,7 +166,8 @@ public final class NoOpEngine extends ReadOnlyEngine {
                 translogDeletionPolicy,
                 engineConfig.getGlobalCheckpointSupplier(),
                 engineConfig.getPrimaryTermSupplier(),
-                seqNo -> {}
+                seqNo -> {},
+                TranslogOperationAsserter.DEFAULT
             )
         ) {
             translog.trimUnreferencedReaders();
diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
index c3ab2ee91080..010fc1bd9e41 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
@@ -267,7 +267,8 @@ public class ReadOnlyEngine extends Engine {
                 translogDeletionPolicy,
                 config.getGlobalCheckpointSupplier(),
                 config.getPrimaryTermSupplier(),
-                seqNo -> {}
+                seqNo -> {},
+                TranslogOperationAsserter.DEFAULT
             )
         ) {
             return translog.stats();
diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java
index b1c311af88e2..0928b4500e6d 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java
@@ -99,10 +99,6 @@ final class TranslogDirectoryReader extends DirectoryReader {
         return new UnsupportedOperationException();
     }
 
-    public TranslogLeafReader getLeafReader() {
-        return leafReader;
-    }
-
     @Override
     protected DirectoryReader doOpenIfChanged() {
         throw unsupported();
@@ -143,6 +139,45 @@ final class TranslogDirectoryReader extends DirectoryReader {
         return leafReader.getReaderCacheHelper();
     }
 
+    static DirectoryReader createInMemoryReader(
+        ShardId shardId,
+        EngineConfig engineConfig,
+        Directory directory,
+        DocumentParser documentParser,
+        MappingLookup mappingLookup,
+        Translog.Index operation
+    ) {
+        final ParsedDocument parsedDocs = documentParser.parseDocument(
+            new SourceToParse(operation.id(), operation.source(), XContentHelper.xContentType(operation.source()), operation.routing()),
+            mappingLookup
+        );
+
+        parsedDocs.updateSeqID(operation.seqNo(), operation.primaryTerm());
+        parsedDocs.version().setLongValue(operation.version());
+        // To guarantee indexability, we configure the analyzer and codec using the main engine configuration
+        final IndexWriterConfig writeConfig = new IndexWriterConfig(engineConfig.getAnalyzer()).setOpenMode(
+            IndexWriterConfig.OpenMode.CREATE
+        ).setCodec(engineConfig.getCodec());
+        try (IndexWriter writer = new IndexWriter(directory, writeConfig)) {
+            writer.addDocument(parsedDocs.rootDoc());
+            final DirectoryReader reader = open(writer);
+            if (reader.leaves().size() != 1 || reader.leaves().get(0).reader().numDocs() != 1) {
+                reader.close();
+                throw new IllegalStateException(
+                    "Expected a single document segment; "
+                        + "but ["
+                        + reader.leaves().size()
+                        + " segments with "
+                        + reader.leaves().get(0).reader().numDocs()
+                        + " documents"
+                );
+            }
+            return reader;
+        } catch (IOException e) {
+            throw new EngineException(shardId, "failed to create an in-memory segment for get [" + operation.id() + "]", e);
+        }
+    }
+
     private static class TranslogLeafReader extends LeafReader {
 
         private static final FieldInfo FAKE_SOURCE_FIELD = new FieldInfo(
@@ -244,7 +279,8 @@ final class TranslogDirectoryReader extends DirectoryReader {
             ensureOpen();
             reader = delegate.get();
             if (reader == null) {
-                reader = createInMemoryLeafReader();
+                var indexReader = createInMemoryReader(shardId, engineConfig, directory, documentParser, mappingLookup, operation);
+                reader = indexReader.leaves().get(0).reader();
                 final LeafReader existing = delegate.getAndSet(reader);
                 assert existing == null;
                 onSegmentCreated.run();
@@ -254,39 +290,6 @@ final class TranslogDirectoryReader extends DirectoryReader {
             return reader;
         }
 
-        private LeafReader createInMemoryLeafReader() {
-            assert Thread.holdsLock(this);
-            final ParsedDocument parsedDocs = documentParser.parseDocument(
-                new SourceToParse(operation.id(), operation.source(), XContentHelper.xContentType(operation.source()), operation.routing()),
-                mappingLookup
-            );
-
-            parsedDocs.updateSeqID(operation.seqNo(), operation.primaryTerm());
-            parsedDocs.version().setLongValue(operation.version());
-            // To guarantee indexability, we configure the analyzer and codec using the main engine configuration
-            final IndexWriterConfig writeConfig = new IndexWriterConfig(engineConfig.getAnalyzer()).setOpenMode(
-                IndexWriterConfig.OpenMode.CREATE
-            ).setCodec(engineConfig.getCodec());
-            try (IndexWriter writer = new IndexWriter(directory, writeConfig)) {
-                writer.addDocument(parsedDocs.rootDoc());
-                final DirectoryReader reader = open(writer);
-                if (reader.leaves().size() != 1 || reader.leaves().get(0).reader().numDocs() != 1) {
-                    reader.close();
-                    throw new IllegalStateException(
-                        "Expected a single document segment; "
-                            + "but ["
-                            + reader.leaves().size()
-                            + " segments with "
-                            + reader.leaves().get(0).reader().numDocs()
-                            + " documents"
-                    );
-                }
-                return reader.leaves().get(0).reader();
-            } catch (IOException e) {
-                throw new EngineException(shardId, "failed to create an in-memory segment for get [" + operation.id() + "]", e);
-            }
-        }
-
         @Override
         public CacheHelper getCoreCacheHelper() {
             return getDelegate().getCoreCacheHelper();
diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogOperationAsserter.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogOperationAsserter.java
new file mode 100644
index 000000000000..00de13b8e8d8
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogOperationAsserter.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.apache.lucene.search.similarities.BM25Similarity;
+import org.apache.lucene.store.ByteBuffersDirectory;
+import org.elasticsearch.index.cache.query.TrivialQueryCachingPolicy;
+import org.elasticsearch.index.mapper.DocumentParser;
+import org.elasticsearch.index.mapper.MappingLookup;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.translog.Translog;
+
+import java.io.IOException;
+
+/**
+ *
+ * A utility class to assert that translog operations with the same sequence number
+ * in the same generation are either identical or equivalent when synthetic sources are used.
+ */
+public abstract class TranslogOperationAsserter {
+    public static final TranslogOperationAsserter DEFAULT = new TranslogOperationAsserter() {
+    };
+
+    private TranslogOperationAsserter() {
+
+    }
+
+    public static TranslogOperationAsserter withEngineConfig(EngineConfig engineConfig) {
+        return new TranslogOperationAsserter() {
+            @Override
+            public boolean assertSameIndexOperation(Translog.Index o1, Translog.Index o2) throws IOException {
+                if (super.assertSameIndexOperation(o1, o2)) {
+                    return true;
+                }
+                if (engineConfig.getIndexSettings().isRecoverySourceSyntheticEnabled()) {
+                    return super.assertSameIndexOperation(synthesizeSource(engineConfig, o1), o2)
+                        || super.assertSameIndexOperation(o1, synthesizeSource(engineConfig, o2));
+                }
+                return false;
+            }
+        };
+    }
+
+    static Translog.Index synthesizeSource(EngineConfig engineConfig, Translog.Index op) throws IOException {
+        final ShardId shardId = engineConfig.getShardId();
+        final MappingLookup mappingLookup = engineConfig.getMapperService().mappingLookup();
+        final DocumentParser documentParser = engineConfig.getMapperService().documentParser();
+        try (
+            var directory = new ByteBuffersDirectory();
+            var reader = TranslogDirectoryReader.createInMemoryReader(shardId, engineConfig, directory, documentParser, mappingLookup, op)
+        ) {
+            final Engine.Searcher searcher = new Engine.Searcher(
+                "assert_translog",
+                reader,
+                new BM25Similarity(),
+                null,
+                TrivialQueryCachingPolicy.NEVER,
+                () -> {}
+            );
+            try (
+                LuceneSyntheticSourceChangesSnapshot snapshot = new LuceneSyntheticSourceChangesSnapshot(
+                    mappingLookup,
+                    searcher,
+                    LuceneSyntheticSourceChangesSnapshot.DEFAULT_BATCH_SIZE,
+                    Integer.MAX_VALUE,
+                    op.seqNo(),
+                    op.seqNo(),
+                    true,
+                    false,
+                    engineConfig.getIndexSettings().getIndexVersionCreated()
+                )
+            ) {
+                final Translog.Operation normalized = snapshot.next();
+                assert normalized != null : "expected one operation; got zero";
+                return (Translog.Index) normalized;
+            }
+        }
+    }
+
+    public boolean assertSameIndexOperation(Translog.Index o1, Translog.Index o2) throws IOException {
+        return Translog.Index.equalsWithoutAutoGeneratedTimestamp(o1, o2);
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/CustomSyntheticSourceFieldLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/CustomSyntheticSourceFieldLookup.java
deleted file mode 100644
index dbbee8a9035d..000000000000
--- a/server/src/main/java/org/elasticsearch/index/mapper/CustomSyntheticSourceFieldLookup.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.index.mapper;
-
-import org.elasticsearch.core.Nullable;
-import org.elasticsearch.index.IndexSettings;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Contains lookup information needed to perform custom synthetic source logic.
- * For example fields that use fallback synthetic source implementation or fields that preserve array ordering
- * in synthetic source;
- */
-public class CustomSyntheticSourceFieldLookup {
-    private final Map<String, Reason> fieldsWithCustomSyntheticSourceHandling;
-
-    public CustomSyntheticSourceFieldLookup(Mapping mapping, @Nullable IndexSettings indexSettings, boolean isSourceSynthetic) {
-        var fields = new HashMap<String, Reason>();
-        if (isSourceSynthetic && indexSettings != null) {
-            populateFields(fields, mapping.getRoot(), indexSettings.sourceKeepMode());
-        }
-        this.fieldsWithCustomSyntheticSourceHandling = Map.copyOf(fields);
-    }
-
-    private void populateFields(Map<String, Reason> fields, ObjectMapper currentLevel, Mapper.SourceKeepMode defaultSourceKeepMode) {
-        if (currentLevel.isEnabled() == false) {
-            fields.put(currentLevel.fullPath(), Reason.DISABLED_OBJECT);
-            return;
-        }
-        if (sourceKeepMode(currentLevel, defaultSourceKeepMode) == Mapper.SourceKeepMode.ALL) {
-            fields.put(currentLevel.fullPath(), Reason.SOURCE_KEEP_ALL);
-            return;
-        }
-        if (currentLevel.isNested() == false && sourceKeepMode(currentLevel, defaultSourceKeepMode) == Mapper.SourceKeepMode.ARRAYS) {
-            fields.put(currentLevel.fullPath(), Reason.SOURCE_KEEP_ARRAYS);
-        }
-
-        for (Mapper child : currentLevel) {
-            if (child instanceof ObjectMapper objectMapper) {
-                populateFields(fields, objectMapper, defaultSourceKeepMode);
-            } else if (child instanceof FieldMapper fieldMapper) {
-                // The order here is important.
-                // If fallback logic is used, it should be always correctly marked as FALLBACK_SYNTHETIC_SOURCE.
-                // This allows us to apply an optimization for SOURCE_KEEP_ARRAYS and don't store arrays that have one element.
-                // If this order is changed and a field that both has SOURCE_KEEP_ARRAYS and FALLBACK_SYNTHETIC_SOURCE
-                // is marked as SOURCE_KEEP_ARRAYS we would lose data for this field by applying such an optimization.
-                if (fieldMapper.syntheticSourceMode() == FieldMapper.SyntheticSourceMode.FALLBACK) {
-                    fields.put(fieldMapper.fullPath(), Reason.FALLBACK_SYNTHETIC_SOURCE);
-                } else if (sourceKeepMode(fieldMapper, defaultSourceKeepMode) == Mapper.SourceKeepMode.ALL) {
-                    fields.put(fieldMapper.fullPath(), Reason.SOURCE_KEEP_ALL);
-                } else if (sourceKeepMode(fieldMapper, defaultSourceKeepMode) == Mapper.SourceKeepMode.ARRAYS) {
-                    fields.put(fieldMapper.fullPath(), Reason.SOURCE_KEEP_ARRAYS);
-                }
-            }
-        }
-    }
-
-    private Mapper.SourceKeepMode sourceKeepMode(ObjectMapper mapper, Mapper.SourceKeepMode defaultSourceKeepMode) {
-        return mapper.sourceKeepMode().orElse(defaultSourceKeepMode);
-    }
-
-    private Mapper.SourceKeepMode sourceKeepMode(FieldMapper mapper, Mapper.SourceKeepMode defaultSourceKeepMode) {
-        return mapper.sourceKeepMode().orElse(defaultSourceKeepMode);
-    }
-
-    public Map<String, Reason> getFieldsWithCustomSyntheticSourceHandling() {
-        return fieldsWithCustomSyntheticSourceHandling;
-    }
-
-    /**
-     * Specifies why this field needs custom handling.
-     */
-    public enum Reason {
-        SOURCE_KEEP_ARRAYS,
-        SOURCE_KEEP_ALL,
-        FALLBACK_SYNTHETIC_SOURCE,
-        DISABLED_OBJECT
-    }
-}
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java
index 5abb7b5a1b72..03e6c343c7ab 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java
@@ -49,7 +49,6 @@ public class DocumentMapper {
             mapping,
             mapping.toCompressedXContent(),
             IndexVersion.current(),
-            mapperService.getIndexSettings(),
             mapperService.getMapperMetrics(),
             mapperService.index().getName()
         );
@@ -60,13 +59,12 @@ public class DocumentMapper {
         Mapping mapping,
         CompressedXContent source,
         IndexVersion version,
-        IndexSettings indexSettings,
         MapperMetrics mapperMetrics,
         String indexName
     ) {
         this.documentParser = documentParser;
         this.type = mapping.getRoot().fullPath();
-        this.mappingLookup = MappingLookup.fromMapping(mapping, indexSettings);
+        this.mappingLookup = MappingLookup.fromMapping(mapping);
         this.mappingSource = source;
         this.mapperMetrics = mapperMetrics;
         this.indexVersion = version;
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
index 0fc14d4dbeeb..9ddb6f0d496a 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
@@ -27,7 +27,6 @@ import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
 import org.elasticsearch.plugins.internal.XContentMeteringParserDecorator;
 import org.elasticsearch.search.lookup.SearchLookup;
 import org.elasticsearch.search.lookup.Source;
-import org.elasticsearch.xcontent.FilterXContentParserWrapper;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentLocation;
 import org.elasticsearch.xcontent.XContentParseException;
@@ -44,7 +43,6 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
-import java.util.function.BiFunction;
 import java.util.function.Consumer;
 
 import static org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.MAX_DIMS_COUNT;
@@ -62,22 +60,10 @@ public final class DocumentParser {
 
     private final XContentParserConfiguration parserConfiguration;
     private final MappingParserContext mappingParserContext;
-    private final BiFunction<MappingLookup, XContentType, Listeners> listenersFactory;
 
     DocumentParser(XContentParserConfiguration parserConfiguration, MappingParserContext mappingParserContext) {
         this.mappingParserContext = mappingParserContext;
         this.parserConfiguration = parserConfiguration;
-        this.listenersFactory = this::createDefaultListeners;
-    }
-
-    DocumentParser(
-        XContentParserConfiguration parserConfiguration,
-        MappingParserContext mappingParserContext,
-        BiFunction<MappingLookup, XContentType, Listeners> listenersFactory
-    ) {
-        this.mappingParserContext = mappingParserContext;
-        this.parserConfiguration = parserConfiguration;
-        this.listenersFactory = listenersFactory;
     }
 
     /**
@@ -95,11 +81,13 @@ public final class DocumentParser {
         final RootDocumentParserContext context;
         final XContentType xContentType = source.getXContentType();
 
-        Listeners listeners = listenersFactory.apply(mappingLookup, xContentType);
-
         XContentMeteringParserDecorator meteringParserDecorator = source.getMeteringParserDecorator();
-        try (XContentParser parser = meteringParserDecorator.decorate(createParser(source, xContentType, listeners))) {
-            context = new RootDocumentParserContext(mappingLookup, mappingParserContext, source, listeners, parser);
+        try (
+            XContentParser parser = meteringParserDecorator.decorate(
+                XContentHelper.createParser(parserConfiguration, source.source(), xContentType)
+            )
+        ) {
+            context = new RootDocumentParserContext(mappingLookup, mappingParserContext, source, parser);
             validateStart(context.parser());
             MetadataFieldMapper[] metadataFieldsMappers = mappingLookup.getMapping().getSortedMetadataMappers();
             internalParseDocument(metadataFieldsMappers, context);
@@ -133,152 +121,6 @@ public final class DocumentParser {
         };
     }
 
-    private Listeners createDefaultListeners(MappingLookup mappingLookup, XContentType xContentType) {
-        if (mappingLookup.isSourceSynthetic() && mappingParserContext.getIndexSettings().getSkipIgnoredSourceWrite() == false) {
-            return new Listeners.Single(new SyntheticSourceDocumentParserListener(mappingLookup, xContentType));
-        }
-
-        return Listeners.NOOP;
-    }
-
-    private XContentParser createParser(SourceToParse sourceToParse, XContentType xContentType, Listeners listeners) throws IOException {
-        XContentParser plainParser = XContentHelper.createParser(parserConfiguration, sourceToParse.source(), xContentType);
-
-        if (listeners.isNoop()) {
-            return plainParser;
-        }
-
-        return new ListenerAwareXContentParser(plainParser, listeners);
-    }
-
-    static class ListenerAwareXContentParser extends FilterXContentParserWrapper {
-        private final Listeners listeners;
-
-        ListenerAwareXContentParser(XContentParser parser, Listeners listeners) {
-            super(parser);
-            this.listeners = listeners;
-        }
-
-        @Override
-        public Token nextToken() throws IOException {
-            var token = delegate().nextToken();
-
-            if (listeners.anyActive()) {
-                var listenerToken = DocumentParserListener.Token.current(delegate());
-                listeners.publish(listenerToken);
-            }
-
-            return token;
-        }
-
-        @Override
-        public void skipChildren() throws IOException {
-            // We can not use "native" implementation because some listeners may want to see
-            // skipped parts.
-            Token token = currentToken();
-            if (token != Token.START_OBJECT && token != Token.START_ARRAY) {
-                return;
-            }
-
-            int depth = 0;
-            while (token != null) {
-                if (token == Token.START_OBJECT || token == Token.START_ARRAY) {
-                    depth += 1;
-                }
-                if (token == Token.END_OBJECT || token == Token.END_ARRAY) {
-                    depth -= 1;
-                    if (depth == 0) {
-                        return;
-                    }
-                }
-
-                token = nextToken();
-            }
-        }
-    }
-
-    /**
-     * Encapsulates listeners that are subscribed to this document parser. This allows to generalize logic without knowing
-     * how many listeners are present (and if they are present at all).
-     */
-    public interface Listeners {
-        void publish(DocumentParserListener.Event event, DocumentParserContext context) throws IOException;
-
-        void publish(DocumentParserListener.Token token) throws IOException;
-
-        DocumentParserListener.Output finish();
-
-        boolean isNoop();
-
-        boolean anyActive();
-
-        /**
-         * No listeners are present.
-         */
-        Listeners NOOP = new Listeners() {
-            @Override
-            public void publish(DocumentParserListener.Event event, DocumentParserContext context) {}
-
-            @Override
-            public void publish(DocumentParserListener.Token token) {
-
-            }
-
-            @Override
-            public DocumentParserListener.Output finish() {
-                return DocumentParserListener.Output.empty();
-            }
-
-            @Override
-            public boolean isNoop() {
-                return true;
-            }
-
-            @Override
-            public boolean anyActive() {
-                return false;
-            }
-        };
-
-        /**
-         * One or more listeners are present.
-         */
-        class Single implements Listeners {
-            private final DocumentParserListener listener;
-
-            public Single(DocumentParserListener listener) {
-                this.listener = listener;
-            }
-
-            @Override
-            public void publish(DocumentParserListener.Event event, DocumentParserContext context) throws IOException {
-                listener.consume(event);
-            }
-
-            @Override
-            public void publish(DocumentParserListener.Token token) throws IOException {
-                if (listener.isActive()) {
-                    listener.consume(token);
-                }
-            }
-
-            @Override
-            public DocumentParserListener.Output finish() {
-                return listener.finish();
-            }
-
-            @Override
-            public boolean isNoop() {
-                return false;
-            }
-
-            @Override
-            public boolean anyActive() {
-                return listener.isActive();
-            }
-        }
-    }
-
     private void internalParseDocument(MetadataFieldMapper[] metadataFieldsMappers, DocumentParserContext context) {
         try {
             final boolean emptyDoc = isEmptyDoc(context.root(), context.parser());
@@ -287,19 +129,26 @@ public final class DocumentParser {
                 metadataMapper.preParse(context);
             }
 
-            context.publishEvent(new DocumentParserListener.Event.DocumentStart(context.root(), context.doc()));
-
             if (context.root().isEnabled() == false) {
                 // entire type is disabled
-                context.parser().skipChildren();
+                if (context.canAddIgnoredField()) {
+                    context.addIgnoredField(
+                        new IgnoredSourceFieldMapper.NameValue(
+                            MapperService.SINGLE_MAPPING_NAME,
+                            0,
+                            context.encodeFlattenedToken(),
+                            context.doc()
+                        )
+                    );
+                } else {
+                    context.parser().skipChildren();
+                }
             } else if (emptyDoc == false) {
                 parseObjectOrNested(context);
             }
 
             executeIndexTimeScripts(context);
 
-            context.finishListeners();
-
             for (MetadataFieldMapper metadataMapper : metadataFieldsMappers) {
                 metadataMapper.postParse(context);
             }
@@ -425,12 +274,22 @@ public final class DocumentParser {
     }
 
     static void parseObjectOrNested(DocumentParserContext context) throws IOException {
-
         XContentParser parser = context.parser();
         String currentFieldName = parser.currentName();
         if (context.parent().isEnabled() == false) {
             // entire type is disabled
-            parser.skipChildren();
+            if (context.canAddIgnoredField()) {
+                context.addIgnoredField(
+                    new IgnoredSourceFieldMapper.NameValue(
+                        context.parent().fullPath(),
+                        context.parent().fullPath().lastIndexOf(context.parent().leafName()),
+                        context.encodeFlattenedToken(),
+                        context.doc()
+                    )
+                );
+            } else {
+                parser.skipChildren();
+            }
             return;
         }
         XContentParser.Token token = parser.currentToken();
@@ -443,6 +302,22 @@ public final class DocumentParser {
             throwOnConcreteValue(context.parent(), currentFieldName, context);
         }
 
+        var sourceKeepMode = getSourceKeepMode(context, context.parent().sourceKeepMode());
+        if (context.canAddIgnoredField()
+            && (sourceKeepMode == Mapper.SourceKeepMode.ALL
+                || (sourceKeepMode == Mapper.SourceKeepMode.ARRAYS && context.inArrayScope()))) {
+            context = context.addIgnoredFieldFromContext(
+                new IgnoredSourceFieldMapper.NameValue(
+                    context.parent().fullPath(),
+                    context.parent().fullPath().lastIndexOf(context.parent().leafName()),
+                    null,
+                    context.doc()
+                )
+            );
+            token = context.parser().currentToken();
+            parser = context.parser();
+        }
+
         if (context.parent().isNested()) {
             // Handle a nested object that doesn't contain an array. Arrays are handled in #parseNonDynamicArray.
context = context.createNestedContext((NestedObjectMapper) context.parent()); @@ -566,9 +441,6 @@ public final class DocumentParser { static void parseObjectOrField(DocumentParserContext context, Mapper mapper) throws IOException { if (mapper instanceof ObjectMapper objectMapper) { - context.publishEvent( - new DocumentParserListener.Event.ObjectStart(objectMapper, context.inArrayScope(), context.parent(), context.doc()) - ); parseObjectOrNested(context.createChildContext(objectMapper)); } else if (mapper instanceof FieldMapper fieldMapper) { if (shouldFlattenObject(context, fieldMapper)) { @@ -578,19 +450,12 @@ public final class DocumentParser { parseObjectOrNested(context.createFlattenContext(currentFieldName)); context.path().add(currentFieldName); } else { - context.publishEvent( - new DocumentParserListener.Event.LeafValue( - fieldMapper, - context.inArrayScope(), - context.parent(), - context.doc(), - context.parser() - ) - ); - + var sourceKeepMode = getSourceKeepMode(context, fieldMapper.sourceKeepMode()); if (context.canAddIgnoredField() - && context.isWithinCopyTo() == false - && context.isCopyToDestinationField(mapper.fullPath())) { + && (fieldMapper.syntheticSourceMode() == FieldMapper.SyntheticSourceMode.FALLBACK + || sourceKeepMode == Mapper.SourceKeepMode.ALL + || (sourceKeepMode == Mapper.SourceKeepMode.ARRAYS && context.inArrayScope()) + || (context.isWithinCopyTo() == false && context.isCopyToDestinationField(mapper.fullPath())))) { context = context.addIgnoredFieldFromContext( IgnoredSourceFieldMapper.NameValue.fromContext(context, fieldMapper.fullPath(), null) ); @@ -820,17 +685,40 @@ public final class DocumentParser { ) throws IOException { String fullPath = context.path().pathAsText(arrayFieldName); - if (mapper instanceof ObjectMapper objectMapper) { - context.publishEvent(new DocumentParserListener.Event.ObjectArrayStart(objectMapper, context.parent(), context.doc())); - } else if (mapper instanceof FieldMapper fieldMapper) { - context.publishEvent(new DocumentParserListener.Event.LeafArrayStart(fieldMapper, context.parent(), context.doc())); - } - // Check if we need to record the array source. This only applies to synthetic source. 
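The conditions folded into parseObjectOrField above, and the array-source bookkeeping that follows, all answer one question: can synthetic source rebuild this value from doc values and stored fields, or must the parser stash a verbatim copy in ignored source? Restated as a standalone predicate (a sketch mirroring the patch's checks; the free-standing method and enum are illustrative, not Elasticsearch API):

final class IgnoredSourceDecision {
    enum SourceKeepMode { NONE, ARRAYS, ALL }

    // True when a leaf value must be copied into ignored source: the field type can
    // only do fallback synthetic source, or the user asked to keep the original value
    // (always, or for values inside arrays), or a copy_to target already holds values
    // for this document.
    static boolean storeInIgnoredSource(
        boolean fallbackSyntheticSource,
        SourceKeepMode keepMode,
        boolean inArrayScope,
        boolean copyToDestinationHasValues
    ) {
        return fallbackSyntheticSource
            || keepMode == SourceKeepMode.ALL
            || (keepMode == SourceKeepMode.ARRAYS && inArrayScope)
            || copyToDestinationHasValues;
    }
}

Disabled objects take the same route: their whole subtree is flattened into a single ignored-source entry rather than skipped.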
+ boolean canRemoveSingleLeafElement = false; if (context.canAddIgnoredField()) { + Mapper.SourceKeepMode mode = Mapper.SourceKeepMode.NONE; + boolean objectWithFallbackSyntheticSource = false; + if (mapper instanceof ObjectMapper objectMapper) { + mode = getSourceKeepMode(context, objectMapper.sourceKeepMode()); + objectWithFallbackSyntheticSource = mode == Mapper.SourceKeepMode.ALL + || (mode == Mapper.SourceKeepMode.ARRAYS && objectMapper instanceof NestedObjectMapper == false); + } + boolean fieldWithFallbackSyntheticSource = false; + boolean fieldWithStoredArraySource = false; + if (mapper instanceof FieldMapper fieldMapper) { + mode = getSourceKeepMode(context, fieldMapper.sourceKeepMode()); + fieldWithFallbackSyntheticSource = fieldMapper.syntheticSourceMode() == FieldMapper.SyntheticSourceMode.FALLBACK; + fieldWithStoredArraySource = mode != Mapper.SourceKeepMode.NONE; + } boolean copyToFieldHasValuesInDocument = context.isWithinCopyTo() == false && context.isCopyToDestinationField(fullPath); - if (copyToFieldHasValuesInDocument) { + + canRemoveSingleLeafElement = mapper instanceof FieldMapper + && mode == Mapper.SourceKeepMode.ARRAYS + && fieldWithFallbackSyntheticSource == false + && copyToFieldHasValuesInDocument == false; + + if (objectWithFallbackSyntheticSource + || fieldWithFallbackSyntheticSource + || fieldWithStoredArraySource + || copyToFieldHasValuesInDocument) { context = context.addIgnoredFieldFromContext(IgnoredSourceFieldMapper.NameValue.fromContext(context, fullPath, null)); + } else if (mapper instanceof ObjectMapper objectMapper && (objectMapper.isEnabled() == false)) { + // No need to call #addIgnoredFieldFromContext as both singleton and array instances of this object + // get tracked through ignored source. + context.addIgnoredField(IgnoredSourceFieldMapper.NameValue.fromContext(context, fullPath, context.encodeFlattenedToken())); + return; } } @@ -841,20 +729,28 @@ public final class DocumentParser { XContentParser parser = context.parser(); XContentParser.Token token; + int elements = 0; while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.START_OBJECT) { + elements = Integer.MAX_VALUE; parseObject(context, lastFieldName); } else if (token == XContentParser.Token.START_ARRAY) { + elements = Integer.MAX_VALUE; parseArray(context, lastFieldName); } else if (token == XContentParser.Token.VALUE_NULL) { + elements++; parseNullValue(context, lastFieldName); } else if (token == null) { throwEOFOnParseArray(arrayFieldName, context); } else { assert token.isValue(); + elements++; parseValue(context, lastFieldName); } } + if (elements <= 1 && canRemoveSingleLeafElement) { + context.removeLastIgnoredField(fullPath); + } postProcessDynamicArrayMapping(context, lastFieldName); } @@ -1153,14 +1049,12 @@ public final class DocumentParser { MappingLookup mappingLookup, MappingParserContext mappingParserContext, SourceToParse source, - Listeners listeners, XContentParser parser ) throws IOException { super( mappingLookup, mappingParserContext, source, - listeners, mappingLookup.getMapping().getRoot(), ObjectMapper.Dynamic.getRootDynamic(mappingLookup) ); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java index 625f3a3d19af..51e4e9f4c1b5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java +++ 
b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java @@ -117,7 +117,6 @@ public abstract class DocumentParserContext { private final MappingLookup mappingLookup; private final MappingParserContext mappingParserContext; private final SourceToParse sourceToParse; - private final DocumentParser.Listeners listeners; private final Set<String> ignoredFields; private final List<IgnoredSourceFieldMapper.NameValue> ignoredFieldValues; @@ -150,7 +149,6 @@ public abstract class DocumentParserContext { MappingLookup mappingLookup, MappingParserContext mappingParserContext, SourceToParse sourceToParse, - DocumentParser.Listeners listeners, Set<String> ignoreFields, List<IgnoredSourceFieldMapper.NameValue> ignoredFieldValues, Scope currentScope, @@ -171,7 +169,6 @@ public abstract class DocumentParserContext { this.mappingLookup = mappingLookup; this.mappingParserContext = mappingParserContext; this.sourceToParse = sourceToParse; - this.listeners = listeners; this.ignoredFields = ignoreFields; this.ignoredFieldValues = ignoredFieldValues; this.currentScope = currentScope; @@ -195,7 +192,6 @@ public abstract class DocumentParserContext { in.mappingLookup, in.mappingParserContext, in.sourceToParse, - in.listeners, in.ignoredFields, in.ignoredFieldValues, in.currentScope, @@ -219,7 +215,6 @@ public abstract class DocumentParserContext { MappingLookup mappingLookup, MappingParserContext mappingParserContext, SourceToParse source, - DocumentParser.Listeners listeners, ObjectMapper parent, ObjectMapper.Dynamic dynamic ) { @@ -227,7 +222,6 @@ mappingLookup, mappingParserContext, source, - listeners, new HashSet<>(), new ArrayList<>(), Scope.SINGLETON, @@ -307,6 +301,12 @@ } } + final void removeLastIgnoredField(String name) { + if (ignoredFieldValues.isEmpty() == false && ignoredFieldValues.getLast().name().equals(name)) { + ignoredFieldValues.removeLast(); + } + } + /** * Return the collection of values for fields that have been ignored so far. */ @@ -470,15 +470,6 @@ return copyToFields; } - public void publishEvent(DocumentParserListener.Event event) throws IOException { - listeners.publish(event, this); - } - - public void finishListeners() { - var output = listeners.finish(); - ignoredFieldValues.addAll(output.ignoredSourceValues()); - } - /** * Add a new mapper dynamically created while parsing. * @@ -668,7 +659,7 @@ /** * Return a new context that will be used within a nested document. */ - public final DocumentParserContext createNestedContext(NestedObjectMapper nestedMapper) throws IOException { + public final DocumentParserContext createNestedContext(NestedObjectMapper nestedMapper) { if (isWithinCopyTo()) { // nested context will already have been set up for copy_to fields return this; @@ -697,7 +688,7 @@ /** * Return a new context that has the provided document as the current document.
*/ - public final DocumentParserContext switchDoc(final LuceneDocument document) throws IOException { + public final DocumentParserContext switchDoc(final LuceneDocument document) { DocumentParserContext cloned = new Wrapper(this.parent, this) { @Override public LuceneDocument doc() { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserListener.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserListener.java deleted file mode 100644 index 7ea902235da1..000000000000 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserListener.java +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.index.mapper; - -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.List; - -/** - * Component that listens to events produced by {@link DocumentParser} in order to implement some parsing related logic. - * It allows to keep such logic separate from actual document parsing workflow which is by itself complex. - */ -public interface DocumentParserListener { - /** - * Specifies if this listener is currently actively consuming tokens. - * This is used to avoid doing unnecessary work. - * @return - */ - boolean isActive(); - - /** - * Sends a {@link Token} to this listener. - * This is only called when {@link #isActive()} returns true since it involves a somewhat costly operation of creating a token instance - * and tokens are low level meaning this is called very frequently. - * @param token - * @throws IOException - */ - void consume(Token token) throws IOException; - - /** - * Sends an {@link Event} to this listener. Unlike tokens events are always sent to a listener. - * The logic here is that based on the event listener can decide to change the return value of {@link #isActive()}. - * @param event - * @throws IOException - */ - void consume(Event event) throws IOException; - - Output finish(); - - /** - * A lower level notification passed from the parser to a listener. - * This token is closely related to {@link org.elasticsearch.xcontent.XContentParser.Token} and is used for use cases like - * preserving the exact structure of the parsed document. 
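One design point in the Token hierarchy that follows is easy to overlook: leaf values are never eagerly materialized. ValueToken has a single abstract method, so the parser can hand a listener a method reference such as parser::intValue and the value is only decoded if the listener actually asks for it. A stripped-down sketch of that pattern (names are illustrative, not the Elasticsearch types):

import java.io.IOException;

interface LazyValue<T> {
    T value() throws IOException;
}

final class LazyValueDemo {
    // "Parser" side: wrap the decode step in a lambda instead of decoding eagerly.
    static LazyValue<Integer> intToken(String raw) {
        return () -> Integer.parseInt(raw); // work happens only when value() is called
    }

    public static void main(String[] args) throws IOException {
        LazyValue<Integer> token = intToken("42");
        System.out.println(token.value()); // decoding deferred until this call
    }
}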
- */ - sealed interface Token permits Token.FieldName, Token.StartObject, Token.EndObject, Token.StartArray, Token.EndArray, - Token.StringAsCharArrayValue, Token.NullValue, Token.ValueToken { - - record FieldName(String name) implements Token {} - - record StartObject() implements Token {} - - record EndObject() implements Token {} - - record StartArray() implements Token {} - - record EndArray() implements Token {} - - record NullValue() implements Token {} - - final class StringAsCharArrayValue implements Token { - private final XContentParser parser; - - public StringAsCharArrayValue(XContentParser parser) { - this.parser = parser; - } - - char[] buffer() throws IOException { - return parser.textCharacters(); - } - - int length() throws IOException { - return parser.textLength(); - } - - int offset() throws IOException { - return parser.textOffset(); - } - } - - non-sealed interface ValueToken<T> extends Token { - T value() throws IOException; - } - - Token START_OBJECT = new StartObject(); - Token END_OBJECT = new EndObject(); - Token START_ARRAY = new StartArray(); - Token END_ARRAY = new EndArray(); - - static Token current(XContentParser parser) throws IOException { - return switch (parser.currentToken()) { - case START_OBJECT -> Token.START_OBJECT; - case END_OBJECT -> Token.END_OBJECT; - case START_ARRAY -> Token.START_ARRAY; - case END_ARRAY -> Token.END_ARRAY; - case FIELD_NAME -> new FieldName(parser.currentName()); - case VALUE_STRING -> { - if (parser.hasTextCharacters()) { - yield new StringAsCharArrayValue(parser); - } else { - yield (ValueToken<String>) parser::text; - } - } - case VALUE_NUMBER -> switch (parser.numberType()) { - case INT -> (ValueToken<Integer>) parser::intValue; - case BIG_INTEGER -> (ValueToken<BigInteger>) () -> (BigInteger) parser.numberValue(); - case LONG -> (ValueToken<Long>) parser::longValue; - case FLOAT -> (ValueToken<Float>) parser::floatValue; - case DOUBLE -> (ValueToken<Double>) parser::doubleValue; - case BIG_DECIMAL -> { - // See @XContentGenerator#copyCurrentEvent - assert false : "missing xcontent number handling for type [" + parser.numberType() + "]"; - yield null; - } - }; - case VALUE_BOOLEAN -> (ValueToken<Boolean>) parser::booleanValue; - case VALUE_EMBEDDED_OBJECT -> (ValueToken<byte[]>) parser::binaryValue; - case VALUE_NULL -> new NullValue(); - case null -> null; - }; - } - } - - /** - * High level notification passed from the parser to a listener. - * Events represent meaningful logical operations during parsing and contain relevant context for the operation - * like a mapper being used. - * A listener can use events and/or tokens depending on the use case. For example, it can wait for a specific event and then switch - * to consuming tokens instead.
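That split between events and tokens is the performance contract being deleted: events are always delivered (they can flip a listener active), while tokens, which are produced far more often, are built and delivered only while the listener reports itself active. A compact sketch of the gating (illustrative names, not the Elasticsearch API):

import java.util.function.Supplier;

interface GatedListener {
    boolean isActive();          // may change as events arrive
    void onEvent(String event);  // always invoked
    void onToken(String token);  // invoked only while active
}

final class TokenGate {
    // Token construction is skipped entirely when the listener is inactive,
    // keeping the per-token hot path to a single boolean check.
    static void publishToken(GatedListener listener, Supplier<String> makeToken) {
        if (listener.isActive()) {
            listener.onToken(makeToken.get());
        }
    }
}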
- */ - sealed interface Event permits Event.DocumentStart, Event.ObjectStart, Event.ObjectArrayStart, Event.LeafArrayStart, Event.LeafValue { - record DocumentStart(RootObjectMapper rootObjectMapper, LuceneDocument document) implements Event {} - - record ObjectStart(ObjectMapper objectMapper, boolean insideObjectArray, ObjectMapper parentMapper, LuceneDocument document) - implements - Event {} - - record ObjectArrayStart(ObjectMapper objectMapper, ObjectMapper parentMapper, LuceneDocument document) implements Event {} - - final class LeafValue implements Event { - private final FieldMapper fieldMapper; - private final boolean insideObjectArray; - private final ObjectMapper parentMapper; - private final LuceneDocument document; - private final XContentParser parser; - private final boolean isObjectOrArray; - private final boolean isArray; - - public LeafValue( - FieldMapper fieldMapper, - boolean insideObjectArray, - ObjectMapper parentMapper, - LuceneDocument document, - XContentParser parser - ) { - this.fieldMapper = fieldMapper; - this.insideObjectArray = insideObjectArray; - this.parentMapper = parentMapper; - this.document = document; - this.parser = parser; - this.isObjectOrArray = parser.currentToken().isValue() == false && parser.currentToken() != XContentParser.Token.VALUE_NULL; - this.isArray = parser.currentToken() == XContentParser.Token.START_ARRAY; - } - - public FieldMapper fieldMapper() { - return fieldMapper; - } - - public boolean insideObjectArray() { - return insideObjectArray; - } - - public ObjectMapper parentMapper() { - return parentMapper; - } - - public LuceneDocument document() { - return document; - } - - /** - * @return whether a value is an object or an array vs a single value like a long. - */ - boolean isContainer() { - return isObjectOrArray; - } - - boolean isArray() { - return isArray; - } - - BytesRef encodeValue() throws IOException { - assert isContainer() == false : "Objects should not be handled with direct encoding"; - - return XContentDataHelper.encodeToken(parser); - } - } - - record LeafArrayStart(FieldMapper fieldMapper, ObjectMapper parentMapper, LuceneDocument document) implements Event {} - } - - record Output(List<IgnoredSourceFieldMapper.NameValue> ignoredSourceValues) { - static Output empty() { - return new Output(new ArrayList<>()); - } - - void merge(Output part) { - this.ignoredSourceValues.addAll(part.ignoredSourceValues); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index ffd60efc772f..5bbecbf117db 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -57,7 +57,6 @@ import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; -import java.util.stream.Stream; import static org.elasticsearch.core.Strings.format; @@ -444,7 +443,11 @@ public abstract class FieldMapper extends Mapper { @Override public int getTotalFieldsCount() { - return 1 + Stream.of(builderParams.multiFields.mappers).mapToInt(FieldMapper::getTotalFieldsCount).sum(); + int sum = 1; + for (FieldMapper mapper : builderParams.multiFields.mappers) { + sum += mapper.getTotalFieldsCount(); + } + return sum; } public Map<String, NamedAnalyzer> indexAnalyzers() { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/InferenceMetadataFieldsMapper.java
b/server/src/main/java/org/elasticsearch/index/mapper/InferenceMetadataFieldsMapper.java index 6051aafb9f74..80fee58e9311 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/InferenceMetadataFieldsMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/InferenceMetadataFieldsMapper.java @@ -86,8 +86,12 @@ public abstract class InferenceMetadataFieldsMapper extends MetadataFieldMapper * @return {@code true} if the new format is enabled; {@code false} otherwise */ public static boolean isEnabled(Settings settings) { - return IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).onOrAfter(IndexVersions.INFERENCE_METADATA_FIELDS) - && USE_LEGACY_SEMANTIC_TEXT_FORMAT.get(settings) == false; + var version = IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings); + if (version.before(IndexVersions.INFERENCE_METADATA_FIELDS) + && version.between(IndexVersions.INFERENCE_METADATA_FIELDS_BACKPORT, IndexVersions.UPGRADE_TO_LUCENE_10_0_0) == false) { + return false; + } + return USE_LEGACY_SEMANTIC_TEXT_FORMAT.get(settings) == false; } /** diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 689cbdcdbda7..fb4f86c3cba9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -592,7 +592,6 @@ public class MapperService extends AbstractIndexComponent implements Closeable { mapping, mappingSource, indexVersionCreated, - indexSettings, mapperMetrics, index().getName() ); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java index 80dfa37a0ee0..ed02e5fc2961 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java @@ -43,7 +43,7 @@ public final class MappingLookup { * A lookup representing an empty mapping. It can be used to look up fields, although it won't hold any, but it does not * hold a valid {@link DocumentParser}, {@link IndexSettings} or {@link IndexAnalyzers}. */ - public static final MappingLookup EMPTY = fromMappers(Mapping.EMPTY, List.of(), List.of(), null); + public static final MappingLookup EMPTY = fromMappers(Mapping.EMPTY, List.of(), List.of()); private final CacheKey cacheKey = new CacheKey(); @@ -59,16 +59,14 @@ public final class MappingLookup { private final List<FieldMapper> indexTimeScriptMappers; private final Mapping mapping; private final int totalFieldsCount; - private final CustomSyntheticSourceFieldLookup customSyntheticSourceFieldLookup; /** * Creates a new {@link MappingLookup} instance by parsing the provided mapping and extracting its field definitions.
* * @param mapping the mapping source - * @param indexSettings index settings * @return the newly created lookup instance */ - public static MappingLookup fromMapping(Mapping mapping, IndexSettings indexSettings) { + public static MappingLookup fromMapping(Mapping mapping) { List<ObjectMapper> newObjectMappers = new ArrayList<>(); List<FieldMapper> newFieldMappers = new ArrayList<>(); List<FieldAliasMapper> newFieldAliasMappers = new ArrayList<>(); @@ -81,7 +79,7 @@ public final class MappingLookup { for (Mapper child : mapping.getRoot()) { collect(child, newObjectMappers, newFieldMappers, newFieldAliasMappers, newPassThroughMappers); } - return new MappingLookup(mapping, newFieldMappers, newObjectMappers, newFieldAliasMappers, newPassThroughMappers, indexSettings); + return new MappingLookup(mapping, newFieldMappers, newObjectMappers, newFieldAliasMappers, newPassThroughMappers); } private static void collect( @@ -122,7 +120,6 @@ * @param objectMappers the object mappers * @param aliasMappers the field alias mappers * @param passThroughMappers the pass-through mappers - * @param indexSettings index settings * @return the newly created lookup instance */ public static MappingLookup fromMappers( @@ -130,19 +127,13 @@ Mapping mapping, Collection<FieldMapper> mappers, Collection<ObjectMapper> objectMappers, Collection<FieldAliasMapper> aliasMappers, - Collection<PassThroughObjectMapper> passThroughMappers, - @Nullable IndexSettings indexSettings + Collection<PassThroughObjectMapper> passThroughMappers ) { - return new MappingLookup(mapping, mappers, objectMappers, aliasMappers, passThroughMappers, indexSettings); + return new MappingLookup(mapping, mappers, objectMappers, aliasMappers, passThroughMappers); } - public static MappingLookup fromMappers( - Mapping mapping, - Collection<FieldMapper> mappers, - Collection<ObjectMapper> objectMappers, - @Nullable IndexSettings indexSettings - ) { - return new MappingLookup(mapping, mappers, objectMappers, List.of(), List.of(), indexSettings); + public static MappingLookup fromMappers(Mapping mapping, Collection<FieldMapper> mappers, Collection<ObjectMapper> objectMappers) { + return new MappingLookup(mapping, mappers, objectMappers, List.of(), List.of()); } private MappingLookup( @@ -150,8 +141,7 @@ Mapping mapping, Collection<FieldMapper> mappers, Collection<ObjectMapper> objectMappers, Collection<FieldAliasMapper> aliasMappers, - Collection<PassThroughObjectMapper> passThroughMappers, - @Nullable IndexSettings indexSettings + Collection<PassThroughObjectMapper> passThroughMappers ) { this.totalFieldsCount = mapping.getRoot().getTotalFieldsCount(); this.mapping = mapping; @@ -217,7 +207,6 @@ this.runtimeFieldMappersCount = runtimeFields.size(); this.indexAnalyzersMap = Map.copyOf(indexAnalyzersMap); this.indexTimeScriptMappers = List.copyOf(indexTimeScriptMappers); - this.customSyntheticSourceFieldLookup = new CustomSyntheticSourceFieldLookup(mapping, indexSettings, isSourceSynthetic()); runtimeFields.stream().flatMap(RuntimeField::asMappedFieldTypes).map(MappedFieldType::name).forEach(this::validateDoesNotShadow); assert assertMapperNamesInterned(this.fieldMappers, this.objectMappers); @@ -554,8 +543,4 @@ throw new MapperParsingException("Field [" + name + "] attempted to shadow a time_series_metric"); } } - - public CustomSyntheticSourceFieldLookup getCustomSyntheticSourceFieldLookup() { - return customSyntheticSourceFieldLookup; - } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index 6a107dbaa9e6..f4084b3ede24 100644 ---
a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -263,7 +263,11 @@ public class ObjectMapper extends Mapper { @Override public int getTotalFieldsCount() { - return 1 + mappers.values().stream().mapToInt(Mapper::getTotalFieldsCount).sum(); + int sum = 1; + for (Mapper mapper : mappers.values()) { + sum += mapper.getTotalFieldsCount(); + } + return sum; } public static class TypeParser implements Mapper.TypeParser { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java index ce983e8a327c..2fe82b4eacfc 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java @@ -538,6 +538,6 @@ public class RootObjectMapper extends ObjectMapper { @Override public int getTotalFieldsCount() { - return mappers.values().stream().mapToInt(Mapper::getTotalFieldsCount).sum() + runtimeFields.size(); + return super.getTotalFieldsCount() - 1 + runtimeFields.size(); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SyntheticSourceDocumentParserListener.java b/server/src/main/java/org/elasticsearch/index/mapper/SyntheticSourceDocumentParserListener.java deleted file mode 100644 index eabb11763557..000000000000 --- a/server/src/main/java/org/elasticsearch/index/mapper/SyntheticSourceDocumentParserListener.java +++ /dev/null @@ -1,398 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.index.mapper; - -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentType; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * Listens for document parsing events and stores an additional copy of source data when it is needed for synthetic _source. - *
- * Note that synthetic source logic for dynamic fields and fields involved in copy_to logic is still handled in {@link DocumentParser}. - */ -class SyntheticSourceDocumentParserListener implements DocumentParserListener { - private final CustomSyntheticSourceFieldLookup customSyntheticSourceFieldLookup; - private final XContentType xContentType; - - private final Map<LuceneDocument, Map<String, List<StoredValue>>> ignoredSourceValues; - - private State state; - - SyntheticSourceDocumentParserListener(MappingLookup mappingLookup, XContentType xContentType) { - this.customSyntheticSourceFieldLookup = mappingLookup.getCustomSyntheticSourceFieldLookup(); - this.xContentType = xContentType; - - this.ignoredSourceValues = new HashMap<>(); - this.state = new Tracking(); - } - - @Override - public boolean isActive() { - return state instanceof Storing; - } - - @Override - public void consume(Token token) throws IOException { - if (token == null) { - return; - } - - this.state = state.consume(token); - } - - @Override - public void consume(Event event) throws IOException { - if (event == null) { - return; - } - - this.state = state.consume(event); - } - - @Override - public Output finish() { - var values = new ArrayList<IgnoredSourceFieldMapper.NameValue>(); - - for (var fieldToValueMap : ignoredSourceValues.values()) { - for (var fieldValues : fieldToValueMap.values()) { - long singleElementArrays = 0; - long stashedValuesForSourceKeepArrays = 0; - - for (var fieldValue : fieldValues) { - if (fieldValue instanceof StoredValue.Array arr) { - // Arrays are stored to preserve the order of elements. - // If there is a single element it does not matter and we can drop such data. - if (arr.length() == 1 && arr.reason() == StoreReason.LEAF_STORED_ARRAY) { - singleElementArrays += 1; - } - } - if (fieldValue instanceof StoredValue.Singleton singleton) { - // Stash values are values of fields that are inside object arrays and have synthetic_source_keep: "arrays". - // With current logic either all field values should be in ignored source - // or none of them. - // With object arrays the same field can be parsed multiple times (one time for every object array entry) - // and it is possible that one of the value is an array. - // Due to the rule above we need to proactively store all values of such fields because we may later discover - // that there is an array and we need to "switch" to ignored source usage. - // However if we stored all values but the array is not there, the field will be correctly constructed - // using regular logic and therefore we can drop this and save some space. - if (singleton.reason() == StoreReason.LEAF_VALUE_STASH_FOR_STORED_ARRAYS) { - stashedValuesForSourceKeepArrays += 1; - } - } - } - - // Only if all values match one of the optimization criteria we skip them, otherwise add all of them to resulting list. - if (singleElementArrays != fieldValues.size() && stashedValuesForSourceKeepArrays != fieldValues.size()) { - for (var storedValue : fieldValues) { - values.add(storedValue.nameValue()); - } - } - } - } - - return new Output(values); - } - - sealed interface StoredValue permits StoredValue.Array, StoredValue.Singleton { - IgnoredSourceFieldMapper.NameValue nameValue(); - - /** - * An array of values is stored f.e. due to synthetic_source_keep: "arrays". - */ - record Array(IgnoredSourceFieldMapper.NameValue nameValue, StoreReason reason, long length) implements StoredValue {} - - /** - * A single value.
- */ - record Singleton(IgnoredSourceFieldMapper.NameValue nameValue, StoreReason reason) implements StoredValue {} - - } - - /** - * Reason for storing this value. - */ - enum StoreReason { - /** - * Leaf array that is stored due to "synthetic_source_keep": "arrays". - */ - LEAF_STORED_ARRAY, - - /** - * "Stashed" value needed to only in case there are mixed arrays and single values - * for this field. - * Can be dropped in some cases. - */ - LEAF_VALUE_STASH_FOR_STORED_ARRAYS, - - /** - * There is currently no need to distinguish other reasons. - */ - OTHER - } - - private void addIgnoredSourceValue(StoredValue storedValue, String fullPath, LuceneDocument luceneDocument) { - var values = ignoredSourceValues.computeIfAbsent(luceneDocument, ld -> new HashMap<>()) - .computeIfAbsent(fullPath, p -> new ArrayList<>()); - - values.add(storedValue); - } - - interface State { - State consume(Token token) throws IOException; - - State consume(Event event) throws IOException; - } - - class Storing implements State { - private final State returnState; - private final String fullPath; - private final ObjectMapper parentMapper; - private final StoreReason reason; - private final LuceneDocument document; - - private final XContentBuilder builder; - // Current object/array depth, needed to understand when the top-most object/arrays ends vs a nested one. - private int depth; - // If we are storing an array this is the length of the array. - private int length; - - Storing( - State returnState, - Token startingToken, - String fullPath, - ObjectMapper parentMapper, - StoreReason reason, - LuceneDocument document - ) throws IOException { - this.returnState = returnState; - this.fullPath = fullPath; - this.parentMapper = parentMapper; - this.reason = reason; - this.document = document; - - this.builder = XContentBuilder.builder(xContentType.xContent()); - - this.depth = 0; - this.length = 0; - - consume(startingToken); - } - - public State consume(Token token) throws IOException { - switch (token) { - case Token.StartObject startObject -> { - builder.startObject(); - if (depth == 1) { - length += 1; - } - depth += 1; - } - case Token.EndObject endObject -> { - builder.endObject(); - - if (processEndObjectOrArray(endObject)) { - return returnState; - } - } - case Token.StartArray startArray -> { - builder.startArray(); - depth += 1; - } - case Token.EndArray endArray -> { - builder.endArray(); - - if (processEndObjectOrArray(endArray)) { - return returnState; - } - } - case Token.FieldName fieldName -> builder.field(fieldName.name()); - case Token.StringAsCharArrayValue stringAsCharArrayValue -> { - if (depth == 1) { - length += 1; - } - builder.generator() - .writeString(stringAsCharArrayValue.buffer(), stringAsCharArrayValue.offset(), stringAsCharArrayValue.length()); - } - case Token.ValueToken valueToken -> { - if (depth == 1) { - length += 1; - } - builder.value(valueToken.value()); - } - case Token.NullValue nullValue -> { - if (depth == 1) { - length += 1; - } - builder.nullValue(); - } - case null -> { - } - } - - return this; - } - - public State consume(Event event) { - // We are currently storing something so events are not relevant. - return this; - } - - private boolean processEndObjectOrArray(Token token) throws IOException { - assert token instanceof Token.EndObject || token instanceof Token.EndArray - : "Unexpected token when storing ignored source value"; - - depth -= 1; - if (depth == 0) { - var parentOffset = parentMapper.isRoot() ? 
0 : parentMapper.fullPath().length() + 1; - var nameValue = new IgnoredSourceFieldMapper.NameValue( - fullPath, - parentOffset, - XContentDataHelper.encodeXContentBuilder(builder), - document - ); - var storedValue = token instanceof Token.EndObject - ? new StoredValue.Singleton(nameValue, reason) - : new StoredValue.Array(nameValue, reason, length); - - addIgnoredSourceValue(storedValue, fullPath, document); - - return true; - } - - return false; - } - } - - class Tracking implements State { - public State consume(Token token) throws IOException { - return this; - } - - public State consume(Event event) throws IOException { - switch (event) { - case Event.DocumentStart documentStart -> { - if (documentStart.rootObjectMapper().isEnabled() == false) { - return new Storing( - this, - Token.START_OBJECT, - documentStart.rootObjectMapper().fullPath(), - documentStart.rootObjectMapper(), - StoreReason.OTHER, - documentStart.document() - ); - } - } - case Event.ObjectStart objectStart -> { - var reason = customSyntheticSourceFieldLookup.getFieldsWithCustomSyntheticSourceHandling() - .get(objectStart.objectMapper().fullPath()); - if (reason == null) { - return this; - } - if (reason == CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ARRAYS && objectStart.insideObjectArray() == false) { - return this; - } - - return new Storing( - this, - Token.START_OBJECT, - objectStart.objectMapper().fullPath(), - objectStart.parentMapper(), - StoreReason.OTHER, - objectStart.document() - ); - } - case Event.ObjectArrayStart objectArrayStart -> { - var reason = customSyntheticSourceFieldLookup.getFieldsWithCustomSyntheticSourceHandling() - .get(objectArrayStart.objectMapper().fullPath()); - if (reason == null) { - return this; - } - - return new Storing( - this, - Token.START_ARRAY, - objectArrayStart.objectMapper().fullPath(), - objectArrayStart.parentMapper(), - StoreReason.OTHER, - objectArrayStart.document() - ); - } - case Event.LeafValue leafValue -> { - var reason = customSyntheticSourceFieldLookup.getFieldsWithCustomSyntheticSourceHandling() - .get(leafValue.fieldMapper().fullPath()); - if (reason == null) { - return this; - } - if (reason == CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ARRAYS && leafValue.insideObjectArray() == false) { - return this; - } - - var storeReason = reason == CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ARRAYS - ? StoreReason.LEAF_VALUE_STASH_FOR_STORED_ARRAYS - : StoreReason.OTHER; - - if (leafValue.isContainer()) { - return new Storing( - this, - leafValue.isArray() ? Token.START_ARRAY : Token.START_OBJECT, - leafValue.fieldMapper().fullPath(), - leafValue.parentMapper(), - storeReason, - leafValue.document() - ); - } - - var parentMapper = leafValue.parentMapper(); - var parentOffset = parentMapper.isRoot() ? 0 : parentMapper.fullPath().length() + 1; - - var nameValue = new IgnoredSourceFieldMapper.NameValue( - leafValue.fieldMapper().fullPath(), - parentOffset, - leafValue.encodeValue(), - leafValue.document() - ); - addIgnoredSourceValue( - new StoredValue.Singleton(nameValue, storeReason), - leafValue.fieldMapper().fullPath(), - leafValue.document() - ); - } - case Event.LeafArrayStart leafArrayStart -> { - var reason = customSyntheticSourceFieldLookup.getFieldsWithCustomSyntheticSourceHandling() - .get(leafArrayStart.fieldMapper().fullPath()); - if (reason == null) { - return this; - } - - var storeReason = reason == CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ARRAYS - ? 
StoreReason.LEAF_STORED_ARRAY - : StoreReason.OTHER; - return new Storing( - this, - Token.START_ARRAY, - leafArrayStart.fieldMapper().fullPath(), - leafArrayStart.parentMapper(), - storeReason, - leafArrayStart.document() - ); - } - } - - return this; - } - } -} diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index dd13f905b6cb..b1a203616b12 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -27,6 +27,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.TranslogOperationAsserter; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Uid; @@ -123,6 +124,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC private final TranslogDeletionPolicy deletionPolicy; private final LongConsumer persistedSequenceNumberConsumer; private final OperationListener operationListener; + private final TranslogOperationAsserter operationAsserter; /** * Creates a new Translog instance. This method will create a new transaction log unless the given {@link TranslogGeneration} is @@ -150,7 +152,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC TranslogDeletionPolicy deletionPolicy, final LongSupplier globalCheckpointSupplier, final LongSupplier primaryTermSupplier, - final LongConsumer persistedSequenceNumberConsumer + final LongConsumer persistedSequenceNumberConsumer, + final TranslogOperationAsserter operationAsserter ) throws IOException { super(config.getShardId(), config.getIndexSettings()); this.config = config; @@ -158,6 +161,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC this.primaryTermSupplier = primaryTermSupplier; this.persistedSequenceNumberConsumer = persistedSequenceNumberConsumer; this.operationListener = config.getOperationListener(); + this.operationAsserter = operationAsserter; this.deletionPolicy = deletionPolicy; this.translogUUID = translogUUID; this.bigArrays = config.getBigArrays(); @@ -586,6 +590,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC bigArrays, diskIoBufferPool, operationListener, + operationAsserter, config.fsync() ); } catch (final IOException e) { @@ -1269,17 +1274,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC return false; } - Index index = (Index) o; - - if (version != index.version - || seqNo != index.seqNo - || primaryTerm != index.primaryTerm - || id.equals(index.id) == false - || autoGeneratedIdTimestamp != index.autoGeneratedIdTimestamp - || source.equals(index.source) == false) { - return false; - } - return Objects.equals(routing, index.routing); + Index other = (Index) o; + return autoGeneratedIdTimestamp == other.autoGeneratedIdTimestamp && equalsWithoutAutoGeneratedTimestamp(this, other); } @Override @@ -1315,6 +1311,15 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC return autoGeneratedIdTimestamp; } + public static boolean equalsWithoutAutoGeneratedTimestamp(Translog.Index o1, Translog.Index o2) { + return o1.version == o2.version + && o1.seqNo == o2.seqNo + && o1.primaryTerm == 
o2.primaryTerm + && o1.id.equals(o2.id) + && o1.source.equals(o2.source) + && Objects.equals(o1.routing, o2.routing); + } + } public static final class Delete extends Operation { @@ -1962,6 +1967,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC BigArrays.NON_RECYCLING_INSTANCE, DiskIoBufferPool.INSTANCE, TranslogConfig.NOOP_OPERATION_LISTENER, + TranslogOperationAsserter.DEFAULT, true ); writer.close(); diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index 3dda44ff5a6d..8cf631b660b1 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -26,6 +26,7 @@ import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Releasables; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.engine.TranslogOperationAsserter; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; @@ -39,7 +40,6 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReentrantLock; import java.util.function.LongConsumer; @@ -69,6 +69,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { // callback that's called whenever an operation with a given sequence number is successfully persisted. private final LongConsumer persistedSequenceNumberConsumer; private final OperationListener operationListener; + private final TranslogOperationAsserter operationAsserter; private final boolean fsync; protected final AtomicBoolean closed = new AtomicBoolean(false); @@ -108,6 +109,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { BigArrays bigArrays, DiskIoBufferPool diskIoBufferPool, OperationListener operationListener, + TranslogOperationAsserter operationAsserter, boolean fsync ) throws IOException { super(initialCheckpoint.generation, channel, path, header); @@ -136,6 +138,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { this.seenSequenceNumbers = Assertions.ENABLED ? new HashMap<>() : null; this.tragedy = tragedy; this.operationListener = operationListener; + this.operationAsserter = operationAsserter; this.fsync = fsync; this.lastModifiedTimeCache = new LastModifiedTimeCache(-1, -1, -1); } @@ -157,6 +160,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { BigArrays bigArrays, DiskIoBufferPool diskIoBufferPool, OperationListener operationListener, + TranslogOperationAsserter operationAsserter, boolean fsync ) throws IOException { final Path checkpointFile = file.getParent().resolve(Translog.CHECKPOINT_FILE_NAME); @@ -201,6 +205,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { bigArrays, diskIoBufferPool, operationListener, + operationAsserter, fsync ); } catch (Exception exception) { @@ -276,25 +281,16 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { Translog.Operation prvOp = Translog.readOperation( new BufferedChecksumStreamInput(previous.v1().streamInput(), "assertion") ); - // TODO: We haven't had timestamp for Index operations in Lucene yet, we need to loosen this check without timestamp. 
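Context for the writer change below: Translog.Index#equals, refactored above, now delegates to equalsWithoutAutoGeneratedTimestamp plus an explicit timestamp check, and the duplicate-seqNo assertion goes through TranslogOperationAsserter, which can treat two sources as equal after synthetic-source normalization (the new test file later in this diff exercises exactly that). The shared equality contract, restated as a self-contained sketch (the record is hypothetical; the field set mirrors Translog.Index):

import java.util.Objects;

record IndexOp(long version, long seqNo, long primaryTerm, String id, String source, String routing) {
    // Mirrors Translog.Index#equalsWithoutAutoGeneratedTimestamp: every field except
    // the auto-generated-id timestamp must match for two ops to count as "the same".
    boolean sameOpIgnoringTimestamp(IndexOp other) {
        return version == other.version
            && seqNo == other.seqNo
            && primaryTerm == other.primaryTerm
            && id.equals(other.id)
            && source.equals(other.source)
            && Objects.equals(routing, other.routing);
    }
}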
final boolean sameOp; if (newOp instanceof final Translog.Index o2 && prvOp instanceof final Translog.Index o1) { - sameOp = Objects.equals(o1.id(), o2.id()) - && Objects.equals(o1.source(), o2.source()) - && Objects.equals(o1.routing(), o2.routing()) - && o1.primaryTerm() == o2.primaryTerm() - && o1.seqNo() == o2.seqNo() - && o1.version() == o2.version(); + sameOp = operationAsserter.assertSameIndexOperation(o1, o2); } else if (newOp instanceof final Translog.Delete o1 && prvOp instanceof final Translog.Delete o2) { - sameOp = Objects.equals(o1.id(), o2.id()) - && o1.primaryTerm() == o2.primaryTerm() - && o1.seqNo() == o2.seqNo() - && o1.version() == o2.version(); + sameOp = o1.equals(o2); } else { sameOp = false; } - if (sameOp == false) { - throw new AssertionError( + assert sameOp + : new AssertionError( "seqNo [" + seqNo + "] was processed twice in generation [" @@ -307,7 +303,6 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable { + "]", previous.v2() ); - } } } else { seenSequenceNumbers.put( diff --git a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java index 960ae36d685e..b8f50e0b7c42 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.TranslogOperationAsserter; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.RemoveCorruptedShardDataCommand; import org.elasticsearch.index.shard.ShardPath; @@ -171,7 +172,8 @@ public class TruncateTranslogAction { translogDeletionPolicy, () -> translogGlobalCheckpoint, () -> primaryTerm, - seqNo -> {} + seqNo -> {}, + TranslogOperationAsserter.DEFAULT ); Translog.Snapshot snapshot = translog.newSnapshot(0, Long.MAX_VALUE) ) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStatsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStatsAction.java index 63bd4523f9bd..690f3155971c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStatsAction.java @@ -33,7 +33,8 @@ public class RestClusterStatsAction extends BaseRestHandler { "human-readable-total-docs-size", "verbose-dense-vector-mapping-stats", "ccs-stats", - "retrievers-usage-stats" + "retrievers-usage-stats", + "esql-stats" ); private static final Set<String> SUPPORTED_QUERY_PARAMETERS = Set.of("include_remotes", "nodeId", REST_TIMEOUT_PARAM); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorsReducer.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorsReducer.java index 6682fb2a8341..4c89877b7b1c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorsReducer.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorsReducer.java @@ -12,7 +12,10 @@ package org.elasticsearch.search.aggregations; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; +import java.util.ArrayList; +import java.util.Collection; import
java.util.HashMap; +import java.util.List; import java.util.Map; /** @@ -54,7 +57,12 @@ public final class AggregatorsReducer implements Releasable { * returns the reduced {@link InternalAggregations}. */ public InternalAggregations get() { - return InternalAggregations.from(aggByName.values().stream().map(AggregatorReducer::get).toList()); + final Collection<AggregatorReducer> reducers = aggByName.values(); + final List<InternalAggregation> aggs = new ArrayList<>(reducers.size()); + for (AggregatorReducer reducer : reducers) { + aggs.add(reducer.get()); + } + return InternalAggregations.from(aggs); } @Override diff --git a/server/src/main/java/org/elasticsearch/usage/UsageService.java b/server/src/main/java/org/elasticsearch/usage/UsageService.java index dd4895eb4bdc..5b4fa0f27bf4 100644 --- a/server/src/main/java/org/elasticsearch/usage/UsageService.java +++ b/server/src/main/java/org/elasticsearch/usage/UsageService.java @@ -26,11 +26,13 @@ public class UsageService { private final Map<String, BaseRestHandler> handlers; private final SearchUsageHolder searchUsageHolder; private final CCSUsageTelemetry ccsUsageHolder; + private final CCSUsageTelemetry esqlUsageHolder; public UsageService() { this.handlers = new HashMap<>(); this.searchUsageHolder = new SearchUsageHolder(); this.ccsUsageHolder = new CCSUsageTelemetry(); + this.esqlUsageHolder = new CCSUsageTelemetry(false); } /** @@ -89,4 +91,8 @@ public class UsageService { public CCSUsageTelemetry getCcsUsageHolder() { return ccsUsageHolder; } + + public CCSUsageTelemetry getEsqlUsageHolder() { + return esqlUsageHolder; + } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshotTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshotTests.java index a72630c327ea..6444caf08f83 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshotTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshotTests.java @@ -352,4 +352,20 @@ public class CCSTelemetrySnapshotTests extends AbstractWireSerializingTestCase<CCSTelemetrySnapshot> { + public void testUseMRTFalse() { + CCSTelemetrySnapshot empty = new CCSTelemetrySnapshot(); + // Ignore MRT counters if instructed. + empty.setUseMRT(false); + CCSTelemetrySnapshot randomWithMRT = randomValueOtherThanMany( + v -> v.getTookMrtTrue().count() == 0 || v.getTookMrtFalse().count() == 0, + this::randomCCSTelemetrySnapshot + ); + + empty.add(randomWithMRT); + assertThat(empty.getTook().count(), equalTo(randomWithMRT.getTook().count())); + assertThat(empty.getTookMrtFalse().count(), equalTo(0L)); + assertThat(empty.getTookMrtTrue().count(), equalTo(0L)); + } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetryTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetryTests.java index c4a2fdee1111..5eb2224ec5f8 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetryTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetryTests.java @@ -340,4 +340,23 @@ public class CCSUsageTelemetryTests extends ESTestCase { CCSTelemetrySnapshot expectedSnapshot = ccsUsageHolder.getCCSTelemetrySnapshot(); assertThat(snapshot, equalTo(expectedSnapshot)); } + + public void testUseMRTFalse() { + // Ignore MRT counters if instructed.
+ CCSUsageTelemetry ccsUsageHolder = new CCSUsageTelemetry(false); + + CCSUsage.Builder builder = new CCSUsage.Builder(); + builder.took(10L).setRemotesCount(1).setClient("kibana"); + builder.setFeature(MRT_FEATURE); + ccsUsageHolder.updateUsage(builder.build()); + + builder = new CCSUsage.Builder(); + builder.took(11L).setRemotesCount(1).setClient("kibana"); + ccsUsageHolder.updateUsage(builder.build()); + + CCSTelemetrySnapshot snapshot = ccsUsageHolder.getCCSTelemetrySnapshot(); + assertThat(snapshot.getTook().count(), equalTo(2L)); + assertThat(snapshot.getTookMrtFalse().count(), equalTo(0L)); + assertThat(snapshot.getTookMrtTrue().count(), equalTo(0L)); + } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java index 9bf4ad7c3cb6..fbd6e0916eef 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java @@ -130,6 +130,7 @@ public class VersionStatsTests extends AbstractWireSerializingTestCase SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - seqNo -> {} + seqNo -> {}, + TranslogOperationAsserter.DEFAULT ); translog.add(TranslogOperationsUtils.indexOp("SomeBogusId", 0, primaryTerm.get())); assertEquals(generation.translogFileGeneration(), translog.currentFileGeneration()); diff --git a/server/src/test/java/org/elasticsearch/index/engine/TranslogOperationAsserterTests.java b/server/src/test/java/org/elasticsearch/index/engine/TranslogOperationAsserterTests.java new file mode 100644 index 000000000000..b764bce464d1 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/engine/TranslogOperationAsserterTests.java @@ -0,0 +1,150 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.index.engine; + +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; + +import static org.elasticsearch.index.mapper.SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING; + +public class TranslogOperationAsserterTests extends EngineTestCase { + + @Override + protected Settings indexSettings() { + return Settings.builder() + .put(super.indexSettings()) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.name()) + .put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true) + .build(); + } + + Translog.Index toIndexOp(String source) throws IOException { + XContentParser parser = createParser(XContentType.JSON.xContent(), source); + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.copyCurrentStructure(parser); + return new Translog.Index( + "1", + 0, + 1, + 1, + new BytesArray(Strings.toString(builder)), + null, + IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP + ); + } + + EngineConfig engineConfig(boolean useSyntheticSource) { + EngineConfig config = engine.config(); + Settings.Builder settings = Settings.builder().put(config.getIndexSettings().getSettings()); + if (useSyntheticSource) { + settings.put(INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.name()); + settings.put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true); + } else { + settings.put(INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.STORED.name()); + settings.put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), false); + } + IndexMetadata imd = IndexMetadata.builder(config.getIndexSettings().getIndexMetadata()).settings(settings).build(); + return config( + new IndexSettings(imd, Settings.EMPTY), + config.getStore(), + config.getTranslogConfig().getTranslogPath(), + config.getMergePolicy(), + null + ); + } + + public void testBasic() throws Exception { + TranslogOperationAsserter syntheticAsserter = TranslogOperationAsserter.withEngineConfig(engineConfig(true)); + TranslogOperationAsserter regularAsserter = TranslogOperationAsserter.withEngineConfig(engineConfig(false)); + { + var o1 = toIndexOp(""" + { + "value": "value-1" + } + """); + var o2 = toIndexOp(""" + { + "value": [ "value-1" ] + } + """); + assertTrue(syntheticAsserter.assertSameIndexOperation(o1, o2)); + assertFalse(regularAsserter.assertSameIndexOperation(o1, o2)); + } + { + var o1 = toIndexOp(""" + { + "value": [ "value-1", "value-2" ] + } + """); + var o2 = toIndexOp(""" + { + "value": [ "value-1", "value-2" ] + } + """); + assertTrue(syntheticAsserter.assertSameIndexOperation(o1, o2)); + assertTrue(regularAsserter.assertSameIndexOperation(o1, o2)); + } + { + var o1 = toIndexOp(""" + { + "value": [ "value-2", "value-1" ] + } + """); + var o2 = toIndexOp(""" + { + "value": [ "value-1", "value-2" ] + } + """); + 
assertTrue(syntheticAsserter.assertSameIndexOperation(o1, o2)); + assertFalse(regularAsserter.assertSameIndexOperation(o1, o2)); + } + { + var o1 = toIndexOp(""" + { + "value": [ "value-1", "value-2" ] + } + """); + var o2 = toIndexOp(""" + { + "value": [ "value-1", "value-2", "value-2" ] + } + """); + assertTrue(syntheticAsserter.assertSameIndexOperation(o1, o2)); + assertFalse(regularAsserter.assertSameIndexOperation(o1, o2)); + } + { + var o1 = toIndexOp(""" + { + "value": [ "value-1", "value-2" ] + } + """); + var o2 = toIndexOp(""" + { + "value": [ "value-1", "value-2", "value-3" ] + } + """); + assertFalse(syntheticAsserter.assertSameIndexOperation(o1, o2)); + assertFalse(regularAsserter.assertSameIndexOperation(o1, o2)); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/CustomSyntheticSourceFieldLookupTests.java b/server/src/test/java/org/elasticsearch/index/mapper/CustomSyntheticSourceFieldLookupTests.java deleted file mode 100644 index c3d6bbf285d7..000000000000 --- a/server/src/test/java/org/elasticsearch/index/mapper/CustomSyntheticSourceFieldLookupTests.java +++ /dev/null @@ -1,263 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.index.mapper; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.IndexVersion; - -import java.io.IOException; -import java.util.Map; - -public class CustomSyntheticSourceFieldLookupTests extends MapperServiceTestCase { - private static String MAPPING = """ - { - "_doc": { - "properties": { - "keep_all": { - "type": "keyword", - "synthetic_source_keep": "all" - }, - "keep_arrays": { - "type": "keyword", - "synthetic_source_keep": "arrays" - }, - "fallback_impl": { - "type": "long", - "doc_values": "false" - }, - "object_keep_all": { - "properties": {}, - "synthetic_source_keep": "all" - }, - "object_keep_arrays": { - "properties": {}, - "synthetic_source_keep": "arrays" - }, - "object_disabled": { - "properties": {}, - "enabled": "false" - }, - "nested_keep_all": { - "type": "nested", - "properties": {}, - "synthetic_source_keep": "all" - }, - "nested_disabled": { - "type": "nested", - "properties": {}, - "enabled": "false" - }, - "just_field": { - "type": "boolean" - }, - "just_object": { - "properties": {} - }, - "nested_obj": { - "properties": { - "keep_all": { - "type": "keyword", - "synthetic_source_keep": "all" - }, - "keep_arrays": { - "type": "keyword", - "synthetic_source_keep": "arrays" - }, - "fallback_impl": { - "type": "long", - "doc_values": "false" - }, - "object_keep_all": { - "properties": {}, - "synthetic_source_keep": "all" - }, - "object_keep_arrays": { - "properties": {}, - "synthetic_source_keep": "arrays" - }, - "object_disabled": { - "properties": {}, - "enabled": "false" - }, - "nested_keep_all": { - "type": "nested", - "properties": {}, - "synthetic_source_keep": "all" - }, - "nested_disabled": { - "type": "nested", - "properties": {}, - "enabled": "false" - }, - "just_field": { - "type": "boolean" - }, - "just_object": { - "properties": {} - } - } - }, - 
"nested_nested": { - "properties": { - "keep_all": { - "type": "keyword", - "synthetic_source_keep": "all" - }, - "keep_arrays": { - "type": "keyword", - "synthetic_source_keep": "arrays" - }, - "fallback_impl": { - "type": "long", - "doc_values": "false" - }, - "object_keep_all": { - "properties": {}, - "synthetic_source_keep": "all" - }, - "object_keep_arrays": { - "properties": {}, - "synthetic_source_keep": "arrays" - }, - "object_disabled": { - "properties": {}, - "enabled": "false" - }, - "nested_keep_all": { - "type": "nested", - "properties": {}, - "synthetic_source_keep": "all" - }, - "nested_disabled": { - "type": "nested", - "properties": {}, - "enabled": "false" - }, - "just_field": { - "type": "boolean" - }, - "just_object": { - "properties": {} - } - } - } - } - } - } - """; - - public void testIsNoopWhenSourceIsNotSynthetic() throws IOException { - var mapping = createMapperService(MAPPING).mappingLookup().getMapping(); - var indexSettings = indexSettings(Mapper.SourceKeepMode.NONE); - var sut = new CustomSyntheticSourceFieldLookup(mapping, indexSettings, false); - - assertEquals(sut.getFieldsWithCustomSyntheticSourceHandling(), Map.of()); - } - - public void testDetectsLeafWithKeepAll() throws IOException { - var mapping = createMapperService(MAPPING).mappingLookup().getMapping(); - var indexSettings = indexSettings(Mapper.SourceKeepMode.NONE); - var sut = new CustomSyntheticSourceFieldLookup(mapping, indexSettings, true); - - var fields = sut.getFieldsWithCustomSyntheticSourceHandling(); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ALL, fields.get("keep_all")); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ALL, fields.get("nested_obj.keep_all")); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ALL, fields.get("nested_nested.keep_all")); - } - - public void testDetectsLeafWithKeepArrays() throws IOException { - var mapping = createMapperService(MAPPING).mappingLookup().getMapping(); - var indexSettings = indexSettings(Mapper.SourceKeepMode.NONE); - var sut = new CustomSyntheticSourceFieldLookup(mapping, indexSettings, true); - - var fields = sut.getFieldsWithCustomSyntheticSourceHandling(); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ARRAYS, fields.get("keep_arrays")); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ARRAYS, fields.get("nested_obj.keep_arrays")); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ARRAYS, fields.get("nested_nested.keep_arrays")); - } - - public void testDetectsLeafWithFallback() throws IOException { - var mapping = createMapperService(MAPPING).mappingLookup().getMapping(); - var indexSettings = indexSettings(Mapper.SourceKeepMode.NONE); - var sut = new CustomSyntheticSourceFieldLookup(mapping, indexSettings, true); - - var fields = sut.getFieldsWithCustomSyntheticSourceHandling(); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.FALLBACK_SYNTHETIC_SOURCE, fields.get("fallback_impl")); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.FALLBACK_SYNTHETIC_SOURCE, fields.get("nested_obj.fallback_impl")); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.FALLBACK_SYNTHETIC_SOURCE, fields.get("nested_nested.fallback_impl")); - } - - public void testDetectsObjectWithKeepAll() throws IOException { - var mapping = createMapperService(MAPPING).mappingLookup().getMapping(); - var indexSettings = indexSettings(Mapper.SourceKeepMode.NONE); - var sut = new CustomSyntheticSourceFieldLookup(mapping, indexSettings, true); - - 
var fields = sut.getFieldsWithCustomSyntheticSourceHandling(); - - assertEquals(CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ALL, fields.get("object_keep_all")); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ALL, fields.get("nested_obj.object_keep_all")); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ALL, fields.get("nested_nested.object_keep_all")); - - assertEquals(CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ALL, fields.get("nested_keep_all")); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ALL, fields.get("nested_obj.nested_keep_all")); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ALL, fields.get("nested_nested.nested_keep_all")); - } - - public void testDetectsObjectWithKeepArrays() throws IOException { - var mapping = createMapperService(MAPPING).mappingLookup().getMapping(); - var indexSettings = indexSettings(Mapper.SourceKeepMode.NONE); - var sut = new CustomSyntheticSourceFieldLookup(mapping, indexSettings, true); - - var fields = sut.getFieldsWithCustomSyntheticSourceHandling(); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ARRAYS, fields.get("object_keep_arrays")); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ARRAYS, fields.get("nested_obj.object_keep_arrays")); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ARRAYS, fields.get("nested_nested.object_keep_arrays")); - } - - public void testDetectsDisabledObject() throws IOException { - var mapping = createMapperService(MAPPING).mappingLookup().getMapping(); - var indexSettings = indexSettings(Mapper.SourceKeepMode.NONE); - var sut = new CustomSyntheticSourceFieldLookup(mapping, indexSettings, true); - - var fields = sut.getFieldsWithCustomSyntheticSourceHandling(); - - assertEquals(CustomSyntheticSourceFieldLookup.Reason.DISABLED_OBJECT, fields.get("object_disabled")); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.DISABLED_OBJECT, fields.get("nested_obj.object_disabled")); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.DISABLED_OBJECT, fields.get("nested_nested.object_disabled")); - - assertEquals(CustomSyntheticSourceFieldLookup.Reason.DISABLED_OBJECT, fields.get("nested_disabled")); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.DISABLED_OBJECT, fields.get("nested_obj.nested_disabled")); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.DISABLED_OBJECT, fields.get("nested_nested.nested_disabled")); - } - - public void testAppliesIndexLevelSourceKeepMode() throws IOException { - var mapping = createMapperService(MAPPING).mappingLookup().getMapping(); - var indexSettings = indexSettings(Mapper.SourceKeepMode.ARRAYS); - var sut = new CustomSyntheticSourceFieldLookup(mapping, indexSettings, true); - - var fields = sut.getFieldsWithCustomSyntheticSourceHandling(); - - assertEquals(CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ARRAYS, fields.get("just_field")); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ARRAYS, fields.get("nested_obj.just_field")); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ARRAYS, fields.get("nested_nested.just_field")); - - assertEquals(CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ARRAYS, fields.get("just_object")); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ARRAYS, fields.get("nested_obj.just_object")); - assertEquals(CustomSyntheticSourceFieldLookup.Reason.SOURCE_KEEP_ARRAYS, fields.get("nested_nested.just_object")); - } - - private static 
IndexSettings indexSettings(Mapper.SourceKeepMode sourceKeepMode) { - return createIndexSettings( - IndexVersion.current(), - Settings.builder().put(Mapper.SYNTHETIC_SOURCE_KEEP_INDEX_SETTING.getKey(), sourceKeepMode).build() - ); - } -} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java index 9e617f638e75..b2ba3d60d217 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java @@ -81,7 +81,6 @@ public class DocumentMapperTests extends MapperServiceTestCase { merged, merged.toCompressedXContent(), IndexVersion.current(), - null, MapperMetrics.NOOP, "myIndex" ); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserListenerTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserListenerTests.java deleted file mode 100644 index a1dafacfb7b2..000000000000 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserListenerTests.java +++ /dev/null @@ -1,253 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.index.mapper; - -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -public class DocumentParserListenerTests extends MapperServiceTestCase { - private static class MemorizingDocumentParserListener implements DocumentParserListener { - private final List events = new ArrayList<>(); - private final List tokens = new ArrayList<>(); - - @Override - public boolean isActive() { - return true; - } - - @Override - public void consume(Token token) throws IOException { - // Tokens contains information tied to current parser state so we need to "materialize" them. 
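The "materialize" comment above is the crux of this listener: a StringAsCharArrayValue points into the parser's internal, reusable char buffer, so a lazily captured token would observe whatever the parser writes there next. A generic two-variable illustration of the hazard, not the parser's actual buffer management:

    import java.util.function.Supplier;

    class BufferReuseHazard {
        public static void main(String[] args) {
            char[] buffer = { 'o', 'n', 'e' };
            Supplier<String> lazy = () -> new String(buffer); // captures the live buffer
            String eager = new String(buffer);                // materializes "one" now
            buffer[0] = 'x';                                  // the owner reuses its buffer
            System.out.println(lazy.get()); // "xne" -- stale view of reused storage
            System.out.println(eager);      // "one" -- safe, copied while still valid
        }
    }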
- if (token instanceof Token.StringAsCharArrayValue charArray) { - var string = String.copyValueOf(charArray.buffer(), charArray.offset(), charArray.length()); - tokens.add((Token.ValueToken) () -> string); - } else if (token instanceof Token.ValueToken v) { - var value = v.value(); - tokens.add((Token.ValueToken) () -> value); - } else { - tokens.add(token); - } - } - - @Override - public void consume(Event event) throws IOException { - events.add(event); - } - - @Override - public Output finish() { - return new Output(List.of()); - } - - public List getTokens() { - return tokens; - } - - public List getEvents() { - return events; - } - } - - public void testEventFlow() throws IOException { - var mapping = XContentBuilder.builder(XContentType.JSON.xContent()).startObject().startObject("_doc").startObject("properties"); - { - mapping.startObject("leaf").field("type", "keyword").endObject(); - mapping.startObject("leaf_array").field("type", "keyword").endObject(); - - mapping.startObject("object").startObject("properties"); - { - mapping.startObject("leaf").field("type", "keyword").endObject(); - mapping.startObject("leaf_array").field("type", "keyword").endObject(); - } - mapping.endObject().endObject(); - - mapping.startObject("object_array").startObject("properties"); - { - mapping.startObject("leaf").field("type", "keyword").endObject(); - mapping.startObject("leaf_array").field("type", "keyword").endObject(); - } - mapping.endObject().endObject(); - } - mapping.endObject().endObject().endObject(); - var mappingService = createSytheticSourceMapperService(mapping); - - XContentType xContentType = randomFrom(XContentType.values()); - - var listener = new MemorizingDocumentParserListener(); - var documentParser = new DocumentParser( - XContentParserConfiguration.EMPTY, - mappingService.parserContext(), - (ml, xct) -> new DocumentParser.Listeners.Single(listener) - ); - - var source = XContentBuilder.builder(xContentType.xContent()); - source.startObject(); - { - source.field("leaf", "leaf"); - source.array("leaf_array", "one", "two"); - source.startObject("object"); - { - source.field("leaf", "leaf"); - source.array("leaf_array", "one", "two"); - } - source.endObject(); - source.startArray("object_array"); - { - source.startObject(); - { - source.field("leaf", "leaf"); - source.array("leaf_array", "one", "two"); - } - source.endObject(); - } - source.endArray(); - } - source.endObject(); - - documentParser.parseDocument(new SourceToParse("id1", BytesReference.bytes(source), xContentType), mappingService.mappingLookup()); - var events = listener.getEvents(); - - assertEquals("_doc", ((DocumentParserListener.Event.DocumentStart) events.get(0)).rootObjectMapper().fullPath()); - - assertLeafEvents(events, 1, "", "_doc", false); - - var objectStart = (DocumentParserListener.Event.ObjectStart) events.get(5); - assertEquals("object", objectStart.objectMapper().fullPath()); - assertEquals("_doc", objectStart.parentMapper().fullPath()); - assertFalse(objectStart.insideObjectArray()); - assertLeafEvents(events, 6, "object.", "object", false); - - var objectArrayStart = (DocumentParserListener.Event.ObjectArrayStart) events.get(10); - assertEquals("object_array", objectArrayStart.objectMapper().fullPath()); - assertEquals("_doc", objectArrayStart.parentMapper().fullPath()); - - var objectInArrayStart = (DocumentParserListener.Event.ObjectStart) events.get(11); - assertEquals("object_array", objectInArrayStart.objectMapper().fullPath()); - assertEquals("_doc", 
objectInArrayStart.parentMapper().fullPath()); - assertTrue(objectInArrayStart.insideObjectArray()); - assertLeafEvents(events, 12, "object_array.", "object_array", true); - } - - public void testTokenFlow() throws IOException { - var mapping = XContentBuilder.builder(XContentType.JSON.xContent()) - .startObject() - .startObject("_doc") - .field("enabled", false) - .endObject() - .endObject(); - var mappingService = createSytheticSourceMapperService(mapping); - - XContentType xContentType = randomFrom(XContentType.values()); - - var listener = new MemorizingDocumentParserListener(); - var documentParser = new DocumentParser( - XContentParserConfiguration.EMPTY, - mappingService.parserContext(), - (ml, xct) -> new DocumentParser.Listeners.Single(listener) - ); - - var source = XContentBuilder.builder(xContentType.xContent()); - source.startObject(); - { - source.field("leaf", "leaf"); - source.array("leaf_array", "one", "two"); - source.startObject("object"); - { - source.field("leaf", "leaf"); - source.array("leaf_array", "one", "two"); - } - source.endObject(); - source.startArray("object_array"); - { - source.startObject(); - { - source.field("leaf", "leaf"); - source.array("leaf_array", "one", "two"); - } - source.endObject(); - } - source.endArray(); - } - source.endObject(); - - documentParser.parseDocument(new SourceToParse("id1", BytesReference.bytes(source), xContentType), mappingService.mappingLookup()); - var tokens = listener.getTokens(); - assertTrue(tokens.get(0) instanceof DocumentParserListener.Token.StartObject); - { - assertLeafTokens(tokens, 1); - assertEquals("object", ((DocumentParserListener.Token.FieldName) tokens.get(8)).name()); - assertTrue(tokens.get(9) instanceof DocumentParserListener.Token.StartObject); - { - assertLeafTokens(tokens, 10); - } - assertTrue(tokens.get(17) instanceof DocumentParserListener.Token.EndObject); - assertEquals("object_array", ((DocumentParserListener.Token.FieldName) tokens.get(18)).name()); - assertTrue(tokens.get(19) instanceof DocumentParserListener.Token.StartArray); - { - assertTrue(tokens.get(20) instanceof DocumentParserListener.Token.StartObject); - { - assertLeafTokens(tokens, 21); - } - assertTrue(tokens.get(28) instanceof DocumentParserListener.Token.EndObject); - } - assertTrue(tokens.get(29) instanceof DocumentParserListener.Token.EndArray); - } - assertTrue(tokens.get(30) instanceof DocumentParserListener.Token.EndObject); - } - - private void assertLeafEvents( - List events, - int start, - String prefix, - String parent, - boolean inObjectArray - ) { - var leafValue = (DocumentParserListener.Event.LeafValue) events.get(start); - assertEquals(prefix + "leaf", leafValue.fieldMapper().fullPath()); - assertEquals(parent, leafValue.parentMapper().fullPath()); - assertFalse(leafValue.isArray()); - assertFalse(leafValue.isContainer()); - assertEquals(inObjectArray, leafValue.insideObjectArray()); - - var leafArray = (DocumentParserListener.Event.LeafArrayStart) events.get(start + 1); - assertEquals(prefix + "leaf_array", leafArray.fieldMapper().fullPath()); - assertEquals(parent, leafArray.parentMapper().fullPath()); - - var arrayValue1 = (DocumentParserListener.Event.LeafValue) events.get(start + 2); - assertEquals(prefix + "leaf_array", arrayValue1.fieldMapper().fullPath()); - assertEquals(parent, arrayValue1.parentMapper().fullPath()); - assertFalse(arrayValue1.isArray()); - assertFalse(arrayValue1.isContainer()); - assertEquals(inObjectArray, leafValue.insideObjectArray()); - - var arrayValue2 = 
(DocumentParserListener.Event.LeafValue) events.get(start + 3); - assertEquals(prefix + "leaf_array", arrayValue2.fieldMapper().fullPath()); - assertEquals(parent, arrayValue2.parentMapper().fullPath()); - assertFalse(arrayValue2.isArray()); - assertFalse(arrayValue2.isContainer()); - assertEquals(inObjectArray, leafValue.insideObjectArray()); - } - - private void assertLeafTokens(List tokens, int start) throws IOException { - assertEquals("leaf", ((DocumentParserListener.Token.FieldName) tokens.get(start)).name()); - assertEquals("leaf", ((DocumentParserListener.Token.ValueToken) tokens.get(start + 1)).value()); - assertEquals("leaf_array", ((DocumentParserListener.Token.FieldName) tokens.get(start + 2)).name()); - assertTrue(tokens.get(start + 3) instanceof DocumentParserListener.Token.StartArray); - assertEquals("one", ((DocumentParserListener.Token.ValueToken) tokens.get(start + 4)).value()); - assertEquals("two", ((DocumentParserListener.Token.ValueToken) tokens.get(start + 5)).value()); - assertTrue(tokens.get(start + 6) instanceof DocumentParserListener.Token.EndArray); - } -} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index 3699f97e243a..d128b25038a5 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -2688,7 +2688,6 @@ public class DocumentParserTests extends MapperServiceTestCase { newMapping, newMapping.toCompressedXContent(), IndexVersion.current(), - mapperService.getIndexSettings(), MapperMetrics.NOOP, "myIndex" ); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java index 1f020520e7e3..d4d0e67ff414 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java @@ -9,11 +9,8 @@ package org.elasticsearch.index.mapper; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; @@ -78,14 +75,7 @@ public class DynamicFieldsBuilderTests extends ESTestCase { ).build(MapperBuilderContext.root(false, false)); Mapping mapping = new Mapping(root, new MetadataFieldMapper[] { sourceMapper }, Map.of()); - IndexMetadata indexMetadata = IndexMetadata.builder("index") - .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())) - .numberOfShards(1) - .numberOfReplicas(0) - .build(); - IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY); - - DocumentParserContext ctx = new TestDocumentParserContext(MappingLookup.fromMapping(mapping, indexSettings), sourceToParse) { + DocumentParserContext ctx = new TestDocumentParserContext(MappingLookup.fromMapping(mapping), sourceToParse) { @Override public XContentParser parser() { return parser; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java 
b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java index dbfc1f114fff..e385177b8714 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java @@ -204,6 +204,6 @@ public class FieldAliasMapperValidationTests extends ESTestCase { new MetadataFieldMapper[0], Collections.emptyMap() ); - return MappingLookup.fromMappers(mapping, fieldMappers, objectMappers, fieldAliasMappers, emptyList(), null); + return MappingLookup.fromMappers(mapping, fieldMappers, objectMappers, fieldAliasMappers, emptyList()); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldTypeTests.java index 67b62530f344..4a45824342c7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldTypeTests.java @@ -22,6 +22,8 @@ import org.elasticsearch.test.ESTestCase; import java.util.List; import java.util.stream.Stream; +import static java.util.Collections.emptyList; + public class FieldNamesFieldTypeTests extends ESTestCase { public void testTermQuery() { @@ -34,6 +36,7 @@ public class FieldNamesFieldTypeTests extends ESTestCase { settings ); List mappers = Stream.of(fieldNamesFieldType, fieldType).map(MockFieldMapper::new).toList(); + MappingLookup mappingLookup = MappingLookup.fromMappers(Mapping.EMPTY, mappers, emptyList()); SearchExecutionContext searchExecutionContext = SearchExecutionContextHelper.createSimple(indexSettings, null, null); Query termQuery = fieldNamesFieldType.termQuery("field_name", searchExecutionContext); assertEquals(new TermQuery(new Term(FieldNamesFieldMapper.CONTENT_TYPE, "field_name")), termQuery); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java index 5c9765eefb98..14902aa419b9 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java @@ -848,7 +848,7 @@ public class IgnoredSourceFieldMapperTests extends MapperServiceTestCase { b.field("bool_value", true); }); assertEquals(""" - {"bool_value":true,"path":{"int_value":[10,20]}}""", syntheticSource); + {"bool_value":true,"path":{"int_value":[20,10]}}""", syntheticSource); } public void testIndexStoredArraySourceNestedValueArray() throws IOException { @@ -912,7 +912,7 @@ public class IgnoredSourceFieldMapperTests extends MapperServiceTestCase { b.endObject(); }); assertEquals(""" - {"path":{"bool_value":true,"int_value":[10,20,30],"obj":{"foo":[1,2]}}}""", syntheticSource); + {"path":{"bool_value":true,"int_value":[10,20,30],"obj":{"foo":[2,1]}}}""", syntheticSource); } public void testFieldStoredArraySourceNestedValueArray() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java index 71edce3d1549..fd44e68df19a 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java @@ -13,12 +13,8 @@ import org.apache.lucene.analysis.Analyzer; import 
org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.TimeSeriesParams.MetricType; @@ -53,13 +49,7 @@ public class MappingLookupTests extends ESTestCase { new MetadataFieldMapper[0], Collections.emptyMap() ); - IndexMetadata indexMetadata = IndexMetadata.builder("index") - .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())) - .numberOfShards(1) - .numberOfReplicas(0) - .build(); - IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY); - return MappingLookup.fromMappers(mapping, fieldMappers, objectMappers, indexSettings); + return MappingLookup.fromMappers(mapping, fieldMappers, objectMappers); } public void testOnlyRuntimeField() { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java index 289ec24f7d3b..b87ab09c530d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java @@ -107,7 +107,7 @@ public class MappingParserTests extends MapperServiceTestCase { b.endObject(); }); Mapping mapping = createMappingParser(Settings.EMPTY).parse("_doc", new CompressedXContent(BytesReference.bytes(builder))); - MappingLookup mappingLookup = MappingLookup.fromMapping(mapping, null); + MappingLookup mappingLookup = MappingLookup.fromMapping(mapping); assertNotNull(mappingLookup.getMapper("foo.bar")); assertNotNull(mappingLookup.getMapper("foo.baz.deep.field")); assertNotNull(mappingLookup.objectMappers().get("foo")); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/SyntheticSourceDocumentParserListenerTests.java b/server/src/test/java/org/elasticsearch/index/mapper/SyntheticSourceDocumentParserListenerTests.java deleted file mode 100644 index b5496e1001bf..000000000000 --- a/server/src/test/java/org/elasticsearch/index/mapper/SyntheticSourceDocumentParserListenerTests.java +++ /dev/null @@ -1,385 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
- */ - -package org.elasticsearch.index.mapper; - -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentType; - -import java.io.IOException; -import java.math.BigInteger; - -public class SyntheticSourceDocumentParserListenerTests extends MapperServiceTestCase { - public void testStoreLeafValue() throws IOException { - XContentType xContentType = randomFrom(XContentType.values()); - - var mapping = fieldMapping(b -> b.field("type", "long").field("synthetic_source_keep", "all")); - var mappingLookup = createSytheticSourceMapperService(mapping).mappingLookup(); - var sut = new SyntheticSourceDocumentParserListener(mappingLookup, xContentType); - - var doc = new LuceneDocument(); - - var value = XContentBuilder.builder(xContentType.xContent()).value(1234L); - var parser = createParser(value); - parser.nextToken(); - - sut.consume( - new DocumentParserListener.Event.LeafValue( - (FieldMapper) mappingLookup.getMapper("field"), - false, - mappingLookup.getMapping().getRoot(), - doc, - parser - ) - ); - - var output = sut.finish(); - - assertEquals(1, output.ignoredSourceValues().size()); - var valueToStore = output.ignoredSourceValues().get(0); - assertEquals("field", valueToStore.name()); - var decoded = XContentBuilder.builder(xContentType.xContent()); - XContentDataHelper.decodeAndWrite(decoded, valueToStore.value()); - assertEquals(BytesReference.bytes(value), BytesReference.bytes(decoded)); - } - - public void testStoreLeafArray() throws IOException { - XContentType xContentType = randomFrom(XContentType.values()); - - var mapping = fieldMapping(b -> b.field("type", "long").field("synthetic_source_keep", "all")); - var mappingLookup = createSytheticSourceMapperService(mapping).mappingLookup(); - var sut = new SyntheticSourceDocumentParserListener(mappingLookup, xContentType); - - var values = randomList(0, 10, ESTestCase::randomLong); - - var doc = new LuceneDocument(); - - sut.consume( - new DocumentParserListener.Event.LeafArrayStart( - (FieldMapper) mappingLookup.getMapper("field"), - mappingLookup.getMapping().getRoot(), - doc - ) - ); - for (long l : values) { - sut.consume((DocumentParserListener.Token.ValueToken) () -> l); - } - sut.consume(DocumentParserListener.Token.END_ARRAY); - - var output = sut.finish(); - - assertEquals(1, output.ignoredSourceValues().size()); - var valueToStore = output.ignoredSourceValues().get(0); - assertEquals("field", valueToStore.name()); - - var decoded = XContentBuilder.builder(xContentType.xContent()); - XContentDataHelper.decodeAndWrite(decoded, valueToStore.value()); - - var parser = createParser(decoded); - assertEquals(XContentParser.Token.START_ARRAY, parser.nextToken()); - for (long l : values) { - parser.nextToken(); - assertEquals(XContentParser.Token.VALUE_NUMBER, parser.currentToken()); - assertEquals(l, parser.longValue()); - } - assertEquals(XContentParser.Token.END_ARRAY, parser.nextToken()); - } - - public void testStoreObject() throws IOException { - XContentType xContentType = randomFrom(XContentType.values()); - - var mapping = fieldMapping(b -> b.field("type", "object").field("synthetic_source_keep", "all")); - var mappingLookup = createSytheticSourceMapperService(mapping).mappingLookup(); - var sut = new SyntheticSourceDocumentParserListener(mappingLookup, xContentType); - - var names = randomList(0, 10, () -> randomAlphaOfLength(10)); - var 
values = randomList(names.size(), names.size(), () -> randomAlphaOfLength(10)); - - var doc = new LuceneDocument(); - - sut.consume( - new DocumentParserListener.Event.ObjectStart( - mappingLookup.objectMappers().get("field"), - false, - mappingLookup.getMapping().getRoot(), - doc - ) - ); - for (int i = 0; i < names.size(); i++) { - sut.consume(new DocumentParserListener.Token.FieldName(names.get(i))); - var value = values.get(i); - sut.consume((DocumentParserListener.Token.ValueToken) () -> value); - } - sut.consume(DocumentParserListener.Token.END_OBJECT); - - var output = sut.finish(); - - assertEquals(1, output.ignoredSourceValues().size()); - var valueToStore = output.ignoredSourceValues().get(0); - assertEquals("field", valueToStore.name()); - - var decoded = XContentBuilder.builder(xContentType.xContent()); - XContentDataHelper.decodeAndWrite(decoded, valueToStore.value()); - - var parser = createParser(decoded); - assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); - for (int i = 0; i < names.size(); i++) { - parser.nextToken(); - assertEquals(XContentParser.Token.FIELD_NAME, parser.currentToken()); - assertEquals(names.get(i), parser.currentName()); - - assertEquals(XContentParser.Token.VALUE_STRING, parser.nextToken()); - assertEquals(values.get(i), parser.text()); - } - assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); - } - - public void testStoreObjectArray() throws IOException { - XContentType xContentType = randomFrom(XContentType.values()); - - var mapping = fieldMapping(b -> b.field("type", "object").field("synthetic_source_keep", "all")); - var mappingLookup = createSytheticSourceMapperService(mapping).mappingLookup(); - var sut = new SyntheticSourceDocumentParserListener(mappingLookup, xContentType); - - var names = randomList(0, 10, () -> randomAlphaOfLength(10)); - var values = randomList(names.size(), names.size(), () -> randomAlphaOfLength(10)); - - var doc = new LuceneDocument(); - - sut.consume( - new DocumentParserListener.Event.ObjectArrayStart( - mappingLookup.objectMappers().get("field"), - mappingLookup.getMapping().getRoot(), - doc - ) - ); - for (int i = 0; i < names.size(); i++) { - sut.consume(DocumentParserListener.Token.START_OBJECT); - - sut.consume(new DocumentParserListener.Token.FieldName(names.get(i))); - var value = values.get(i); - sut.consume((DocumentParserListener.Token.ValueToken) () -> value); - - sut.consume(DocumentParserListener.Token.END_OBJECT); - } - sut.consume(DocumentParserListener.Token.END_ARRAY); - - var output = sut.finish(); - - assertEquals(1, output.ignoredSourceValues().size()); - var valueToStore = output.ignoredSourceValues().get(0); - assertEquals("field", valueToStore.name()); - - var decoded = XContentBuilder.builder(xContentType.xContent()); - XContentDataHelper.decodeAndWrite(decoded, valueToStore.value()); - - var parser = createParser(decoded); - assertEquals(XContentParser.Token.START_ARRAY, parser.nextToken()); - for (int i = 0; i < names.size(); i++) { - assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); - assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); - assertEquals(names.get(i), parser.currentName()); - - assertEquals(XContentParser.Token.VALUE_STRING, parser.nextToken()); - assertEquals(values.get(i), parser.text()); - - assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); - } - assertEquals(XContentParser.Token.END_ARRAY, parser.nextToken()); - } - - public void testStashedLeafValue() throws IOException { - XContentType 
xContentType = randomFrom(XContentType.values()); - - var mapping = fieldMapping(b -> b.field("type", "boolean").field("synthetic_source_keep", "arrays")); - var mappingLookup = createSytheticSourceMapperService(mapping).mappingLookup(); - var sut = new SyntheticSourceDocumentParserListener(mappingLookup, xContentType); - - var doc = new LuceneDocument(); - - var value = XContentBuilder.builder(xContentType.xContent()).value(false); - var parser = createParser(value); - parser.nextToken(); - - sut.consume( - new DocumentParserListener.Event.LeafValue( - (FieldMapper) mappingLookup.getMapper("field"), - true, - mappingLookup.getMapping().getRoot(), - doc, - parser - ) - ); - - sut.consume( - new DocumentParserListener.Event.LeafValue( - (FieldMapper) mappingLookup.getMapper("field"), - true, - mappingLookup.getMapping().getRoot(), - doc, - parser - ) - ); - - var output = sut.finish(); - - // Single values are optimized away because there are no arrays mixed in and regular synthetic source logic is sufficient - assertEquals(0, output.ignoredSourceValues().size()); - } - - public void testStashedMixedValues() throws IOException { - XContentType xContentType = randomFrom(XContentType.values()); - - var mapping = fieldMapping(b -> b.field("type", "boolean").field("synthetic_source_keep", "arrays")); - var mappingLookup = createSytheticSourceMapperService(mapping).mappingLookup(); - var sut = new SyntheticSourceDocumentParserListener(mappingLookup, xContentType); - - var doc = new LuceneDocument(); - - var value = XContentBuilder.builder(xContentType.xContent()).value(false); - var parser = createParser(value); - parser.nextToken(); - - sut.consume( - new DocumentParserListener.Event.LeafValue( - (FieldMapper) mappingLookup.getMapper("field"), - true, - mappingLookup.getMapping().getRoot(), - doc, - parser - ) - ); - - sut.consume( - new DocumentParserListener.Event.LeafValue( - (FieldMapper) mappingLookup.getMapper("field"), - true, - mappingLookup.getMapping().getRoot(), - doc, - parser - ) - ); - - sut.consume( - new DocumentParserListener.Event.LeafArrayStart( - (FieldMapper) mappingLookup.getMapper("field"), - mappingLookup.getMapping().getRoot(), - doc - ) - ); - sut.consume((DocumentParserListener.Token.ValueToken) () -> true); - sut.consume((DocumentParserListener.Token.ValueToken) () -> true); - sut.consume(DocumentParserListener.Token.END_ARRAY); - - var output = sut.finish(); - - // Both arrays and individual values are stored. 
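Unpacking that count before the assertion below: with synthetic_source_keep set to arrays, the two stashed singletons alone would be optimized away (testStashedLeafValue above expects zero stored values), but once a genuine multi-element array for the same field arrives, the relative order of singles and array elements can no longer be reconstructed by regular synthetic source, so all three occurrences are kept. Conceptually, for a document shaped like

    // { "field": false, "field": false, "field": [true, true] }
    // stored in ignored source: false, false, [true, true]  -> 3 values

the listener must preserve each occurrence separately.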
- assertEquals(3, output.ignoredSourceValues().size()); - } - - public void testStashedObjectValue() throws IOException { - XContentType xContentType = randomFrom(XContentType.values()); - - var mapping = fieldMapping(b -> b.field("type", "object").field("synthetic_source_keep", "arrays")); - var mappingLookup = createSytheticSourceMapperService(mapping).mappingLookup(); - var sut = new SyntheticSourceDocumentParserListener(mappingLookup, xContentType); - - var doc = new LuceneDocument(); - - var value = XContentBuilder.builder(xContentType.xContent()).value(1234L); - var parser = createParser(value); - parser.nextToken(); - - sut.consume( - new DocumentParserListener.Event.ObjectStart( - mappingLookup.objectMappers().get("field"), - true, - mappingLookup.getMapping().getRoot(), - doc - ) - ); - sut.consume(new DocumentParserListener.Token.FieldName("hello")); - sut.consume((DocumentParserListener.Token.ValueToken) () -> BigInteger.valueOf(13)); - sut.consume(DocumentParserListener.Token.END_OBJECT); - - var output = sut.finish(); - - // Single value optimization does not work for objects because it is possible that one of the fields - // of this object needs to be stored in ignored source. - // Because we stored the entire object we didn't store individual fields separately. - // Optimizing this away would lead to missing data from synthetic source in some cases. - // We could do both, but we don't do it now. - assertEquals(1, output.ignoredSourceValues().size()); - } - - public void testSingleElementArray() throws IOException { - XContentType xContentType = randomFrom(XContentType.values()); - - var mapping = fieldMapping(b -> b.field("type", "boolean").field("synthetic_source_keep", "arrays")); - var mappingLookup = createSytheticSourceMapperService(mapping).mappingLookup(); - var sut = new SyntheticSourceDocumentParserListener(mappingLookup, xContentType); - - var doc = new LuceneDocument(); - - sut.consume( - new DocumentParserListener.Event.LeafArrayStart( - (FieldMapper) mappingLookup.getMapper("field"), - mappingLookup.getMapping().getRoot(), - doc - ) - ); - sut.consume((DocumentParserListener.Token.ValueToken) () -> true); - sut.consume(DocumentParserListener.Token.END_ARRAY); - - var output = sut.finish(); - - // Since there is only one value in the array, order does not matter, - // and we can drop ignored source value and use standard synthetic source logic. 
- assertEquals(0, output.ignoredSourceValues().size()); - } - - public void testMultipleSingleElementArrays() throws IOException { - XContentType xContentType = randomFrom(XContentType.values()); - - var mapping = fieldMapping(b -> b.field("type", "boolean").field("synthetic_source_keep", "arrays")); - var mappingLookup = createSytheticSourceMapperService(mapping).mappingLookup(); - var sut = new SyntheticSourceDocumentParserListener(mappingLookup, xContentType); - - var doc = new LuceneDocument(); - - sut.consume( - new DocumentParserListener.Event.LeafArrayStart( - (FieldMapper) mappingLookup.getMapper("field"), - mappingLookup.getMapping().getRoot(), - doc - ) - ); - sut.consume((DocumentParserListener.Token.ValueToken) () -> true); - sut.consume(DocumentParserListener.Token.END_ARRAY); - - sut.consume( - new DocumentParserListener.Event.LeafArrayStart( - (FieldMapper) mappingLookup.getMapper("field"), - mappingLookup.getMapping().getRoot(), - doc - ) - ); - sut.consume((DocumentParserListener.Token.ValueToken) () -> false); - sut.consume(DocumentParserListener.Token.END_ARRAY); - - var output = sut.finish(); - - // Since there is only one value in the array, order does not matter, - // and we can drop ignored source value. - assertEquals(0, output.ignoredSourceValues().size()); - } -} diff --git a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java index df2c9466f3b7..dc70c44a8912 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java @@ -297,7 +297,7 @@ public class SearchExecutionContextTests extends ESTestCase { new MetadataFieldMapper[0], Collections.emptyMap() ); - return MappingLookup.fromMappers(mapping, mappers, Collections.emptyList(), null); + return MappingLookup.fromMappers(mapping, mappers, Collections.emptyList()); } public void testSearchRequestRuntimeFields() { @@ -389,13 +389,7 @@ public class SearchExecutionContextTests extends ESTestCase { new KeywordFieldMapper.Builder("cat", IndexVersion.current()).ignoreAbove(100) ).build(MapperBuilderContext.root(true, false)); Mapping mapping = new Mapping(root, new MetadataFieldMapper[] { sourceMapper }, Map.of()); - IndexMetadata indexMetadata = IndexMetadata.builder("index") - .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())) - .numberOfShards(1) - .numberOfReplicas(0) - .build(); - IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY); - MappingLookup lookup = MappingLookup.fromMapping(mapping, indexSettings); + MappingLookup lookup = MappingLookup.fromMapping(mapping); SearchExecutionContext sec = createSearchExecutionContext("index", "", lookup, Map.of()); assertTrue(sec.isSourceSynthetic()); diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java index f72152bd7ff8..9aa847c837e9 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Tuple; +import 
org.elasticsearch.index.engine.TranslogOperationAsserter; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.mockito.Mockito; @@ -95,6 +96,7 @@ public class TranslogDeletionPolicyTests extends ESTestCase { BigArrays.NON_RECYCLING_INSTANCE, TranslogTests.RANDOMIZING_IO_BUFFERS, TranslogConfig.NOOP_OPERATION_LISTENER, + TranslogOperationAsserter.DEFAULT, true ); writer = Mockito.spy(writer); diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 97f49df41d09..99f2e2a562ee 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -53,6 +53,7 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.Engine.Operation.Origin; +import org.elasticsearch.index.engine.TranslogOperationAsserter; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.LuceneDocument; import org.elasticsearch.index.mapper.ParsedDocument; @@ -224,7 +225,8 @@ public class TranslogTests extends ESTestCase { new TranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - getPersistedSeqNoConsumer() + getPersistedSeqNoConsumer(), + TranslogOperationAsserter.DEFAULT ); } @@ -235,7 +237,8 @@ public class TranslogTests extends ESTestCase { new TranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - getPersistedSeqNoConsumer() + getPersistedSeqNoConsumer(), + TranslogOperationAsserter.DEFAULT ); } @@ -270,7 +273,8 @@ public class TranslogTests extends ESTestCase { new TranslogDeletionPolicy(), () -> globalCheckpoint.get(), primaryTerm::get, - getPersistedSeqNoConsumer() + getPersistedSeqNoConsumer(), + TranslogOperationAsserter.DEFAULT ); } @@ -1444,7 +1448,8 @@ public class TranslogTests extends ESTestCase { new TranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - persistedSeqNos::add + persistedSeqNos::add, + TranslogOperationAsserter.DEFAULT ) { @Override ChannelFactory getChannelFactory() { @@ -1559,7 +1564,8 @@ public class TranslogTests extends ESTestCase { new TranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - persistedSeqNos::add + persistedSeqNos::add, + TranslogOperationAsserter.DEFAULT ) { @Override ChannelFactory getChannelFactory() { @@ -1746,7 +1752,8 @@ public class TranslogTests extends ESTestCase { translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - seqNo -> {} + seqNo -> {}, + TranslogOperationAsserter.DEFAULT ); assertEquals( "lastCommitted must be 1 less than current", @@ -1803,7 +1810,8 @@ public class TranslogTests extends ESTestCase { deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - seqNo -> {} + seqNo -> {}, + TranslogOperationAsserter.DEFAULT ) ) { assertNotNull(translogGeneration); @@ -1830,7 +1838,8 @@ public class TranslogTests extends ESTestCase { deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - seqNo -> {} + seqNo -> {}, + TranslogOperationAsserter.DEFAULT ) ) { assertNotNull(translogGeneration); @@ -1894,7 +1903,8 @@ public class TranslogTests extends ESTestCase { deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - seqNo -> {} + seqNo -> {}, + 
TranslogOperationAsserter.DEFAULT ) ) { assertNotNull(translogGeneration); @@ -1922,7 +1932,8 @@ public class TranslogTests extends ESTestCase { deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - seqNo -> {} + seqNo -> {}, + TranslogOperationAsserter.DEFAULT ) ) { assertNotNull(translogGeneration); @@ -1984,7 +1995,15 @@ public class TranslogTests extends ESTestCase { final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); final TranslogCorruptedException translogCorruptedException = expectThrows( TranslogCorruptedException.class, - () -> new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}) + () -> new Translog( + config, + translogUUID, + deletionPolicy, + () -> SequenceNumbers.NO_OPS_PERFORMED, + primaryTerm::get, + seqNo -> {}, + TranslogOperationAsserter.DEFAULT + ) ); assertThat( translogCorruptedException.getMessage(), @@ -2010,7 +2029,8 @@ public class TranslogTests extends ESTestCase { deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - seqNo -> {} + seqNo -> {}, + TranslogOperationAsserter.DEFAULT ) ) { assertNotNull(translogGeneration); @@ -2293,7 +2313,8 @@ public class TranslogTests extends ESTestCase { new TranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - seqNo -> {} + seqNo -> {}, + TranslogOperationAsserter.DEFAULT ); fail("translog doesn't belong to this UUID"); } catch (TranslogCorruptedException ex) { @@ -2305,7 +2326,8 @@ public class TranslogTests extends ESTestCase { deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - seqNo -> {} + seqNo -> {}, + TranslogOperationAsserter.DEFAULT ); try (Translog.Snapshot snapshot = this.translog.newSnapshot(randomLongBetween(0, firstUncommitted), Long.MAX_VALUE)) { for (int i = firstUncommitted; i < translogOperations; i++) { @@ -2503,7 +2525,8 @@ public class TranslogTests extends ESTestCase { deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - seqNo -> {} + seqNo -> {}, + TranslogOperationAsserter.DEFAULT ) ) { assertEquals( @@ -2649,7 +2672,8 @@ public class TranslogTests extends ESTestCase { new TranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - seqNo -> {} + seqNo -> {}, + TranslogOperationAsserter.DEFAULT ); Translog.Snapshot snapshot = tlog.newSnapshot() ) { @@ -2708,7 +2732,8 @@ public class TranslogTests extends ESTestCase { deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - seqNo -> {} + seqNo -> {}, + TranslogOperationAsserter.DEFAULT ); assertThat(translog.getMinFileGeneration(), equalTo(1L)); // no trimming done yet, just recovered @@ -2771,7 +2796,8 @@ public class TranslogTests extends ESTestCase { deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - seqNo -> {} + seqNo -> {}, + TranslogOperationAsserter.DEFAULT ) ) { // we don't know when things broke exactly @@ -2875,7 +2901,15 @@ public class TranslogTests extends ESTestCase { primaryTerm.get() ); } - return new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}) { + return new Translog( + config, + translogUUID, + deletionPolicy, + () -> SequenceNumbers.NO_OPS_PERFORMED, + primaryTerm::get, + seqNo -> {}, + TranslogOperationAsserter.DEFAULT + ) { @Override ChannelFactory getChannelFactory() { return channelFactory; @@ -3019,7 +3053,8 @@ public class TranslogTests extends ESTestCase { new 
TranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - seqNo -> {} + seqNo -> {}, + TranslogOperationAsserter.DEFAULT ) { @Override protected TranslogWriter createWriter( @@ -3087,7 +3122,8 @@ public class TranslogTests extends ESTestCase { translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - seqNo -> {} + seqNo -> {}, + TranslogOperationAsserter.DEFAULT ) ); assertEquals(ex.getMessage(), "failed to create new translog file"); @@ -3114,7 +3150,8 @@ public class TranslogTests extends ESTestCase { deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - seqNo -> {} + seqNo -> {}, + TranslogOperationAsserter.DEFAULT ) ) { assertFalse(tlog.syncNeeded()); @@ -3130,7 +3167,15 @@ public class TranslogTests extends ESTestCase { TranslogException ex = expectThrows( TranslogException.class, - () -> new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, seqNo -> {}) + () -> new Translog( + config, + translogUUID, + deletionPolicy, + () -> SequenceNumbers.NO_OPS_PERFORMED, + primaryTerm::get, + seqNo -> {}, + TranslogOperationAsserter.DEFAULT + ) ); assertEquals(ex.getMessage(), "failed to create new translog file"); assertEquals(ex.getCause().getClass(), FileAlreadyExistsException.class); @@ -3256,7 +3301,8 @@ public class TranslogTests extends ESTestCase { deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - seqNo -> {} + seqNo -> {}, + TranslogOperationAsserter.DEFAULT ); Translog.Snapshot snapshot = translog.newSnapshot(localCheckpointOfSafeCommit + 1, Long.MAX_VALUE) ) { @@ -3351,7 +3397,8 @@ public class TranslogTests extends ESTestCase { deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - seqNo -> {} + seqNo -> {}, + TranslogOperationAsserter.DEFAULT ); translog.add(TranslogOperationsUtils.indexOp("2", 1, primaryTerm.get())); translog.rollGeneration(); @@ -3365,7 +3412,8 @@ public class TranslogTests extends ESTestCase { deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - seqNo -> {} + seqNo -> {}, + TranslogOperationAsserter.DEFAULT ); } @@ -3713,7 +3761,15 @@ public class TranslogTests extends ESTestCase { LongSupplier globalCheckpointSupplier, LongSupplier primaryTermSupplier ) throws IOException { - super(config, translogUUID, deletionPolicy, globalCheckpointSupplier, primaryTermSupplier, seqNo -> {}); + super( + config, + translogUUID, + deletionPolicy, + globalCheckpointSupplier, + primaryTermSupplier, + seqNo -> {}, + TranslogOperationAsserter.DEFAULT + ); } void callCloseDirectly() throws IOException { @@ -3855,7 +3911,8 @@ public class TranslogTests extends ESTestCase { brokenTranslog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - seqNo -> {} + seqNo -> {}, + TranslogOperationAsserter.DEFAULT ) ) { recoveredTranslog.rollGeneration(); @@ -3889,7 +3946,8 @@ public class TranslogTests extends ESTestCase { new TranslogDeletionPolicy(), globalCheckpointSupplier, primaryTerm::get, - persistedSeqNos::add + persistedSeqNos::add, + TranslogOperationAsserter.DEFAULT ) ) { Thread[] threads = new Thread[between(2, 8)]; @@ -3974,7 +4032,8 @@ public class TranslogTests extends ESTestCase { new TranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - seqNo -> {} + seqNo -> {}, + TranslogOperationAsserter.DEFAULT ) { @Override ChannelFactory getChannelFactory() { @@ -4040,7 +4099,8 @@ public class TranslogTests extends ESTestCase { 
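A pattern worth noting across the hunks above and below: every Translog construction in this test file gains the same trailing TranslogOperationAsserter.DEFAULT argument. If the constructor grows again, a test-local factory would confine the churn to one place; here is a sketch assembled only from arguments already visible at these call sites, with an invented method name.

    // Hypothetical helper inside TranslogTests mirroring the call sites in this diff.
    private Translog openTranslog(TranslogConfig config, String translogUUID, TranslogDeletionPolicy deletionPolicy)
        throws IOException {
        return new Translog(
            config,
            translogUUID,
            deletionPolicy,
            () -> SequenceNumbers.NO_OPS_PERFORMED, // global checkpoint supplier used by most tests here
            primaryTerm::get,
            seqNo -> {},                            // persisted seqNo consumer, as at most call sites
            TranslogOperationAsserter.DEFAULT       // the parameter this change threads through
        );
    }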
new TranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get, - getPersistedSeqNoConsumer() + getPersistedSeqNoConsumer(), + TranslogOperationAsserter.DEFAULT ) { @Override ChannelFactory getChannelFactory() { diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java index 8afedbd63f14..773c660caa1c 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java @@ -204,7 +204,7 @@ public class IndicesRequestCacheTests extends ESTestCase { public void testCacheDifferentMapping() throws Exception { IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY); MappingLookup.CacheKey mappingKey1 = MappingLookup.EMPTY.cacheKey(); - MappingLookup.CacheKey mappingKey2 = MappingLookup.fromMappers(Mapping.EMPTY, emptyList(), emptyList(), null).cacheKey(); + MappingLookup.CacheKey mappingKey2 = MappingLookup.fromMappers(Mapping.EMPTY, emptyList(), emptyList()).cacheKey(); AtomicBoolean indexShard = new AtomicBoolean(true); ShardRequestCache requestCacheStats = new ShardRequestCache(); Directory dir = newDirectory(); @@ -364,13 +364,13 @@ public class IndicesRequestCacheTests extends ESTestCase { writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); DirectoryReader secondReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - MappingLookup.CacheKey secondMappingKey = MappingLookup.fromMappers(Mapping.EMPTY, emptyList(), emptyList(), null).cacheKey(); + MappingLookup.CacheKey secondMappingKey = MappingLookup.fromMappers(Mapping.EMPTY, emptyList(), emptyList()).cacheKey(); TestEntity secondEntity = new TestEntity(requestCacheStats, indexShard); Loader secondLoader = new Loader(secondReader, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "baz")); DirectoryReader thirdReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - MappingLookup.CacheKey thirdMappingKey = MappingLookup.fromMappers(Mapping.EMPTY, emptyList(), emptyList(), null).cacheKey(); + MappingLookup.CacheKey thirdMappingKey = MappingLookup.fromMappers(Mapping.EMPTY, emptyList(), emptyList()).cacheKey(); AtomicBoolean differentIdentity = new AtomicBoolean(true); TestEntity thirdEntity = new TestEntity(requestCacheStats, differentIdentity); Loader thirdLoader = new Loader(thirdReader, 0); @@ -506,7 +506,7 @@ public class IndicesRequestCacheTests extends ESTestCase { AtomicBoolean trueBoolean = new AtomicBoolean(true); AtomicBoolean falseBoolean = new AtomicBoolean(false); MappingLookup.CacheKey mKey1 = MappingLookup.EMPTY.cacheKey(); - MappingLookup.CacheKey mKey2 = MappingLookup.fromMappers(Mapping.EMPTY, emptyList(), emptyList(), null).cacheKey(); + MappingLookup.CacheKey mKey2 = MappingLookup.fromMappers(Mapping.EMPTY, emptyList(), emptyList()).cacheKey(); Directory dir = newDirectory(); IndexWriterConfig config = newIndexWriterConfig(); IndexWriter writer = new IndexWriter(dir, config); diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 926ac534164f..d041121b8a96 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -227,8 +227,7 @@ public class 
SearchServiceTests extends IndexShardTestCase { MappingLookup mappingLookup = MappingLookup.fromMappers( mapping, Collections.singletonList(keywordFieldMapper), - Collections.emptyList(), - indexSettings + Collections.emptyList() ); return new SearchExecutionContext( 0, diff --git a/server/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java b/server/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java index d78c697e19c8..3e4ed0ebac1b 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java @@ -169,7 +169,7 @@ public abstract class AbstractSuggestionBuilderTestCase new TestTemplateService.MockTemplateScript.Factory(((Script) invocation.getArguments()[0]).getIdOrCode()) ); List mappers = Collections.singletonList(new MockFieldMapper(fieldType)); - MappingLookup lookup = MappingLookup.fromMappers(Mapping.EMPTY, mappers, emptyList(), idxSettings); + MappingLookup lookup = MappingLookup.fromMappers(Mapping.EMPTY, mappers, emptyList()); SearchExecutionContext mockContext = new SearchExecutionContext( 0, 0, diff --git a/server/src/test/resources/org/elasticsearch/action/admin/cluster/stats/telemetry_test.json b/server/src/test/resources/org/elasticsearch/action/admin/cluster/stats/telemetry_test.json index fe9c77cb2a18..a92bab739b37 100644 --- a/server/src/test/resources/org/elasticsearch/action/admin/cluster/stats/telemetry_test.json +++ b/server/src/test/resources/org/elasticsearch/action/admin/cluster/stats/telemetry_test.json @@ -1,5 +1,4 @@ { - "_search" : { "total" : 10, "success" : 20, "skipped" : 5, @@ -63,5 +62,4 @@ } } } - } -} \ No newline at end of file +} diff --git a/settings.gradle b/settings.gradle index 18be6ba79275..df428139c92a 100644 --- a/settings.gradle +++ b/settings.gradle @@ -18,6 +18,8 @@ plugins { id 'elasticsearch.java-toolchain' } +enableFeaturePreview "STABLE_CONFIGURATION_CACHE" + rootProject.name = "elasticsearch" dependencyResolutionManagement { diff --git a/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpHandler.java b/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpHandler.java index 0c58205ac8d6..7606e9261dd3 100644 --- a/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpHandler.java +++ b/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsHttpHandler.java @@ -14,8 +14,11 @@ import com.sun.net.httpserver.HttpHandler; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -45,21 +48,38 @@ public class Ec2ImdsHttpHandler implements HttpHandler { private final BiConsumer newCredentialsConsumer; private final Map instanceAddresses; - private final Set validCredentialsEndpoints = ConcurrentCollections.newConcurrentSet(); + private final Set validCredentialsEndpoints; + private final boolean dynamicProfileNames; private final Supplier availabilityZoneSupplier; + @Nullable // if instance identity document not available + private final ToXContent instanceIdentityDocument; public 
Ec2ImdsHttpHandler( Ec2ImdsVersion ec2ImdsVersion, BiConsumer<String, String> newCredentialsConsumer, Collection<String> alternativeCredentialsEndpoints, Supplier<String> availabilityZoneSupplier, + @Nullable ToXContent instanceIdentityDocument, Map<String, String> instanceAddresses ) { this.ec2ImdsVersion = Objects.requireNonNull(ec2ImdsVersion); this.newCredentialsConsumer = Objects.requireNonNull(newCredentialsConsumer); this.instanceAddresses = instanceAddresses; - this.validCredentialsEndpoints.addAll(alternativeCredentialsEndpoints); + + if (alternativeCredentialsEndpoints.isEmpty()) { + dynamicProfileNames = true; + validCredentialsEndpoints = ConcurrentCollections.newConcurrentSet(); + } else if (ec2ImdsVersion == Ec2ImdsVersion.V2) { + throw new IllegalArgumentException( + Strings.format("alternative credentials endpoints %s require IMDSv1", alternativeCredentialsEndpoints) + ); + } else { + dynamicProfileNames = false; + validCredentialsEndpoints = Set.copyOf(alternativeCredentialsEndpoints); + } + this.availabilityZoneSupplier = availabilityZoneSupplier; + this.instanceIdentityDocument = instanceIdentityDocument; } @Override @@ -78,6 +98,8 @@ public class Ec2ImdsHttpHandler implements HttpHandler { validImdsTokens.add(token); final var responseBody = token.getBytes(StandardCharsets.UTF_8); exchange.getResponseHeaders().add("Content-Type", "text/plain"); + exchange.getResponseHeaders() + .add("x-aws-ec2-metadata-token-ttl-seconds", Long.toString(TimeValue.timeValueDays(1).seconds())); exchange.sendResponseHeaders(RestStatus.OK.getStatus(), responseBody.length); exchange.getResponseBody().write(responseBody); } @@ -98,7 +120,7 @@ } if ("GET".equals(requestMethod)) { - if (path.equals(IMDS_SECURITY_CREDENTIALS_PATH)) { + if (path.equals(IMDS_SECURITY_CREDENTIALS_PATH) && dynamicProfileNames) { final var profileName = randomIdentifier(); validCredentialsEndpoints.add(IMDS_SECURITY_CREDENTIALS_PATH + profileName); sendStringResponse(exchange, profileName); @@ -107,6 +129,9 @@ final var availabilityZone = availabilityZoneSupplier.get(); sendStringResponse(exchange, availabilityZone); return; + } else if (instanceIdentityDocument != null && path.equals("/latest/dynamic/instance-identity/document")) { + sendStringResponse(exchange, Strings.toString(instanceIdentityDocument)); + return; } else if (validCredentialsEndpoints.contains(path)) { final String accessKey = randomIdentifier(); final String sessionToken = randomIdentifier(); diff --git a/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsServiceBuilder.java b/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsServiceBuilder.java index 505c9978bc4f..d70ee723942e 100644 --- a/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsServiceBuilder.java +++ b/test/fixtures/ec2-imds-fixture/src/main/java/fixture/aws/imds/Ec2ImdsServiceBuilder.java @@ -10,6 +10,7 @@ package fixture.aws.imds; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.ToXContent; import java.util.Collection; import java.util.HashMap; @@ -24,6 +25,7 @@ public class Ec2ImdsServiceBuilder { private BiConsumer<String, String> newCredentialsConsumer = Ec2ImdsServiceBuilder::rejectNewCredentials; private Collection<String> alternativeCredentialsEndpoints = Set.of(); private Supplier<String> availabilityZoneSupplier = Ec2ImdsServiceBuilder::rejectAvailabilityZone; + private ToXContent instanceIdentityDocument = null; private final Map<String, String> instanceAddresses = new
HashMap<>(); public Ec2ImdsServiceBuilder(Ec2ImdsVersion ec2ImdsVersion) { @@ -64,8 +66,13 @@ public class Ec2ImdsServiceBuilder { newCredentialsConsumer, alternativeCredentialsEndpoints, availabilityZoneSupplier, + instanceIdentityDocument, Map.copyOf(instanceAddresses) ); } + public Ec2ImdsServiceBuilder instanceIdentityDocument(ToXContent instanceIdentityDocument) { + this.instanceIdentityDocument = instanceIdentityDocument; + return this; + } } diff --git a/test/fixtures/ec2-imds-fixture/src/test/java/fixture/aws/imds/Ec2ImdsHttpHandlerTests.java b/test/fixtures/ec2-imds-fixture/src/test/java/fixture/aws/imds/Ec2ImdsHttpHandlerTests.java index 6d3eb3d14e9b..0c0d02b32d4a 100644 --- a/test/fixtures/ec2-imds-fixture/src/test/java/fixture/aws/imds/Ec2ImdsHttpHandlerTests.java +++ b/test/fixtures/ec2-imds-fixture/src/test/java/fixture/aws/imds/Ec2ImdsHttpHandlerTests.java @@ -52,16 +52,13 @@ public class Ec2ImdsHttpHandlerTests extends ESTestCase { assertTrue(Strings.hasText(profileName)); final var credentialsResponse = handleRequest(handler, "GET", SECURITY_CREDENTIALS_URI + profileName); - assertEquals(RestStatus.OK, credentialsResponse.status()); assertThat(generatedCredentials, aMapWithSize(1)); - final var accessKey = generatedCredentials.keySet().iterator().next(); - final var sessionToken = generatedCredentials.values().iterator().next(); - - final var responseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), credentialsResponse.body().streamInput(), false); - assertEquals(Set.of("AccessKeyId", "Expiration", "RoleArn", "SecretAccessKey", "Token"), responseMap.keySet()); - assertEquals(accessKey, responseMap.get("AccessKeyId")); - assertEquals(sessionToken, responseMap.get("Token")); + assertValidCredentialsResponse( + credentialsResponse, + generatedCredentials.keySet().iterator().next(), + generatedCredentials.values().iterator().next() + ); } public void testImdsV2Disabled() { @@ -78,6 +75,7 @@ public class Ec2ImdsHttpHandlerTests extends ESTestCase { final var tokenResponse = handleRequest(handler, "PUT", "/latest/api/token"); assertEquals(RestStatus.OK, tokenResponse.status()); + assertEquals(List.of("86400" /* seconds in a day */), tokenResponse.responseHeaders().get("x-aws-ec2-metadata-token-ttl-seconds")); final var token = tokenResponse.body().utf8ToString(); final var roleResponse = checkImdsV2GetRequest(handler, SECURITY_CREDENTIALS_URI, token); @@ -86,16 +84,13 @@ public class Ec2ImdsHttpHandlerTests extends ESTestCase { assertTrue(Strings.hasText(profileName)); final var credentialsResponse = checkImdsV2GetRequest(handler, SECURITY_CREDENTIALS_URI + profileName, token); - assertEquals(RestStatus.OK, credentialsResponse.status()); assertThat(generatedCredentials, aMapWithSize(1)); - final var accessKey = generatedCredentials.keySet().iterator().next(); - final var sessionToken = generatedCredentials.values().iterator().next(); - - final var responseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), credentialsResponse.body().streamInput(), false); - assertEquals(Set.of("AccessKeyId", "Expiration", "RoleArn", "SecretAccessKey", "Token"), responseMap.keySet()); - assertEquals(accessKey, responseMap.get("AccessKeyId")); - assertEquals(sessionToken, responseMap.get("Token")); + assertValidCredentialsResponse( + credentialsResponse, + generatedCredentials.keySet().iterator().next(), + generatedCredentials.values().iterator().next() + ); } public void testAvailabilityZone() { @@ -113,7 +108,54 @@ public class Ec2ImdsHttpHandlerTests extends ESTestCase 
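The IMDSv2 handshake that these handler and test changes emulate is easiest to see end to end. Below is a minimal, self-contained sketch of a client performing that flow against the fixture using only the JDK HTTP client; the base URL is a placeholder for wherever the fixture happens to be bound, and the request-side TTL header is what a real AWS client would send (the fixture simply answers with a fixed one-day TTL):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Sketch of the IMDSv2 token exchange the fixture emulates; address and TTL
// value here are illustrative stand-ins, not part of the fixture's API.
public class ImdsV2FlowSketch {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        String base = "http://127.0.0.1:8080"; // placeholder: wherever the fixture is bound

        // Step 1: PUT /latest/api/token to obtain a session token; the fixture now
        // also returns x-aws-ec2-metadata-token-ttl-seconds: 86400 on this response.
        HttpRequest tokenRequest = HttpRequest.newBuilder(URI.create(base + "/latest/api/token"))
            .header("X-aws-ec2-metadata-token-ttl-seconds", "21600")
            .PUT(HttpRequest.BodyPublishers.noBody())
            .build();
        HttpResponse<String> tokenResponse = client.send(tokenRequest, HttpResponse.BodyHandlers.ofString());
        String token = tokenResponse.body();

        // Step 2: GET metadata with the token attached; under IMDSv2 the fixture
        // rejects token-less requests. With a document configured through
        // Ec2ImdsServiceBuilder#instanceIdentityDocument, this path returns its JSON.
        HttpRequest documentRequest = HttpRequest.newBuilder(URI.create(base + "/latest/dynamic/instance-identity/document"))
            .header("X-aws-ec2-metadata-token", token)
            .GET()
            .build();
        System.out.println(client.send(documentRequest, HttpResponse.BodyHandlers.ofString()).body());
    }
}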
{ assertEquals(generatedAvailabilityZones, Set.of(availabilityZone)); } - private record TestHttpResponse(RestStatus status, BytesReference body) {} + public void testAlternativeCredentialsEndpoint() throws IOException { + expectThrows( + IllegalArgumentException.class, + new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V2).alternativeCredentialsEndpoints(Set.of("/should-not-work"))::buildHandler + ); + + final var alternativePaths = randomList(1, 5, () -> "/" + randomIdentifier()); + final Map<String, String> generatedCredentials = new HashMap<>(); + + final var handler = new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V1).alternativeCredentialsEndpoints(alternativePaths) + .newCredentialsConsumer(generatedCredentials::put) + .buildHandler(); + + final var credentialsResponse = handleRequest(handler, "GET", randomFrom(alternativePaths)); + + assertThat(generatedCredentials, aMapWithSize(1)); + assertValidCredentialsResponse( + credentialsResponse, + generatedCredentials.keySet().iterator().next(), + generatedCredentials.values().iterator().next() + ); + } + + private static void assertValidCredentialsResponse(TestHttpResponse credentialsResponse, String accessKey, String sessionToken) + throws IOException { + assertEquals(RestStatus.OK, credentialsResponse.status()); + final var responseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), credentialsResponse.body().streamInput(), false); + assertEquals(Set.of("AccessKeyId", "Expiration", "RoleArn", "SecretAccessKey", "Token"), responseMap.keySet()); + assertEquals(accessKey, responseMap.get("AccessKeyId")); + assertEquals(sessionToken, responseMap.get("Token")); + } + + public void testInstanceIdentityDocument() { + final Set<String> generatedRegions = new HashSet<>(); + final var handler = new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V1).instanceIdentityDocument((builder, params) -> { + final var newRegion = randomIdentifier(); + generatedRegions.add(newRegion); + return builder.field("region", newRegion); + }).buildHandler(); + + final var instanceIdentityResponse = handleRequest(handler, "GET", "/latest/dynamic/instance-identity/document"); + assertEquals(RestStatus.OK, instanceIdentityResponse.status()); + final var instanceIdentityString = instanceIdentityResponse.body().utf8ToString(); + + assertEquals(Strings.format("{\"region\":\"%s\"}", generatedRegions.iterator().next()), instanceIdentityString); + } + + private record TestHttpResponse(RestStatus status, Headers responseHeaders, BytesReference body) {} private static TestHttpResponse checkImdsV2GetRequest(Ec2ImdsHttpHandler handler, String uri, String token) { final var unauthorizedResponse = handleRequest(handler, "GET", uri, null); @@ -145,7 +187,11 @@ fail(e); } assertNotEquals(0, httpExchange.getResponseCode()); - return new TestHttpResponse(RestStatus.fromCode(httpExchange.getResponseCode()), httpExchange.getResponseBodyContents()); + return new TestHttpResponse( + RestStatus.fromCode(httpExchange.getResponseCode()), + httpExchange.getResponseHeaders(), + httpExchange.getResponseBodyContents() + ); } private static class TestHttpExchange extends HttpExchange { diff --git a/test/fixtures/krb5kdc-fixture/Dockerfile b/test/fixtures/krb5kdc-fixture/Dockerfile index e862c7a71f2b..47fc05d5aaf5 100644 --- a/test/fixtures/krb5kdc-fixture/Dockerfile +++ b/test/fixtures/krb5kdc-fixture/Dockerfile @@ -1,9 +1,12 @@ -FROM ubuntu:14.04 -ADD .
/fixture +FROM alpine:3.21.0 + +ADD src/main/resources /fixture +RUN apk update && apk add --no-cache python3 krb5 krb5-server + RUN echo kerberos.build.elastic.co > /etc/hostname -RUN bash /fixture/src/main/resources/provision/installkdc.sh +RUN sh /fixture/provision/installkdc.sh EXPOSE 88 EXPOSE 88/udp -CMD sleep infinity +CMD ["sleep", "infinity"] diff --git a/test/fixtures/krb5kdc-fixture/build.gradle b/test/fixtures/krb5kdc-fixture/build.gradle index c9540011d80d..887d6a2b6876 100644 --- a/test/fixtures/krb5kdc-fixture/build.gradle +++ b/test/fixtures/krb5kdc-fixture/build.gradle @@ -16,8 +16,8 @@ apply plugin: 'elasticsearch.deploy-test-fixtures' dockerFixtures { krb5dc { dockerContext = projectDir - version = "1.0" - baseImages = ["ubuntu:14.04"] + version = "1.1" + baseImages = ["alpine:3.21.0"] } } diff --git a/test/fixtures/krb5kdc-fixture/src/main/java/org/elasticsearch/test/fixtures/krb5kdc/Krb5kDcContainer.java b/test/fixtures/krb5kdc-fixture/src/main/java/org/elasticsearch/test/fixtures/krb5kdc/Krb5kDcContainer.java index cb1f86de51b1..f44058d0ebcc 100644 --- a/test/fixtures/krb5kdc-fixture/src/main/java/org/elasticsearch/test/fixtures/krb5kdc/Krb5kDcContainer.java +++ b/test/fixtures/krb5kdc-fixture/src/main/java/org/elasticsearch/test/fixtures/krb5kdc/Krb5kDcContainer.java @@ -29,7 +29,7 @@ import java.util.Arrays; import java.util.List; public final class Krb5kDcContainer extends DockerEnvironmentAwareTestContainer { - public static final String DOCKER_BASE_IMAGE = "docker.elastic.co/elasticsearch-dev/krb5dc-fixture:1.0"; + public static final String DOCKER_BASE_IMAGE = "docker.elastic.co/elasticsearch-dev/krb5dc-fixture:1.1"; private final TemporaryFolder temporaryFolder = new TemporaryFolder(); private final ProvisioningId provisioningId; private Path krb5ConfFile; @@ -39,14 +39,14 @@ public final class Krb5kDcContainer extends DockerEnvironmentAwareTestContainer public enum ProvisioningId { HDFS( "hdfs", - "/fixture/src/main/resources/provision/hdfs.sh", + "/fixture/provision/hdfs.sh", "/fixture/build/keytabs/hdfs_hdfs.build.elastic.co.keytab", "/fixture/build/keytabs/elasticsearch.keytab", "hdfs/hdfs.build.elastic.co@BUILD.ELASTIC.CO" ), PEPPA( "peppa", - "/fixture/src/main/resources/provision/peppa.sh", + "/fixture/provision/peppa.sh", "/fixture/build/keytabs/peppa.keytab", "/fixture/build/keytabs/HTTP_localhost.keytab", "peppa@BUILD.ELASTIC.CO" @@ -94,7 +94,7 @@ public final class Krb5kDcContainer extends DockerEnvironmentAwareTestContainer withNetworkAliases("kerberos.build.elastic.co", "build.elastic.co"); withCopyFileToContainer(MountableFile.forHostPath("/dev/urandom"), "/dev/random"); withExtraHost("kerberos.build.elastic.co", "127.0.0.1"); - withCommand("bash", provisioningId.scriptPath); + withCommand("sh", provisioningId.scriptPath); } @Override @@ -122,7 +122,7 @@ public final class Krb5kDcContainer extends DockerEnvironmentAwareTestContainer .findFirst(); String hostPortSpec = bindings.get().getHostPortSpec(); String s = copyFileFromContainer("/fixture/build/krb5.conf.template", i -> IOUtils.toString(i, StandardCharsets.UTF_8)); - return s.replace("${MAPPED_PORT}", hostPortSpec); + return s.replace("#KDC_DOCKER_HOST", "kdc = 127.0.0.1:" + hostPortSpec); } public Path getKeytab() { diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh index 44bd7a841ded..553bd8f85f70 100755 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh +++
b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh # Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one # or more contributor license agreements. Licensed under the "Elastic License @@ -24,7 +24,7 @@ PASSWD="$2" USER=$(echo $PRINC | tr "/" "_") VDIR=/fixture -RESOURCES=$VDIR/src/main/resources +RESOURCES=$VDIR PROV_DIR=$RESOURCES/provision ENVPROP_FILE=$RESOURCES/env.properties BUILD_DIR=$VDIR/build @@ -45,16 +45,16 @@ USER_KTAB=$LOCALSTATEDIR/$USER.keytab if [ -f $USER_KTAB ] && [ -z "$PASSWD" ]; then echo "Principal '${PRINC}@${REALM}' already exists. Re-copying keytab..." - sudo cp $USER_KTAB $KEYTAB_DIR/$USER.keytab + cp $USER_KTAB $KEYTAB_DIR/$USER.keytab else if [ -z "$PASSWD" ]; then echo "Provisioning '${PRINC}@${REALM}' principal and keytab..." - sudo kadmin -p $ADMIN_PRIN -kt $ADMIN_KTAB -q "addprinc -randkey $USER_PRIN" - sudo kadmin -p $ADMIN_PRIN -kt $ADMIN_KTAB -q "ktadd -k $USER_KTAB $USER_PRIN" - sudo cp $USER_KTAB $KEYTAB_DIR/$USER.keytab + kadmin -p $ADMIN_PRIN -kt $ADMIN_KTAB -q "addprinc -randkey $USER_PRIN" + kadmin -p $ADMIN_PRIN -kt $ADMIN_KTAB -q "ktadd -k $USER_KTAB $USER_PRIN" + cp $USER_KTAB $KEYTAB_DIR/$USER.keytab else echo "Provisioning '${PRINC}@${REALM}' principal with password..." - sudo kadmin -p $ADMIN_PRIN -kt $ADMIN_KTAB -q "addprinc -pw $PASSWD $PRINC" + kadmin -p $ADMIN_PRIN -kt $ADMIN_KTAB -q "addprinc -pw $PASSWD $PRINC" fi fi diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/hdfs.sh b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/hdfs.sh index de08a52df330..cf2eb5a1b723 100644 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/hdfs.sh +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/hdfs.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh set -e diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh index 428747075ff3..a364349c56c6 100755 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh # Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one # or more contributor license agreements. Licensed under the "Elastic License @@ -12,8 +12,7 @@ set -e # KDC installation steps and considerations based on https://web.mit.edu/kerberos/krb5-latest/doc/admin/install_kdc.html # and helpful input from https://help.ubuntu.com/community/Kerberos -VDIR=/fixture -RESOURCES=$VDIR/src/main/resources +RESOURCES=/fixture PROV_DIR=$RESOURCES/provision ENVPROP_FILE=$RESOURCES/env.properties LOCALSTATEDIR=/etc @@ -49,33 +48,11 @@ touch $LOGDIR/kadmin.log touch $LOGDIR/krb5kdc.log touch $LOGDIR/krb5lib.log -# Update package manager -apt-get update -qqy - -# Installation asks a bunch of questions via debconf. 
Set the answers ahead of time -debconf-set-selections <<< "krb5-config krb5-config/read_conf boolean true" -debconf-set-selections <<< "krb5-config krb5-config/kerberos_servers string $KDC_NAME" -debconf-set-selections <<< "krb5-config krb5-config/add_servers boolean true" -debconf-set-selections <<< "krb5-config krb5-config/admin_server string $KDC_NAME" -debconf-set-selections <<< "krb5-config krb5-config/add_servers_realm string $REALM_NAME" -debconf-set-selections <<< "krb5-config krb5-config/default_realm string $REALM_NAME" -debconf-set-selections <<< "krb5-admin-server krb5-admin-server/kadmind boolean true" -debconf-set-selections <<< "krb5-admin-server krb5-admin-server/newrealm note" -debconf-set-selections <<< "krb5-kdc krb5-kdc/debconf boolean true" -debconf-set-selections <<< "krb5-kdc krb5-kdc/purge_data_too boolean false" - -# Install krb5 packages -apt-get install -qqy krb5-{admin-server,kdc} - -# /dev/random produces output very slowly on Ubuntu VM's. Install haveged to increase entropy. -apt-get install -qqy haveged -haveged - # Create kerberos database with stash file and garbage password kdb5_util create -s -r $REALM_NAME -P zyxwvutsrpqonmlk9876 # Set up admin acls -cat << EOF > /etc/krb5kdc/kadm5.acl +cat << EOF > /var/lib/krb5kdc/kadm5.acl */admin@$REALM_NAME * */*@$REALM_NAME i EOF diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template index b66709968839..e79caecbcf33 100644 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template @@ -6,6 +6,7 @@ # License v3.0 only", or the "Server Side Public License, v 1". 
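The provisioning scripts above now run against paths baked into the image, and the companion change in Krb5kDcContainer.getConf() splices a concrete kdc line over the #KDC_DOCKER_HOST placeholder instead of interpolating a ${MAPPED_PORT} variable. A self-contained sketch of that substitution, with an illustrative template string and port standing in for the real file and the docker-mapped port:

// Standalone sketch of the substitution Krb5kDcContainer.getConf() performs;
// the template body and port below are illustrative stand-ins.
public class KdcConfSubstitutionSketch {
    public static void main(String[] args) {
        String template = """
            [realms]
              BUILD.ELASTIC.CO = {
                kdc = 127.0.0.1:88
                #KDC_DOCKER_HOST
              }
            """;
        String hostPortSpec = "32768"; // stands in for bindings.get().getHostPortSpec()
        // Replace the whole placeholder comment with a complete "kdc = ..." line,
        // pointing clients at the docker-mapped host port.
        String conf = template.replace("#KDC_DOCKER_HOST", "kdc = 127.0.0.1:" + hostPortSpec);
        System.out.println(conf);
    }
}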
[libdefaults] + spake_preauth_groups = edwards25519 default_realm = ${REALM_NAME} dns_canonicalize_hostname = false dns_lookup_kdc = false @@ -25,7 +26,7 @@ [realms] ${REALM_NAME} = { kdc = 127.0.0.1:88 - kdc = 127.0.0.1:${MAPPED_PORT} + #KDC_DOCKER_HOST admin_server = ${KDC_NAME}:749 default_domain = ${BUILD_ZONE} } diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/peppa.sh b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/peppa.sh index da6480d891af..24179da5882c 100644 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/peppa.sh +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/peppa.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh set -e diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java index 6294230ad1dd..f6ed328d14dd 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java @@ -702,7 +702,7 @@ public final class DataStreamTestHelper { new MetadataFieldMapper[] { dtfm }, Collections.emptyMap() ); - mappingLookup = MappingLookup.fromMappers(mapping, List.of(dtfm, dateFieldMapper), List.of(), null); + mappingLookup = MappingLookup.fromMappers(mapping, List.of(dtfm, dateFieldMapper), List.of()); } IndicesService indicesService = mockIndicesServices(mappingLookup); diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 46f6a0b503bf..9a160fffb965 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -141,7 +141,6 @@ import static java.util.Collections.shuffle; import static org.elasticsearch.index.engine.Engine.Operation.Origin.PEER_RECOVERY; import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY; import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -458,7 +457,13 @@ public abstract class EngineTestCase extends ESTestCase { } public static ParsedDocument parseDocument(MapperService mapperService, String id, String routing) { - SourceToParse sourceToParse = new SourceToParse(id, new BytesArray("{ \"value\" : \"test\" }"), XContentType.JSON, routing); + String source = randomFrom( + "{ \"value\" : \"test-1\" }", + "{ \"value\" : [\"test-1\",\"test-2\"] }", + "{ \"value\" : [\"test-2\",\"test-1\"] }", + "{ \"value\" : [\"test-1\",\"test-2\",\"test-2\"] }" + ); + SourceToParse sourceToParse = new SourceToParse(id, new BytesArray(source), XContentType.JSON, routing); return mapperService.documentMapper().parse(sourceToParse); } @@ -492,7 +497,8 @@ public abstract class EngineTestCase extends ESTestCase { new TranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTermSupplier, - seqNo -> {} + seqNo -> {}, + TranslogOperationAsserter.DEFAULT ); } @@ -1072,10 +1078,9 @@ public abstract class EngineTestCase extends ESTestCase { final int nestedValues = between(0, 3); final long startTime = 
threadPool.relativeTimeInNanos(); final int copies = allowDuplicate && rarely() ? between(2, 4) : 1; + final var nonNestedDoc = parseDocument(mapperService, id, null); for (int copy = 0; copy < copies; copy++) { - final ParsedDocument doc = isNestedDoc - ? nestedParsedDocFactory.apply(id, nestedValues) - : parseDocument(engine.engineConfig.getMapperService(), id, null); + final ParsedDocument doc = isNestedDoc ? nestedParsedDocFactory.apply(id, nestedValues) : nonNestedDoc; switch (opType) { case INDEX -> operations.add( new Engine.Index( @@ -1345,6 +1350,7 @@ public abstract class EngineTestCase extends ESTestCase { } else { minSeqNoToRetain = engine.getMinRetainedSeqNo(); } + TranslogOperationAsserter translogOperationAsserter = TranslogOperationAsserter.withEngineConfig(engine.engineConfig); for (Translog.Operation translogOp : translogOps) { final Translog.Operation luceneOp = luceneOps.get(translogOp.seqNo()); if (luceneOp == null) { @@ -1372,10 +1378,9 @@ public abstract class EngineTestCase extends ESTestCase { assertThat(luceneOp.opType(), equalTo(translogOp.opType())); if (luceneOp.opType() == Translog.Operation.Type.INDEX) { if (engine.engineConfig.getIndexSettings().isRecoverySourceSyntheticEnabled()) { - assertToXContentEquivalent( - ((Translog.Index) luceneOp).source(), - ((Translog.Index) translogOp).source(), - XContentFactory.xContentType(((Translog.Index) luceneOp).source().array()) + assertTrue( + "luceneOp=" + luceneOp + " != translogOp=" + translogOp, + translogOperationAsserter.assertSameIndexOperation((Translog.Index) luceneOp, (Translog.Index) translogOp) ); } else { assertThat(((Translog.Index) luceneOp).source(), equalTo(((Translog.Index) translogOp).source())); diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/TestDocumentParserContext.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/TestDocumentParserContext.java index 02ae0853909f..49fe9d30239a 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/TestDocumentParserContext.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/TestDocumentParserContext.java @@ -70,7 +70,6 @@ public class TestDocumentParserContext extends DocumentParserContext { } ), source, - DocumentParser.Listeners.NOOP, mappingLookup.getMapping().getRoot(), ObjectMapper.Dynamic.getRootDynamic(mappingLookup) ); diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultMappingParametersHandler.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultMappingParametersHandler.java index 4b33f3fefcf1..716e332bebb8 100644 --- a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultMappingParametersHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/datasource/DefaultMappingParametersHandler.java @@ -65,6 +65,10 @@ public class DefaultMappingParametersHandler implements DataSourceHandler { } } + if (ESTestCase.randomDouble() <= 0.2) { + injected.put("ignore_above", ESTestCase.randomIntBetween(1, 10000)); + } + return injected; }; } diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 4fe41692e150..f057d35d6e7a 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ 
b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -357,8 +357,7 @@ public abstract class AggregatorTestCase extends ESTestCase { Arrays.stream(fieldTypes) .map(ft -> new FieldAliasMapper(ft.name() + "-alias", ft.name() + "-alias", ft.name())) .collect(toList()), - List.of(), - indexSettings + List.of() ); BiFunction<MappedFieldType, FieldDataContext, IndexFieldData<?>> fieldDataBuilder = (fieldType, context) -> fieldType .fielddataBuilder( @@ -466,7 +465,7 @@ public abstract class AggregatorTestCase extends ESTestCase { * of stuff. */ SearchExecutionContext subContext = spy(searchExecutionContext); - MappingLookup disableNestedLookup = MappingLookup.fromMappers(Mapping.EMPTY, Set.of(), Set.of(), indexSettings); + MappingLookup disableNestedLookup = MappingLookup.fromMappers(Mapping.EMPTY, Set.of(), Set.of()); doReturn(new NestedDocuments(disableNestedLookup, bitsetFilterCache::getBitSetProducer, indexSettings.getIndexVersionCreated())) .when(subContext) .getNestedDocuments(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/SkipUnavailableRule.java b/test/framework/src/main/java/org/elasticsearch/test/SkipUnavailableRule.java new file mode 100644 index 000000000000..d5ce943b4d8f --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/test/SkipUnavailableRule.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.test; + +import org.junit.rules.TestRule; +import org.junit.runner.Description; +import org.junit.runners.model.Statement; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import java.util.Arrays; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * Test rule to process skip_unavailable override annotations + */ +public class SkipUnavailableRule implements TestRule { + private final Map<String, Boolean> skipMap; + + public SkipUnavailableRule(String...
clusterAliases) { + this.skipMap = Arrays.stream(clusterAliases).collect(Collectors.toMap(Function.identity(), alias -> true)); + } + + public Map<String, Boolean> getMap() { + return skipMap; + } + + @Override + public Statement apply(Statement base, Description description) { + // Check for the @NotSkipped annotation and disable skipping for the named cluster aliases + var aliases = description.getAnnotation(NotSkipped.class); + if (aliases != null) { + for (String alias : aliases.aliases()) { + skipMap.put(alias, false); + } + } + return base; + } + + /** + * Annotation to mark specific clusters in a test as not to be skipped when unavailable + */ + @Retention(RetentionPolicy.RUNTIME) + @Target(ElementType.METHOD) + public @interface NotSkipped { + String[] aliases(); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java index 5e19b26b8f4d..ee9c8adc47ad 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @@ -225,18 +225,29 @@ class KibanaOwnedReservedRoleDescriptors { RoleDescriptor.IndicesPrivileges.builder().indices("logs-fleet_server*").privileges("read", "delete_index").build(), // Legacy "Alerts as data" used in Security Solution. // Kibana user creates these indices; reads / writes to them. - RoleDescriptor.IndicesPrivileges.builder().indices(ReservedRolesStore.ALERTS_LEGACY_INDEX).privileges("all").build(), + RoleDescriptor.IndicesPrivileges.builder() + .indices(ReservedRolesStore.ALERTS_LEGACY_INDEX, ReservedRolesStore.ALERTS_LEGACY_INDEX_REINDEXED_V8) + .privileges("all") + .build(), // Used in Security Solution for value lists. // Kibana user creates these indices; reads / writes to them. RoleDescriptor.IndicesPrivileges.builder() - .indices(ReservedRolesStore.LISTS_INDEX, ReservedRolesStore.LISTS_ITEMS_INDEX) + .indices( + ReservedRolesStore.LISTS_INDEX, + ReservedRolesStore.LISTS_ITEMS_INDEX, + ReservedRolesStore.LISTS_INDEX_REINDEXED_V8, + ReservedRolesStore.LISTS_ITEMS_INDEX_REINDEXED_V8 + ) .privileges("all") .build(), // "Alerts as data" internal backing indices used in Security Solution, // Observability, etc. // Kibana system user creates these indices; reads / writes to them via the // aliases (see below). - RoleDescriptor.IndicesPrivileges.builder().indices(ReservedRolesStore.ALERTS_BACKING_INDEX).privileges("all").build(), + RoleDescriptor.IndicesPrivileges.builder() + .indices(ReservedRolesStore.ALERTS_BACKING_INDEX, ReservedRolesStore.ALERTS_BACKING_INDEX_REINDEXED) + .privileges("all") + .build(), // "Alerts as data" public index aliases used in Security Solution, // Observability, etc. // Kibana system user uses them to read / write alerts. @@ -248,7 +259,7 @@ class KibanaOwnedReservedRoleDescriptors { // Kibana system user creates these indices; reads / writes to them via the // aliases (see below). RoleDescriptor.IndicesPrivileges.builder() - .indices(ReservedRolesStore.PREVIEW_ALERTS_BACKING_INDEX_ALIAS) + .indices(ReservedRolesStore.PREVIEW_ALERTS_BACKING_INDEX, ReservedRolesStore.PREVIEW_ALERTS_BACKING_INDEX_REINDEXED) .privileges("all") .build(), // Endpoint / Fleet policy responses.
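SkipUnavailableRule is meant to be consumed from a test class; a hypothetical usage sketch (the class name, aliases, and assertions here are invented for illustration) looks like this:

import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.SkipUnavailableRule;
import org.junit.Rule;

// Hypothetical consumer of the new rule; not taken from this change set.
public class SkipUnavailableRuleUsageSketchIT extends ESTestCase {

    // Every alias passed to the constructor starts out mapped to true,
    // i.e. "skip this cluster when it is unavailable".
    @Rule
    public final SkipUnavailableRule skipOverride = new SkipUnavailableRule("remote-a", "remote-b");

    // The annotation flips "remote-a" to false for this method only, so the test
    // can exercise the failure path when that remote is unreachable.
    @SkipUnavailableRule.NotSkipped(aliases = { "remote-a" })
    public void testRemoteAFailuresAreReported() {
        assertFalse(skipOverride.getMap().get("remote-a"));
        assertTrue(skipOverride.getMap().get("remote-b"));
    }
}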
Kibana requires read access to send diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index bdaf75203ee5..3ab9bcc02461 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -43,9 +43,11 @@ import static java.util.Map.entry; public class ReservedRolesStore implements BiConsumer<Set<String>, ActionListener<RoleRetrievalResult>> { /** "Security Solutions" only legacy signals index */ public static final String ALERTS_LEGACY_INDEX = ".siem-signals*"; + public static final String ALERTS_LEGACY_INDEX_REINDEXED_V8 = ".reindexed-v8-siem-signals*"; /** Alerts, Rules, Cases (RAC) index used by multiple solutions */ public static final String ALERTS_BACKING_INDEX = ".internal.alerts*"; + public static final String ALERTS_BACKING_INDEX_REINDEXED = ".reindexed-v8-internal.alerts*"; /** Alerts, Rules, Cases (RAC) index used by multiple solutions */ public static final String ALERTS_INDEX_ALIAS = ".alerts*"; @@ -54,13 +56,16 @@ public class ReservedRolesStore implements BiConsumer<Set<String>, ActionListener<RoleRetrievalResult>> public static final String PREVIEW_ALERTS_INDEX_ALIAS = ".preview.alerts*"; /** Alerts, Rules, Cases (RAC) preview index used by multiple solutions */ - public static final String PREVIEW_ALERTS_BACKING_INDEX_ALIAS = ".internal.preview.alerts*"; + public static final String PREVIEW_ALERTS_BACKING_INDEX = ".internal.preview.alerts*"; + public static final String PREVIEW_ALERTS_BACKING_INDEX_REINDEXED = ".reindexed-v8-internal.preview.alerts*"; /** "Security Solutions" only lists index for value lists for detections */ public static final String LISTS_INDEX = ".lists-*"; + public static final String LISTS_INDEX_REINDEXED_V8 = ".reindexed-v8-lists-*"; /** "Security Solutions" only lists index for value list items for detections */ public static final String LISTS_ITEMS_INDEX = ".items-*"; + public static final String LISTS_ITEMS_INDEX_REINDEXED_V8 = ".reindexed-v8-items-*"; /** Index pattern for Universal Profiling */ public static final String UNIVERSAL_PROFILING_ALIASES = "profiling-*"; @@ -827,7 +832,14 @@ public class ReservedRolesStore implements BiConsumer<Set<String>, ActionListener<RoleRetrievalResult>> .build(), // Security RoleDescriptor.IndicesPrivileges.builder() - .indices(ReservedRolesStore.ALERTS_LEGACY_INDEX, ReservedRolesStore.LISTS_INDEX, ReservedRolesStore.LISTS_ITEMS_INDEX) + .indices( + ReservedRolesStore.ALERTS_LEGACY_INDEX, + ReservedRolesStore.LISTS_INDEX, + ReservedRolesStore.LISTS_ITEMS_INDEX, + ReservedRolesStore.ALERTS_LEGACY_INDEX_REINDEXED_V8, + ReservedRolesStore.LISTS_INDEX_REINDEXED_V8, + ReservedRolesStore.LISTS_ITEMS_INDEX_REINDEXED_V8 + ) .privileges("read", "view_index_metadata") .build(), // Alerts-as-data @@ -878,15 +890,24 @@ public class ReservedRolesStore implements BiConsumer<Set<String>, ActionListener<RoleRetrievalResult>> .build(), // Security RoleDescriptor.IndicesPrivileges.builder() - .indices(ReservedRolesStore.ALERTS_LEGACY_INDEX, ReservedRolesStore.LISTS_INDEX, ReservedRolesStore.LISTS_ITEMS_INDEX) + .indices( + ReservedRolesStore.ALERTS_LEGACY_INDEX, + ReservedRolesStore.LISTS_INDEX, + ReservedRolesStore.LISTS_ITEMS_INDEX, + ReservedRolesStore.ALERTS_LEGACY_INDEX_REINDEXED_V8, + ReservedRolesStore.LISTS_INDEX_REINDEXED_V8, + ReservedRolesStore.LISTS_ITEMS_INDEX_REINDEXED_V8 + ) .privileges("read",
"view_index_metadata", "write", "maintenance") .build(), // Alerts-as-data RoleDescriptor.IndicesPrivileges.builder() .indices( ReservedRolesStore.ALERTS_BACKING_INDEX, + ReservedRolesStore.ALERTS_BACKING_INDEX_REINDEXED, ReservedRolesStore.ALERTS_INDEX_ALIAS, - ReservedRolesStore.PREVIEW_ALERTS_BACKING_INDEX_ALIAS, + ReservedRolesStore.PREVIEW_ALERTS_BACKING_INDEX, + ReservedRolesStore.PREVIEW_ALERTS_BACKING_INDEX_REINDEXED, ReservedRolesStore.PREVIEW_ALERTS_INDEX_ALIAS ) .privileges("read", "view_index_metadata", "write", "maintenance") diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java index ceb375a97d1d..5369c95ad6fa 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java @@ -630,7 +630,7 @@ public class DocumentSubsetBitsetCacheTests extends ESTestCase { types.add(new MockFieldMapper(new KeywordFieldMapper.KeywordFieldType("dne-" + i))); } - MappingLookup mappingLookup = MappingLookup.fromMappers(Mapping.EMPTY, types, emptyList(), null); + MappingLookup mappingLookup = MappingLookup.fromMappers(Mapping.EMPTY, types, emptyList()); final Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java index 7cd4caab21ec..89b42228d891 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java @@ -522,6 +522,6 @@ public class SecurityIndexReaderWrapperIntegrationTests extends AbstractBuilderT private static MappingLookup createMappingLookup(List concreteFields) { List mappers = concreteFields.stream().map(MockFieldMapper::new).collect(Collectors.toList()); - return MappingLookup.fromMappers(Mapping.EMPTY, mappers, emptyList(), null); + return MappingLookup.fromMappers(Mapping.EMPTY, mappers, emptyList()); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 0cf4e2afdfdc..211eb5a4c437 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -613,12 +613,17 @@ public class ReservedRolesStoreTests extends ESTestCase { ".apm-custom-link", ".apm-source-map", ReservedRolesStore.ALERTS_LEGACY_INDEX + randomAlphaOfLength(randomIntBetween(0, 13)), + ReservedRolesStore.ALERTS_LEGACY_INDEX_REINDEXED_V8 + randomAlphaOfLength(randomIntBetween(0, 13)), ReservedRolesStore.ALERTS_BACKING_INDEX + randomAlphaOfLength(randomIntBetween(0, 13)), + 
ReservedRolesStore.ALERTS_BACKING_INDEX_REINDEXED + randomAlphaOfLength(randomIntBetween(0, 13)), ReservedRolesStore.ALERTS_INDEX_ALIAS + randomAlphaOfLength(randomIntBetween(0, 13)), ReservedRolesStore.PREVIEW_ALERTS_INDEX_ALIAS + randomAlphaOfLength(randomIntBetween(0, 13)), - ReservedRolesStore.PREVIEW_ALERTS_BACKING_INDEX_ALIAS + randomAlphaOfLength(randomIntBetween(0, 13)), + ReservedRolesStore.PREVIEW_ALERTS_BACKING_INDEX + randomAlphaOfLength(randomIntBetween(0, 13)), + ReservedRolesStore.PREVIEW_ALERTS_BACKING_INDEX_REINDEXED + randomAlphaOfLength(randomIntBetween(0, 13)), ReservedRolesStore.LISTS_INDEX + randomAlphaOfLength(randomIntBetween(0, 13)), + ReservedRolesStore.LISTS_INDEX_REINDEXED_V8 + randomAlphaOfLength(randomIntBetween(0, 13)), ReservedRolesStore.LISTS_ITEMS_INDEX + randomAlphaOfLength(randomIntBetween(0, 13)), + ReservedRolesStore.LISTS_ITEMS_INDEX_REINDEXED_V8 + randomAlphaOfLength(randomIntBetween(0, 13)), ".slo-observability." + randomAlphaOfLength(randomIntBetween(0, 13)) ).forEach(index -> assertAllIndicesAccessAllowed(kibanaRole, index)); diff --git a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java index 00cf4d63af33..ce4aa8582929 100644 --- a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java +++ b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java @@ -548,7 +548,7 @@ public class EsqlSecurityIT extends ESRestTestCase { public void testLookupJoinIndexAllowed() throws Exception { assumeTrue( "Requires LOOKUP JOIN capability", - EsqlSpecTestCase.hasCapabilities(adminClient(), List.of(EsqlCapabilities.Cap.JOIN_LOOKUP_V9.capabilityName())) + EsqlSpecTestCase.hasCapabilities(adminClient(), List.of(EsqlCapabilities.Cap.JOIN_LOOKUP_V10.capabilityName())) ); Response resp = runESQLCommand( @@ -587,7 +587,7 @@ public class EsqlSecurityIT extends ESRestTestCase { public void testLookupJoinIndexForbidden() throws Exception { assumeTrue( "Requires LOOKUP JOIN capability", - EsqlSpecTestCase.hasCapabilities(adminClient(), List.of(EsqlCapabilities.Cap.JOIN_LOOKUP_V9.capabilityName())) + EsqlSpecTestCase.hasCapabilities(adminClient(), List.of(EsqlCapabilities.Cap.JOIN_LOOKUP_V10.capabilityName())) ); var resp = expectThrows( diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java index 9a09401785df..b22925b44eba 100644 --- a/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.util.List; import static org.elasticsearch.xpack.esql.CsvTestUtils.isEnabled; -import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V9; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V10; import static org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase.Mode.ASYNC; public class MixedClusterEsqlSpecIT extends EsqlSpecTestCase { @@ -96,7 +96,7 @@ public class MixedClusterEsqlSpecIT extends 
EsqlSpecTestCase { @Override protected boolean supportsIndexModeLookup() throws IOException { - return hasCapabilities(List.of(JOIN_LOOKUP_V9.capabilityName())); + return hasCapabilities(List.of(JOIN_LOOKUP_V10.capabilityName())); } @Override diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java index a809216d3beb..987a5334f903 100644 --- a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java @@ -48,7 +48,7 @@ import static org.elasticsearch.xpack.esql.CsvTestsDataLoader.ENRICH_SOURCE_INDI import static org.elasticsearch.xpack.esql.EsqlTestUtils.classpathResources; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.INLINESTATS; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.INLINESTATS_V2; -import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V9; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V10; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_PLANNING_V1; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.METADATA_FIELDS_REMOTE_TEST; import static org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase.Mode.SYNC; @@ -124,7 +124,7 @@ public class MultiClusterSpecIT extends EsqlSpecTestCase { assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(INLINESTATS.capabilityName())); assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(INLINESTATS_V2.capabilityName())); assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_PLANNING_V1.capabilityName())); - assumeFalse("LOOKUP JOIN not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_LOOKUP_V9.capabilityName())); + assumeFalse("LOOKUP JOIN not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_LOOKUP_V10.capabilityName())); } private TestFeatureService remoteFeaturesService() throws IOException { @@ -283,8 +283,8 @@ public class MultiClusterSpecIT extends EsqlSpecTestCase { @Override protected boolean supportsIndexModeLookup() throws IOException { - // CCS does not yet support JOIN_LOOKUP_V9 and clusters falsely report they have this capability - // return hasCapabilities(List.of(JOIN_LOOKUP_V9.capabilityName())); + // CCS does not yet support JOIN_LOOKUP_V10 and clusters falsely report they have this capability + // return hasCapabilities(List.of(JOIN_LOOKUP_V10.capabilityName())); return false; } } diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClustersIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClustersIT.java index 452f40baa34a..c93b6404863e 100644 --- a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClustersIT.java +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClustersIT.java @@ -12,6 +12,7 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; import 
org.apache.http.HttpHost; import org.elasticsearch.Version; import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; @@ -37,9 +38,11 @@ import java.util.stream.Stream; import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; +import static org.elasticsearch.xpack.esql.ccq.Clusters.REMOTE_CLUSTER_NAME; import static org.hamcrest.Matchers.any; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasKey; @ThreadLeakFilters(filters = TestClustersThreadFilter.class) public class MultiClustersIT extends ESRestTestCase { @@ -395,6 +398,38 @@ public class MultiClustersIT extends ESRestTestCase { } } + @SuppressWarnings("unchecked") + public void testStats() throws IOException { + Request caps = new Request("GET", "_capabilities?method=GET&path=_cluster/stats&capabilities=esql-stats"); + Response capsResponse = client().performRequest(caps); + Map<String, Object> capsResult = entityAsMap(capsResponse.getEntity()); + assumeTrue("esql stats capability missing", capsResult.get("supported").equals(true)); + + run("FROM test-local-index,*:test-remote-index | STATS total = SUM(data) BY color | SORT color", includeCCSMetadata()); + Request stats = new Request("GET", "_cluster/stats"); + Response statsResponse = client().performRequest(stats); + Map<String, Object> result = entityAsMap(statsResponse.getEntity()); + assertThat(result, hasKey("ccs")); + Map<String, Object> ccs = (Map<String, Object>) result.get("ccs"); + assertThat(ccs, hasKey("_esql")); + Map<String, Object> esql = (Map<String, Object>) ccs.get("_esql"); + assertThat(esql, hasKey("total")); + assertThat(esql, hasKey("success")); + assertThat(esql, hasKey("took")); + assertThat(esql, hasKey("remotes_per_search_max")); + assertThat(esql, hasKey("remotes_per_search_avg")); + assertThat(esql, hasKey("failure_reasons")); + assertThat(esql, hasKey("features")); + assertThat(esql, hasKey("clusters")); + Map<String, Object> clusters = (Map<String, Object>) esql.get("clusters"); + assertThat(clusters, hasKey(REMOTE_CLUSTER_NAME)); + assertThat(clusters, hasKey("(local)")); + Map<String, Object> clusterData = (Map<String, Object>) clusters.get(REMOTE_CLUSTER_NAME); + assertThat(clusterData, hasKey("total")); + assertThat(clusterData, hasKey("skipped")); + assertThat(clusterData, hasKey("took")); + } + private RestClient remoteClusterClient() throws IOException { var clusterHosts = parseClusterHosts(remoteCluster.getHttpAddresses()); return buildClient(restClientSettings(), clusterHosts.toArray(new HttpHost[0])); diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java index a83b6cf2e906..ba93e9b31bb0 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java @@ -221,7 +221,7 @@ public abstract class RequestIndexFilteringTestCase extends ESRestTestCase { assertThat(e.getMessage(), containsString("index_not_found_exception")); assertThat(e.getMessage(), containsString("no such index [foo]")); - if (EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()) { + if (EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()) { e = expectThrows(
ResponseException.class, () -> runEsql(timestampFilter("gte", "2020-01-01").query("FROM test1 | LOOKUP JOIN foo ON id1")) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java index f0bdf089f69d..cbab6d0acfef 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java @@ -551,11 +551,11 @@ public final class CsvTestUtils { } private static Type bytesRefBlockType(Type actualType) { - if (actualType == GEO_POINT || actualType == CARTESIAN_POINT || actualType == GEO_SHAPE || actualType == CARTESIAN_SHAPE) { - return actualType; - } else { - return KEYWORD; - } + return switch (actualType) { + case NULL -> NULL; + case GEO_POINT, CARTESIAN_POINT, GEO_SHAPE, CARTESIAN_SHAPE -> actualType; + default -> KEYWORD; + }; } Object convert(String value) { diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java index 8c1ff650b4f5..fbd4f9feca78 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.LogConfigurator; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Nullable; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; import org.elasticsearch.test.rest.ESRestTestCase; @@ -50,71 +51,72 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.reader; public class CsvTestsDataLoader { private static final int BULK_DATA_SIZE = 100_000; - private static final TestsDataset EMPLOYEES = new TestsDataset("employees", "mapping-default.json", "employees.csv").noSubfields(); - private static final TestsDataset EMPLOYEES_INCOMPATIBLE = new TestsDataset( + private static final TestDataset EMPLOYEES = new TestDataset("employees", "mapping-default.json", "employees.csv").noSubfields(); + private static final TestDataset EMPLOYEES_INCOMPATIBLE = new TestDataset( "employees_incompatible", "mapping-default-incompatible.json", "employees_incompatible.csv" ).noSubfields(); - private static final TestsDataset HOSTS = new TestsDataset("hosts"); - private static final TestsDataset APPS = new TestsDataset("apps"); - private static final TestsDataset APPS_SHORT = APPS.withIndex("apps_short").withTypeMapping(Map.of("id", "short")); - private static final TestsDataset LANGUAGES = new TestsDataset("languages"); - private static final TestsDataset LANGUAGES_LOOKUP = LANGUAGES.withIndex("languages_lookup") + private static final TestDataset HOSTS = new TestDataset("hosts"); + private static final TestDataset APPS = new TestDataset("apps"); + private static final TestDataset APPS_SHORT = APPS.withIndex("apps_short").withTypeMapping(Map.of("id", "short")); + private static final TestDataset LANGUAGES = new TestDataset("languages"); + private static final TestDataset LANGUAGES_LOOKUP = LANGUAGES.withIndex("languages_lookup") .withSetting("languages_lookup-settings.json"); - private 
static final TestsDataset LANGUAGES_LOOKUP_NON_UNIQUE_KEY = LANGUAGES_LOOKUP.withIndex("languages_lookup_non_unique_key") + private static final TestDataset LANGUAGES_LOOKUP_NON_UNIQUE_KEY = LANGUAGES_LOOKUP.withIndex("languages_lookup_non_unique_key") .withData("languages_non_unique_key.csv"); - private static final TestsDataset LANGUAGES_NESTED_FIELDS = new TestsDataset( + private static final TestDataset LANGUAGES_NESTED_FIELDS = new TestDataset( "languages_nested_fields", "mapping-languages_nested_fields.json", "languages_nested_fields.csv" ).withSetting("languages_lookup-settings.json"); - private static final TestsDataset ALERTS = new TestsDataset("alerts"); - private static final TestsDataset UL_LOGS = new TestsDataset("ul_logs"); - private static final TestsDataset SAMPLE_DATA = new TestsDataset("sample_data"); - private static final TestsDataset MV_SAMPLE_DATA = new TestsDataset("mv_sample_data"); - private static final TestsDataset SAMPLE_DATA_STR = SAMPLE_DATA.withIndex("sample_data_str") + private static final TestDataset ALERTS = new TestDataset("alerts"); + private static final TestDataset UL_LOGS = new TestDataset("ul_logs"); + private static final TestDataset SAMPLE_DATA = new TestDataset("sample_data"); + private static final TestDataset MV_SAMPLE_DATA = new TestDataset("mv_sample_data"); + private static final TestDataset SAMPLE_DATA_STR = SAMPLE_DATA.withIndex("sample_data_str") .withTypeMapping(Map.of("client_ip", "keyword")); - private static final TestsDataset SAMPLE_DATA_TS_LONG = SAMPLE_DATA.withIndex("sample_data_ts_long") + private static final TestDataset SAMPLE_DATA_TS_LONG = SAMPLE_DATA.withIndex("sample_data_ts_long") .withData("sample_data_ts_long.csv") .withTypeMapping(Map.of("@timestamp", "long")); - private static final TestsDataset SAMPLE_DATA_TS_NANOS = SAMPLE_DATA.withIndex("sample_data_ts_nanos") + private static final TestDataset SAMPLE_DATA_TS_NANOS = SAMPLE_DATA.withIndex("sample_data_ts_nanos") .withData("sample_data_ts_nanos.csv") .withTypeMapping(Map.of("@timestamp", "date_nanos")); - private static final TestsDataset MISSING_IP_SAMPLE_DATA = new TestsDataset("missing_ip_sample_data"); - private static final TestsDataset CLIENT_IPS = new TestsDataset("clientips"); - private static final TestsDataset CLIENT_IPS_LOOKUP = CLIENT_IPS.withIndex("clientips_lookup") + private static final TestDataset MISSING_IP_SAMPLE_DATA = new TestDataset("missing_ip_sample_data"); + private static final TestDataset CLIENT_IPS = new TestDataset("clientips"); + private static final TestDataset CLIENT_IPS_LOOKUP = CLIENT_IPS.withIndex("clientips_lookup") .withSetting("clientips_lookup-settings.json"); - private static final TestsDataset MESSAGE_TYPES = new TestsDataset("message_types"); - private static final TestsDataset MESSAGE_TYPES_LOOKUP = MESSAGE_TYPES.withIndex("message_types_lookup") + private static final TestDataset MESSAGE_TYPES = new TestDataset("message_types"); + private static final TestDataset MESSAGE_TYPES_LOOKUP = MESSAGE_TYPES.withIndex("message_types_lookup") .withSetting("message_types_lookup-settings.json"); - private static final TestsDataset CLIENT_CIDR = new TestsDataset("client_cidr"); - private static final TestsDataset AGES = new TestsDataset("ages"); - private static final TestsDataset HEIGHTS = new TestsDataset("heights"); - private static final TestsDataset DECADES = new TestsDataset("decades"); - private static final TestsDataset AIRPORTS = new TestsDataset("airports"); - private static final TestsDataset AIRPORTS_MP = 
AIRPORTS.withIndex("airports_mp").withData("airports_mp.csv"); - private static final TestsDataset AIRPORTS_NO_DOC_VALUES = new TestsDataset("airports_no_doc_values").withData("airports.csv"); - private static final TestsDataset AIRPORTS_NOT_INDEXED = new TestsDataset("airports_not_indexed").withData("airports.csv"); - private static final TestsDataset AIRPORTS_NOT_INDEXED_NOR_DOC_VALUES = new TestsDataset("airports_not_indexed_nor_doc_values") - .withData("airports.csv"); - private static final TestsDataset AIRPORTS_WEB = new TestsDataset("airports_web"); - private static final TestsDataset DATE_NANOS = new TestsDataset("date_nanos"); - private static final TestsDataset COUNTRIES_BBOX = new TestsDataset("countries_bbox"); - private static final TestsDataset COUNTRIES_BBOX_WEB = new TestsDataset("countries_bbox_web"); - private static final TestsDataset AIRPORT_CITY_BOUNDARIES = new TestsDataset("airport_city_boundaries"); - private static final TestsDataset CARTESIAN_MULTIPOLYGONS = new TestsDataset("cartesian_multipolygons"); - private static final TestsDataset CARTESIAN_MULTIPOLYGONS_NO_DOC_VALUES = new TestsDataset("cartesian_multipolygons_no_doc_values") + private static final TestDataset CLIENT_CIDR = new TestDataset("client_cidr"); + private static final TestDataset AGES = new TestDataset("ages"); + private static final TestDataset HEIGHTS = new TestDataset("heights"); + private static final TestDataset DECADES = new TestDataset("decades"); + private static final TestDataset AIRPORTS = new TestDataset("airports"); + private static final TestDataset AIRPORTS_MP = AIRPORTS.withIndex("airports_mp").withData("airports_mp.csv"); + private static final TestDataset AIRPORTS_NO_DOC_VALUES = new TestDataset("airports_no_doc_values").withData("airports.csv"); + private static final TestDataset AIRPORTS_NOT_INDEXED = new TestDataset("airports_not_indexed").withData("airports.csv"); + private static final TestDataset AIRPORTS_NOT_INDEXED_NOR_DOC_VALUES = new TestDataset("airports_not_indexed_nor_doc_values").withData( + "airports.csv" + ); + private static final TestDataset AIRPORTS_WEB = new TestDataset("airports_web"); + private static final TestDataset DATE_NANOS = new TestDataset("date_nanos"); + private static final TestDataset COUNTRIES_BBOX = new TestDataset("countries_bbox"); + private static final TestDataset COUNTRIES_BBOX_WEB = new TestDataset("countries_bbox_web"); + private static final TestDataset AIRPORT_CITY_BOUNDARIES = new TestDataset("airport_city_boundaries"); + private static final TestDataset CARTESIAN_MULTIPOLYGONS = new TestDataset("cartesian_multipolygons"); + private static final TestDataset CARTESIAN_MULTIPOLYGONS_NO_DOC_VALUES = new TestDataset("cartesian_multipolygons_no_doc_values") .withData("cartesian_multipolygons.csv"); - private static final TestsDataset MULTIVALUE_GEOMETRIES = new TestsDataset("multivalue_geometries"); - private static final TestsDataset MULTIVALUE_POINTS = new TestsDataset("multivalue_points"); - private static final TestsDataset DISTANCES = new TestsDataset("distances"); - private static final TestsDataset K8S = new TestsDataset("k8s", "k8s-mappings.json", "k8s.csv").withSetting("k8s-settings.json"); - private static final TestsDataset ADDRESSES = new TestsDataset("addresses"); - private static final TestsDataset BOOKS = new TestsDataset("books").withSetting("books-settings.json"); - private static final TestsDataset SEMANTIC_TEXT = new TestsDataset("semantic_text").withInferenceEndpoint(true); + private static final TestDataset 
MULTIVALUE_GEOMETRIES = new TestDataset("multivalue_geometries"); + private static final TestDataset MULTIVALUE_POINTS = new TestDataset("multivalue_points"); + private static final TestDataset DISTANCES = new TestDataset("distances"); + private static final TestDataset K8S = new TestDataset("k8s", "k8s-mappings.json", "k8s.csv").withSetting("k8s-settings.json"); + private static final TestDataset ADDRESSES = new TestDataset("addresses"); + private static final TestDataset BOOKS = new TestDataset("books").withSetting("books-settings.json"); + private static final TestDataset SEMANTIC_TEXT = new TestDataset("semantic_text").withInferenceEndpoint(true); - public static final Map CSV_DATASET_MAP = Map.ofEntries( + public static final Map CSV_DATASET_MAP = Map.ofEntries( Map.entry(EMPLOYEES.indexName, EMPLOYEES), Map.entry(EMPLOYEES_INCOMPATIBLE.indexName, EMPLOYEES_INCOMPATIBLE), Map.entry(HOSTS.indexName, HOSTS), @@ -265,12 +267,12 @@ public class CsvTestsDataLoader { } } - public static Set availableDatasetsForEs(RestClient client, boolean supportsIndexModeLookup) throws IOException { + public static Set availableDatasetsForEs(RestClient client, boolean supportsIndexModeLookup) throws IOException { boolean inferenceEnabled = clusterHasInferenceEndpoint(client); - Set testDataSets = new HashSet<>(); + Set testDataSets = new HashSet<>(); - for (TestsDataset dataset : CSV_DATASET_MAP.values()) { + for (TestDataset dataset : CSV_DATASET_MAP.values()) { if ((inferenceEnabled || dataset.requiresInferenceEndpoint == false) && (supportsIndexModeLookup || isLookupDataset(dataset) == false)) { testDataSets.add(dataset); @@ -280,7 +282,7 @@ public class CsvTestsDataLoader { return testDataSets; } - public static boolean isLookupDataset(TestsDataset dataset) throws IOException { + public static boolean isLookupDataset(TestDataset dataset) throws IOException { Settings settings = dataset.readSettingsFile(); String mode = settings.get("index.mode"); return (mode != null && mode.equalsIgnoreCase("lookup")); @@ -362,7 +364,7 @@ public class CsvTestsDataLoader { client.performRequest(request); } - private static void load(RestClient client, TestsDataset dataset, Logger logger, IndexCreator indexCreator) throws IOException { + private static void load(RestClient client, TestDataset dataset, Logger logger, IndexCreator indexCreator) throws IOException { final String mappingName = "/" + dataset.mappingFileName; URL mapping = CsvTestsDataLoader.class.getResource(mappingName); if (mapping == null) { @@ -603,25 +605,32 @@ public class CsvTestsDataLoader { } } - public record TestsDataset( + public record MultiIndexTestDataset(String indexPattern, List datasets) { + public static MultiIndexTestDataset of(TestDataset testsDataset) { + return new MultiIndexTestDataset(testsDataset.indexName, List.of(testsDataset)); + } + + } + + public record TestDataset( String indexName, String mappingFileName, String dataFileName, String settingFileName, boolean allowSubFields, - Map typeMapping, + @Nullable Map typeMapping, boolean requiresInferenceEndpoint ) { - public TestsDataset(String indexName, String mappingFileName, String dataFileName) { + public TestDataset(String indexName, String mappingFileName, String dataFileName) { this(indexName, mappingFileName, dataFileName, null, true, null, false); } - public TestsDataset(String indexName) { + public TestDataset(String indexName) { this(indexName, "mapping-" + indexName + ".json", indexName + ".csv", null, true, null, false); } - public TestsDataset withIndex(String 
indexName) { - return new TestsDataset( + public TestDataset withIndex(String indexName) { + return new TestDataset( indexName, mappingFileName, dataFileName, @@ -632,8 +641,8 @@ public class CsvTestsDataLoader { ); } - public TestsDataset withData(String dataFileName) { - return new TestsDataset( + public TestDataset withData(String dataFileName) { + return new TestDataset( indexName, mappingFileName, dataFileName, @@ -644,8 +653,8 @@ public class CsvTestsDataLoader { ); } - public TestsDataset withSetting(String settingFileName) { - return new TestsDataset( + public TestDataset withSetting(String settingFileName) { + return new TestDataset( indexName, mappingFileName, dataFileName, @@ -656,8 +665,8 @@ public class CsvTestsDataLoader { ); } - public TestsDataset noSubfields() { - return new TestsDataset( + public TestDataset noSubfields() { + return new TestDataset( indexName, mappingFileName, dataFileName, @@ -668,8 +677,8 @@ public class CsvTestsDataLoader { ); } - public TestsDataset withTypeMapping(Map typeMapping) { - return new TestsDataset( + public TestDataset withTypeMapping(Map typeMapping) { + return new TestDataset( indexName, mappingFileName, dataFileName, @@ -680,8 +689,8 @@ public class CsvTestsDataLoader { ); } - public TestsDataset withInferenceEndpoint(boolean needsInference) { - return new TestsDataset(indexName, mappingFileName, dataFileName, settingFileName, allowSubFields, typeMapping, needsInference); + public TestDataset withInferenceEndpoint(boolean needsInference) { + return new TestDataset(indexName, mappingFileName, dataFileName, settingFileName, allowSubFields, typeMapping, needsInference); } private Settings readSettingsFile() throws IOException { diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec index 309386228b1c..95119cae9559 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec @@ -8,7 +8,7 @@ ############################################### basicOnTheDataNode -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM employees | EVAL language_code = languages @@ -25,7 +25,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; basicRow -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW language_code = 1 | LOOKUP JOIN languages_lookup ON language_code @@ -36,7 +36,7 @@ language_code:integer | language_name:keyword ; basicOnTheCoordinator -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM employees | SORT emp_no @@ -53,7 +53,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; subsequentEvalOnTheDataNode -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM employees | EVAL language_code = languages @@ -71,7 +71,7 @@ emp_no:integer | language_code:integer | language_name:keyword | language_code_x ; subsequentEvalOnTheCoordinator -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM employees | SORT emp_no @@ -89,7 +89,7 @@ emp_no:integer | language_code:integer | language_name:keyword | language_code_x ; sortEvalBeforeLookup -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM employees | SORT emp_no @@ -106,7 +106,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; nonUniqueLeftKeyOnTheDataNode -required_capability: 
join_lookup_v9 +required_capability: join_lookup_v10 FROM employees | WHERE emp_no <= 10030 @@ -130,7 +130,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; nonUniqueRightKeyOnTheDataNode -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM employees | EVAL language_code = emp_no % 10 @@ -150,7 +150,7 @@ emp_no:integer | language_code:integer | language_name:keyword | country:k ; nonUniqueRightKeyOnTheCoordinator -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM employees | SORT emp_no @@ -170,7 +170,7 @@ emp_no:integer | language_code:integer | language_name:keyword | country:k ; nonUniqueRightKeyFromRow -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW language_code = 2 | LOOKUP JOIN languages_lookup_non_unique_key ON language_code @@ -183,7 +183,7 @@ language_code:integer | language_name:keyword | country:keyword ; repeatedIndexOnFrom -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM languages_lookup | LOOKUP JOIN languages_lookup ON language_code @@ -201,7 +201,7 @@ dropAllLookedUpFieldsOnTheDataNode-Ignore // Depends on // https://github.com/elastic/elasticsearch/issues/118778 // https://github.com/elastic/elasticsearch/issues/118781 -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM employees | EVAL language_code = emp_no % 10 @@ -222,7 +222,7 @@ dropAllLookedUpFieldsOnTheCoordinator-Ignore // Depends on // https://github.com/elastic/elasticsearch/issues/118778 // https://github.com/elastic/elasticsearch/issues/118781 -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM employees | SORT emp_no @@ -247,7 +247,7 @@ emp_no:integer ############################################### filterOnLeftSide -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM employees | EVAL language_code = languages @@ -264,7 +264,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; filterOnRightSide -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -280,7 +280,7 @@ FROM sample_data ; filterOnRightSideAfterStats -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -293,7 +293,7 @@ count:long | type:keyword ; filterOnJoinKey -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM employees | EVAL language_code = languages @@ -308,7 +308,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; filterOnJoinKeyAndRightSide -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM employees | WHERE emp_no < 10006 @@ -325,7 +325,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; filterOnRightSideOnTheCoordinator -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM employees | SORT emp_no @@ -341,7 +341,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; filterOnJoinKeyOnTheCoordinator -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM employees | SORT emp_no @@ -357,7 +357,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; filterOnJoinKeyAndRightSideOnTheCoordinator -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM employees | SORT emp_no @@ -374,7 +374,7 @@ emp_no:integer | language_code:integer | 
language_name:keyword ; filterOnTheDataNodeThenFilterOnTheCoordinator -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM employees | EVAL language_code = languages @@ -395,7 +395,7 @@ emp_no:integer | language_code:integer | language_name:keyword ########################################################################### nullJoinKeyOnTheDataNode -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM employees | WHERE emp_no < 10004 @@ -412,13 +412,14 @@ emp_no:integer | language_code:integer | language_name:keyword ; mvJoinKeyOnTheDataNode -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM employees | WHERE 10003 < emp_no AND emp_no < 10008 | EVAL language_code = emp_no % 10 | LOOKUP JOIN languages_lookup_non_unique_key ON language_code | SORT emp_no +| EVAL language_name = MV_SORT(language_name) | KEEP emp_no, language_code, language_name ; @@ -430,7 +431,7 @@ emp_no:integer | language_code:integer | language_name:keyword ; mvJoinKeyFromRow -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW language_code = [4, 5, 6, 7] | LOOKUP JOIN languages_lookup_non_unique_key ON language_code @@ -443,7 +444,7 @@ language_code:integer | language_name:keyword | country:keyword ; mvJoinKeyFromRowExpanded -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW language_code = [4, 5, 6, 7, 8] | MV_EXPAND language_code @@ -465,7 +466,7 @@ language_code:integer | language_name:keyword | country:keyword ########################################################################### joinOnNestedField -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM employees | WHERE 10000 < emp_no AND emp_no < 10006 @@ -485,7 +486,7 @@ emp_no:integer | language.id:integer | language.name:text joinOnNestedFieldRow -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW language.code = "EN" | LOOKUP JOIN languages_nested_fields ON language.code @@ -498,7 +499,7 @@ language.id:integer | language.code:keyword | language.name.keyword:keyword joinOnNestedNestedFieldRow -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW language.name.keyword = "English" | LOOKUP JOIN languages_nested_fields ON language.name.keyword @@ -514,7 +515,7 @@ language.id:integer | language.name:text | language.name.keyword:keyword ############################################### lookupIPFromRow -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW left = "left", client_ip = "172.21.0.5", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -525,7 +526,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromKeepRow -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW left = "left", client_ip = "172.21.0.5", right = "right" | KEEP left, client_ip, right @@ -537,7 +538,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromRowWithShadowing -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -548,7 +549,7 @@ left | 172.21.0.5 | right | Development ; lookupIPFromRowWithShadowingKeep -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -561,7 +562,7 @@ left | 172.21.0.5 | right | Development ; 
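Two edits run through this spec file. First, every test's required_capability header moves from join_lookup_v9 to join_lookup_v10 in lockstep with the JOIN_LOOKUP_V10 rename in EsqlCapabilities further below; since that capability is only advertised on snapshot builds, the whole suite is skipped wherever the new join semantics are absent. Second, mvJoinKeyOnTheDataNode above adds an MV_SORT on language_name so the multivalued lookup result compares deterministically. A hypothetical sketch of the gating rule the headers rely on (not the actual csv-spec runner) could look like:

```java
import java.util.List;
import java.util.Set;

// Hypothetical helper, not the real csv-spec runner: a test runs only when
// every node advertises all capabilities named in its required_capability lines.
final class CapabilityGate {
    static boolean shouldRun(Set<String> required, List<Set<String>> perNodeCapabilities) {
        // After the rename these tests require "join_lookup_v10"; a node that
        // still reports only "join_lookup_v9" fails the check, so the test is skipped.
        return perNodeCapabilities.stream().allMatch(node -> node.containsAll(required));
    }
}
```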
lookupIPFromRowWithShadowingKeepReordered -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -574,7 +575,7 @@ right | Development | 172.21.0.5 ; lookupIPFromIndex -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -593,7 +594,7 @@ ignoreOrder:true ; lookupIPFromIndexKeep -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -613,7 +614,7 @@ ignoreOrder:true ; lookupIPFromIndexKeepKeep -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | KEEP client_ip, event_duration, @timestamp, message @@ -635,7 +636,7 @@ timestamp:date | client_ip:keyword | event_duration:long | msg:keyword ; lookupIPFromIndexStats -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -651,7 +652,7 @@ count:long | env:keyword ; lookupIPFromIndexStatsKeep -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -668,7 +669,7 @@ count:long | env:keyword ; statsAndLookupIPFromIndex -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -689,7 +690,7 @@ count:long | client_ip:keyword | env:keyword ############################################### lookupMessageFromRow -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW left = "left", message = "Connected to 10.1.0.1", right = "right" | LOOKUP JOIN message_types_lookup ON message @@ -700,7 +701,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromKeepRow -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW left = "left", message = "Connected to 10.1.0.1", right = "right" | KEEP left, message, right @@ -712,7 +713,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromRowWithShadowing -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW left = "left", message = "Connected to 10.1.0.1", type = "unknown", right = "right" | LOOKUP JOIN message_types_lookup ON message @@ -723,7 +724,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromRowWithShadowingKeep -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW left = "left", message = "Connected to 10.1.0.1", type = "unknown", right = "right" | LOOKUP JOIN message_types_lookup ON message @@ -735,7 +736,7 @@ left | Connected to 10.1.0.1 | right | Success ; lookupMessageFromIndex -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -753,7 +754,7 @@ ignoreOrder:true ; lookupMessageFromIndexKeep -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -772,7 +773,7 @@ ignoreOrder:true ; lookupMessageFromIndexKeepKeep -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | KEEP client_ip, event_duration, @timestamp, message @@ -792,7 +793,7 @@ ignoreOrder:true ; lookupMessageFromIndexKeepReordered -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | LOOKUP JOIN 
message_types_lookup ON message @@ -811,7 +812,7 @@ Success | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 ; lookupMessageFromIndexStats -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -826,7 +827,7 @@ count:long | type:keyword ; lookupMessageFromIndexStatsKeep -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -842,7 +843,7 @@ count:long | type:keyword ; statsAndLookupMessageFromIndex -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | STATS count = count(message) BY message @@ -860,7 +861,7 @@ count:long | type:keyword | message:keyword ; lookupMessageFromIndexTwice -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -882,7 +883,7 @@ ignoreOrder:true ; lookupMessageFromIndexTwiceKeep -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -905,7 +906,7 @@ ignoreOrder:true ; lookupMessageFromIndexTwiceFullyShadowing -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | LOOKUP JOIN message_types_lookup ON message @@ -929,7 +930,7 @@ ignoreOrder:true ############################################### lookupIPAndMessageFromRow -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -941,7 +942,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowKeepBefore -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", right = "right" | KEEP left, client_ip, message, right @@ -954,7 +955,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowKeepBetween -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -967,7 +968,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowKeepAfter -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -980,7 +981,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowing -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", type = "type", right = "right" | LOOKUP JOIN clientips_lookup ON client_ip @@ -992,7 +993,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowingKeep -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -1006,7 +1007,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowingKeepKeep -required_capability: join_lookup_v9 
+required_capability: join_lookup_v10 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -1021,7 +1022,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowingKeepKeepKeep -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -1037,7 +1038,7 @@ left | 172.21.0.5 | Connected to 10.1.0.1 | right | Devel ; lookupIPAndMessageFromRowWithShadowingKeepReordered -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 ROW left = "left", client_ip = "172.21.0.5", message = "Connected to 10.1.0.1", env = "env", right = "right" | EVAL client_ip = client_ip::keyword @@ -1051,7 +1052,7 @@ right | Development | Success | 172.21.0.5 ; lookupIPAndMessageFromIndex -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -1071,7 +1072,7 @@ ignoreOrder:true ; lookupIPAndMessageFromIndexKeep -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -1092,7 +1093,7 @@ ignoreOrder:true ; lookupIPAndMessageFromIndexStats -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -1110,7 +1111,7 @@ count:long | env:keyword | type:keyword ; lookupIPAndMessageFromIndexStatsKeep -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -1129,7 +1130,7 @@ count:long | env:keyword | type:keyword ; statsAndLookupIPAndMessageFromIndex -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -1148,7 +1149,7 @@ count:long | client_ip:keyword | message:keyword | env:keyword | type:keyw ; lookupIPAndMessageFromIndexChainedEvalKeep -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -1170,7 +1171,7 @@ ignoreOrder:true ; lookupIPAndMessageFromIndexChainedRenameKeep -required_capability: join_lookup_v9 +required_capability: join_lookup_v10 FROM sample_data | EVAL client_ip = client_ip::keyword @@ -1190,3 +1191,19 @@ ignoreOrder:true 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | QA | null 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | QA | null ; + +lookupIndexInFromRepeatedRowBug +required_capability: join_lookup_v10 +FROM languages_lookup_non_unique_key +| WHERE language_code == 1 +| LOOKUP JOIN languages_lookup ON language_code +| KEEP language_code, language_name, country +| SORT language_code, language_name, country +; + +language_code:integer | language_name:keyword | country:text +1 | English | Canada +1 | English | United Kingdom +1 | English | United States of America +1 | English | null +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index d65a0975e22b..091baafe293d 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -1287,7 +1287,7 @@ emp_no:integer | first_name:keyword 10001 | Georgi ; -equalsToUpperFolded 
+equalsToUpperFolded#[skip:-8.12.99, reason:case insensitive operators implemented in v 8.13] from employees | where to_upper(first_name) == "Georgi" | keep emp_no, first_name @@ -1296,7 +1296,7 @@ from employees emp_no:integer | first_name:keyword ; -negatedEqualsToUpperFolded +negatedEqualsToUpperFolded#[skip:-8.12.99, reason:case insensitive operators implemented in v 8.13] from employees | where not(to_upper(first_name) == "Georgi") | stats c = count() @@ -1315,7 +1315,7 @@ from employees emp_no:integer | first_name:keyword ; -equalsNullToUpperFolded +equalsNullToUpperFolded#[skip:-8.12.99, reason:case insensitive operators implemented in v 8.13] from employees | where to_upper(first_name) == to_string(null) | keep emp_no, first_name @@ -1333,7 +1333,7 @@ from employees emp_no:integer | first_name:keyword ; -notEqualsNullToUpperFolded +notEqualsNullToUpperFolded#[skip:-8.12.99, reason:case insensitive operators implemented in v 8.13] from employees | where to_upper(first_name) != to_string(null) | keep emp_no, first_name @@ -1342,7 +1342,7 @@ from employees emp_no:integer | first_name:keyword ; -notEqualsToUpperFolded +notEqualsToUpperFolded#[skip:-8.12.99, reason:case insensitive operators implemented in v 8.13] from employees | where to_upper(first_name) != "Georgi" | stats c = count() @@ -1352,7 +1352,7 @@ c:long 90 ; -negatedNotEqualsToUpperFolded +negatedNotEqualsToUpperFolded#[skip:-8.12.99, reason:case insensitive operators implemented in v 8.13] from employees | where not(to_upper(first_name) != "Georgi") | stats c = count() @@ -1384,7 +1384,7 @@ emp_no:integer | first_name:keyword 10002 | Bezalel ; -equalsToLowerFolded +equalsToLowerFolded#[skip:-8.12.99, reason:case insensitive operators implemented in v 8.13] from employees | where to_lower(first_name) == "Georgi" | keep emp_no, first_name @@ -1393,7 +1393,7 @@ from employees emp_no:integer | first_name:keyword ; -notEqualsToLowerFolded +notEqualsToLowerFolded#[skip:-8.12.99, reason:case insensitive operators implemented in v 8.13] from employees | where to_lower(first_name) != "Georgi" | stats c = count() @@ -1403,7 +1403,7 @@ c:long 90 ; -equalsToLowerWithUnico(rn|d)s +equalsToLowerWithUnico(rn|d)s#[skip:-8.12.99, reason:case insensitive operators implemented in v 8.13] from employees | where to_lower(concat(first_name, "🦄🦄")) != "georgi🦄🦄" | stats c = count() diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec index bf6e2f8ae089..81164382c054 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec @@ -445,6 +445,7 @@ count:long | message:keyword multiIndexMissingIpToString required_capability: union_types +required_capability: metadata_fields required_capability: union_types_missing_field FROM sample_data, sample_data_str, missing_ip_sample_data METADATA _index @@ -479,6 +480,7 @@ sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450 multiIndexMissingIpToIp required_capability: union_types +required_capability: metadata_fields required_capability: union_types_missing_field FROM sample_data, sample_data_str, missing_ip_sample_data METADATA _index @@ -1373,9 +1375,6 @@ client_ip:ip | event_duration:long | message:keyword | @timestamp:keywo # Once INLINESTATS supports expressions in agg functions and groups, convert the group in the inlinestats multiIndexIndirectUseOfUnionTypesInSort -// 
TODO: `union_types` is required only because this makes the test skip in the csv tests; better solution: -// make the csv tests work with multiple indices. -required_capability: union_types FROM sample_data, sample_data_ts_long | SORT client_ip ASC | LIMIT 1 @@ -1386,8 +1385,6 @@ FROM sample_data, sample_data_ts_long ; multiIndexIndirectUseOfUnionTypesInEval -// TODO: `union_types` is required only because this makes the test skip in the csv tests; better solution: -// make the csv tests work with multiple indices. required_capability: union_types FROM sample_data, sample_data_ts_long | EVAL foo = event_duration > 1232381 @@ -1400,9 +1397,6 @@ FROM sample_data, sample_data_ts_long ; multiIndexIndirectUseOfUnionTypesInRename -// TODO: `union_types` is required only because this makes the test skip in the csv tests; better solution: -// make the csv tests work with multiple indices. -required_capability: union_types required_capability: union_types_fix_rename_resolution FROM sample_data, sample_data_ts_long | RENAME message AS event_message @@ -1415,9 +1409,6 @@ FROM sample_data, sample_data_ts_long ; multiIndexIndirectUseOfUnionTypesInKeep -// TODO: `union_types` is required only because this makes the test skip in the csv tests; better solution: -// make the csv tests work with multiple indices. -required_capability: union_types FROM sample_data, sample_data_ts_long | KEEP client_ip, event_duration, message | SORT client_ip ASC @@ -1429,9 +1420,6 @@ client_ip:ip | event_duration:long | message:keyword ; multiIndexIndirectUseOfUnionTypesInWildcardKeep -// TODO: `union_types` is required only because this makes the test skip in the csv tests; better solution: -// make the csv tests work with multiple indices. -required_capability: union_types required_capability: union_types_fix_rename_resolution FROM sample_data, sample_data_ts_long | KEEP * @@ -1444,9 +1432,6 @@ FROM sample_data, sample_data_ts_long ; multiIndexIndirectUseOfUnionTypesInWildcardKeep2 -// TODO: `union_types` is required only because this makes the test skip in the csv tests; better solution: -// make the csv tests work with multiple indices. -required_capability: union_types required_capability: union_types_fix_rename_resolution FROM sample_data, sample_data_ts_long | KEEP *e* @@ -1460,9 +1445,6 @@ FROM sample_data, sample_data_ts_long multiIndexUseOfUnionTypesInKeep -// TODO: `union_types` is required only because this makes the test skip in the csv tests; better solution: -// make the csv tests work with multiple indices. -required_capability: union_types required_capability: union_types_fix_rename_resolution FROM sample_data, sample_data_ts_long | KEEP @timestamp @@ -1474,9 +1456,6 @@ null ; multiIndexUseOfUnionTypesInDrop -// TODO: `union_types` is required only because this makes the test skip in the csv tests; better solution: -// make the csv tests work with multiple indices. -required_capability: union_types required_capability: union_types_fix_rename_resolution FROM sample_data, sample_data_ts_long | DROP @timestamp @@ -1489,9 +1468,6 @@ client_ip:ip | event_duration:long | message:keyword ; multiIndexIndirectUseOfUnionTypesInWildcardDrop -// TODO: `union_types` is required only because this makes the test skip in the csv tests; better solution: -// make the csv tests work with multiple indices. 
-required_capability: union_types required_capability: union_types_fix_rename_resolution FROM sample_data, sample_data_ts_long | DROP *time* @@ -1504,9 +1480,6 @@ client_ip:ip | event_duration:long | message:keyword ; multiIndexIndirectUseOfUnionTypesInWhere -// TODO: `union_types` is required only because this makes the test skip in the csv tests; better solution: -// make the csv tests work with multiple indices. -required_capability: union_types FROM sample_data, sample_data_ts_long | WHERE message == "Disconnected" ; @@ -1517,9 +1490,6 @@ FROM sample_data, sample_data_ts_long ; multiIndexIndirectUseOfUnionTypesInDissect -// TODO: `union_types` is required only because this makes the test skip in the csv tests; better solution: -// make the csv tests work with multiple indices. -required_capability: union_types FROM sample_data, sample_data_ts_long | DISSECT message "%{foo}" | SORT client_ip ASC @@ -1531,9 +1501,6 @@ FROM sample_data, sample_data_ts_long ; multiIndexIndirectUseOfUnionTypesInGrok -// TODO: `union_types` is required only because this makes the test skip in the csv tests; better solution: -// make the csv tests work with multiple indices. -required_capability: union_types FROM sample_data, sample_data_ts_long | GROK message "%{WORD:foo}" | SORT client_ip ASC @@ -1545,9 +1512,6 @@ FROM sample_data, sample_data_ts_long ; multiIndexIndirectUseOfUnionTypesInEnrich -// TODO: `union_types` is required only because this makes the test skip in the csv tests; better solution: -// make the csv tests work with multiple indices. -required_capability: union_types required_capability: enrich_load FROM sample_data, sample_data_ts_long | EVAL client_ip = client_ip::keyword @@ -1561,9 +1525,6 @@ FROM sample_data, sample_data_ts_long ; multiIndexIndirectUseOfUnionTypesInStats -// TODO: `union_types` is required only because this makes the test skip in the csv tests; better solution: -// make the csv tests work with multiple indices. -required_capability: union_types FROM sample_data, sample_data_ts_long | STATS foo = max(event_duration) BY client_ip | SORT client_ip ASC @@ -1577,9 +1538,6 @@ foo:long | client_ip:ip ; multiIndexIndirectUseOfUnionTypesInInlineStats-Ignore -// TODO: `union_types` is required only because this makes the test skip in the csv tests; better solution: -// make the csv tests work with multiple indices. -required_capability: union_types required_capability: inlinestats FROM sample_data, sample_data_ts_long | INLINESTATS foo = max(event_duration) @@ -1592,9 +1550,6 @@ FROM sample_data, sample_data_ts_long ; multiIndexIndirectUseOfUnionTypesInLookup-Ignore -// TODO: `union_types` is required only because this makes the test skip in the csv tests; better solution: -// make the csv tests work with multiple indices. -required_capability: union_types required_capability: lookup_v4 FROM sample_data, sample_data_ts_long | SORT client_ip ASC @@ -1608,9 +1563,6 @@ FROM sample_data, sample_data_ts_long ; multiIndexIndirectUseOfUnionTypesInMvExpand -// TODO: `union_types` is required only because this makes the test skip in the csv tests; better solution: -// make the csv tests work with multiple indices. 
-required_capability: union_types FROM sample_data, sample_data_ts_long | EVAL foo = MV_APPEND(message, message) | SORT client_ip ASC diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractCrossClustersUsageTelemetryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractCrossClustersUsageTelemetryIT.java new file mode 100644 index 000000000000..ffbddd52b255 --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractCrossClustersUsageTelemetryIT.java @@ -0,0 +1,205 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.cluster.stats.CCSTelemetrySnapshot; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.AbstractMultiClustersTestCase; +import org.elasticsearch.test.SkipUnavailableRule; +import org.elasticsearch.usage.UsageService; +import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.core.TimeValue.timeValueMillis; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; + +public class AbstractCrossClustersUsageTelemetryIT extends AbstractMultiClustersTestCase { + private static final Logger LOGGER = LogManager.getLogger(AbstractCrossClustersUsageTelemetryIT.class); + protected static final String REMOTE1 = "cluster-a"; + protected static final String REMOTE2 = "cluster-b"; + protected static final String LOCAL_INDEX = "logs-1"; + protected static final String REMOTE_INDEX = "logs-2"; + // We want to send search to a specific node (we don't care which one) so that we could + // collect the CCS telemetry from it later + protected String queryNode; + + @Before + public void setupQueryNode() { + // The tests are set up in a way that all queries within a single test are sent to the same node, + // thus enabling incremental collection of telemetry data, but the node is random for each test. 
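+        // The counters accumulate in each node's UsageService, which is why getTelemetrySnapshot below always reads from this same queryNode.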
+ queryNode = cluster(LOCAL_CLUSTER).getRandomNodeName(); + } + + protected CCSTelemetrySnapshot getTelemetryFromQuery(String query, String client) throws ExecutionException, InterruptedException { + EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest(); + request.query(query); + request.pragmas(AbstractEsqlIntegTestCase.randomPragmas()); + request.columnar(randomBoolean()); + request.includeCCSMetadata(randomBoolean()); + return getTelemetryFromQuery(request, client); + } + + protected CCSTelemetrySnapshot getTelemetryFromQuery(EsqlQueryRequest request, String client) throws ExecutionException, + InterruptedException { + // We don't care here too much about the response, we just want to trigger the telemetry collection. + // So we check it's not null and leave the rest to other tests. + if (client != null) { + assertResponse( + cluster(LOCAL_CLUSTER).client(queryNode) + .filterWithHeader(Map.of(Task.X_ELASTIC_PRODUCT_ORIGIN_HTTP_HEADER, client)) + .execute(EsqlQueryAction.INSTANCE, request), + Assert::assertNotNull + ); + + } else { + assertResponse(cluster(LOCAL_CLUSTER).client(queryNode).execute(EsqlQueryAction.INSTANCE, request), Assert::assertNotNull); + } + return getTelemetrySnapshot(queryNode); + } + + protected CCSTelemetrySnapshot getTelemetryFromAsyncQuery(String query) throws Exception { + EsqlQueryRequest request = EsqlQueryRequest.asyncEsqlQueryRequest(); + request.query(query); + request.pragmas(AbstractEsqlIntegTestCase.randomPragmas()); + request.columnar(randomBoolean()); + request.includeCCSMetadata(randomBoolean()); + request.waitForCompletionTimeout(TimeValue.timeValueMillis(100)); + request.keepOnCompletion(false); + return getTelemetryFromAsyncQuery(request); + } + + protected CCSTelemetrySnapshot getTelemetryFromAsyncQuery(EsqlQueryRequest request) throws Exception { + AtomicReference asyncExecutionId = new AtomicReference<>(); + assertResponse(cluster(LOCAL_CLUSTER).client(queryNode).execute(EsqlQueryAction.INSTANCE, request), resp -> { + if (resp.isRunning()) { + assertNotNull("async execution id is null", resp.asyncExecutionId()); + asyncExecutionId.set(resp.asyncExecutionId().get()); + } + }); + if (asyncExecutionId.get() != null) { + assertBusy(() -> { + var getResultsRequest = new GetAsyncResultRequest(asyncExecutionId.get()).setWaitForCompletionTimeout(timeValueMillis(1)); + try ( + var resp = cluster(LOCAL_CLUSTER).client(queryNode) + .execute(EsqlAsyncGetResultAction.INSTANCE, getResultsRequest) + .actionGet(30, TimeUnit.SECONDS) + ) { + assertFalse(resp.isRunning()); + } + }); + } + return getTelemetrySnapshot(queryNode); + } + + protected CCSTelemetrySnapshot getTelemetryFromFailedQuery(String query) throws Exception { + EsqlQueryRequest request = EsqlQueryRequest.syncEsqlQueryRequest(); + request.query(query); + request.pragmas(AbstractEsqlIntegTestCase.randomPragmas()); + request.columnar(randomBoolean()); + request.includeCCSMetadata(randomBoolean()); + + ExecutionException ee = expectThrows( + ExecutionException.class, + cluster(LOCAL_CLUSTER).client(queryNode).execute(EsqlQueryAction.INSTANCE, request)::get + ); + assertNotNull(ee.getCause()); + + return getTelemetrySnapshot(queryNode); + } + + private CCSTelemetrySnapshot getTelemetrySnapshot(String nodeName) { + var usage = cluster(LOCAL_CLUSTER).getInstance(UsageService.class, nodeName); + return usage.getEsqlUsageHolder().getCCSTelemetrySnapshot(); + } + + @Override + protected boolean reuseClusters() { + return false; + } + + @Override + protected List remoteClusterAlias() { + 
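+        // cluster-a and cluster-b; the subclasses assert per-cluster telemetry for exactly these aliases plus the local cluster.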
return List.of(REMOTE1, REMOTE2); + } + + @Rule + public SkipUnavailableRule skipOverride = new SkipUnavailableRule(REMOTE1, REMOTE2); + + protected Map setupClusters() { + int numShardsLocal = randomIntBetween(1, 5); + populateLocalIndices(LOCAL_INDEX, numShardsLocal); + + int numShardsRemote = randomIntBetween(1, 5); + populateRemoteIndices(REMOTE1, REMOTE_INDEX, numShardsRemote); + + Map clusterInfo = new HashMap<>(); + clusterInfo.put("local.num_shards", numShardsLocal); + clusterInfo.put("local.index", LOCAL_INDEX); + clusterInfo.put("remote.num_shards", numShardsRemote); + clusterInfo.put("remote.index", REMOTE_INDEX); + + int numShardsRemote2 = randomIntBetween(1, 5); + populateRemoteIndices(REMOTE2, REMOTE_INDEX, numShardsRemote2); + clusterInfo.put("remote2.index", REMOTE_INDEX); + clusterInfo.put("remote2.num_shards", numShardsRemote2); + + return clusterInfo; + } + + void populateLocalIndices(String indexName, int numShards) { + Client localClient = client(LOCAL_CLUSTER); + assertAcked( + localClient.admin() + .indices() + .prepareCreate(indexName) + .setSettings(Settings.builder().put("index.number_of_shards", numShards)) + .setMapping("id", "type=keyword", "tag", "type=keyword", "v", "type=long") + ); + for (int i = 0; i < 10; i++) { + localClient.prepareIndex(indexName).setSource("id", "local-" + i, "tag", "local", "v", i).get(); + } + localClient.admin().indices().prepareRefresh(indexName).get(); + } + + void populateRemoteIndices(String clusterAlias, String indexName, int numShards) { + Client remoteClient = client(clusterAlias); + assertAcked( + remoteClient.admin() + .indices() + .prepareCreate(indexName) + .setSettings(Settings.builder().put("index.number_of_shards", numShards)) + .setMapping("id", "type=keyword", "tag", "type=keyword", "v", "type=long") + ); + for (int i = 0; i < 10; i++) { + remoteClient.prepareIndex(indexName).setSource("id", "remote-" + i, "tag", "remote", "v", i * i).get(); + } + remoteClient.admin().indices().prepareRefresh(indexName).get(); + } + + @Override + protected Map skipUnavailableForRemoteClusters() { + var map = skipOverride.getMap(); + LOGGER.info("Using skip_unavailable map: [{}]", map); + return map; + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersUsageTelemetryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersUsageTelemetryIT.java new file mode 100644 index 000000000000..33d868e7a69e --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersUsageTelemetryIT.java @@ -0,0 +1,231 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.action.admin.cluster.stats.CCSTelemetrySnapshot; +import org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.SkipUnavailableRule; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry.ASYNC_FEATURE; +import static org.hamcrest.Matchers.equalTo; + +public class CrossClustersUsageTelemetryIT extends AbstractCrossClustersUsageTelemetryIT { + + @Override + protected Collection> nodePlugins(String clusterAlias) { + List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); + plugins.add(EsqlPluginWithEnterpriseOrTrialLicense.class); + plugins.add(CrossClustersQueryIT.InternalExchangePlugin.class); + return plugins; + } + + public void assertPerClusterCount(CCSTelemetrySnapshot.PerClusterCCSTelemetry perCluster, long count) { + assertThat(perCluster.getCount(), equalTo(count)); + assertThat(perCluster.getSkippedCount(), equalTo(0L)); + assertThat(perCluster.getTook().count(), equalTo(count)); + } + + public void testLocalRemote() throws Exception { + setupClusters(); + var telemetry = getTelemetryFromQuery("from logs-*,c*:logs-* | stats sum (v)", "kibana"); + + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(1L)); + assertThat(telemetry.getFailureReasons().size(), equalTo(0)); + assertThat(telemetry.getTook().count(), equalTo(1L)); + assertThat(telemetry.getTookMrtFalse().count(), equalTo(0L)); + assertThat(telemetry.getTookMrtTrue().count(), equalTo(0L)); + assertThat(telemetry.getRemotesPerSearchAvg(), equalTo(2.0)); + assertThat(telemetry.getRemotesPerSearchMax(), equalTo(2L)); + assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(0L)); + assertThat(telemetry.getClientCounts().size(), equalTo(1)); + assertThat(telemetry.getClientCounts().get("kibana"), equalTo(1L)); + assertThat(telemetry.getFeatureCounts().get(ASYNC_FEATURE), equalTo(null)); + + var perCluster = telemetry.getByRemoteCluster(); + assertThat(perCluster.size(), equalTo(3)); + for (String clusterAlias : remoteClusterAlias()) { + assertPerClusterCount(perCluster.get(clusterAlias), 1L); + } + assertPerClusterCount(perCluster.get(LOCAL_CLUSTER), 1L); + + telemetry = getTelemetryFromQuery("from logs-*,c*:logs-* | stats sum (v)", "kibana"); + assertThat(telemetry.getTotalCount(), equalTo(2L)); + assertThat(telemetry.getClientCounts().get("kibana"), equalTo(2L)); + perCluster = telemetry.getByRemoteCluster(); + assertThat(perCluster.size(), equalTo(3)); + for (String clusterAlias : remoteClusterAlias()) { + assertPerClusterCount(perCluster.get(clusterAlias), 2L); + } + assertPerClusterCount(perCluster.get(LOCAL_CLUSTER), 2L); + } + + public void testLocalOnly() throws Exception { + setupClusters(); + // Should not produce any usage info since it's a local search + var telemetry = getTelemetryFromQuery("from logs-* | stats sum (v)", "kibana"); + + assertThat(telemetry.getTotalCount(), equalTo(0L)); + assertThat(telemetry.getSuccessCount(), equalTo(0L)); + assertThat(telemetry.getByRemoteCluster().size(), equalTo(0)); + } + + @SkipUnavailableRule.NotSkipped(aliases = REMOTE1) + public void testFailed() throws Exception { + setupClusters(); + // Should not produce any usage info since it's a local search + var telemetry = getTelemetryFromFailedQuery("from no_such_index | 
stats sum (v)"); + + assertThat(telemetry.getTotalCount(), equalTo(0L)); + assertThat(telemetry.getSuccessCount(), equalTo(0L)); + assertThat(telemetry.getByRemoteCluster().size(), equalTo(0)); + + // One remote is skipped, one is not + telemetry = getTelemetryFromFailedQuery("from logs-*,c*:no_such_index | stats sum (v)"); + + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(0L)); + assertThat(telemetry.getByRemoteCluster().size(), equalTo(1)); + assertThat(telemetry.getRemotesPerSearchAvg(), equalTo(2.0)); + assertThat(telemetry.getRemotesPerSearchMax(), equalTo(2L)); + assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(1L)); + Map expectedFailure = Map.of(CCSUsageTelemetry.Result.NOT_FOUND.getName(), 1L); + assertThat(telemetry.getFailureReasons(), equalTo(expectedFailure)); + // cluster-b should be skipped + assertThat(telemetry.getByRemoteCluster().get(REMOTE2).getCount(), equalTo(0L)); + assertThat(telemetry.getByRemoteCluster().get(REMOTE2).getSkippedCount(), equalTo(1L)); + + // this is only for cluster-a so no skipped remotes + telemetry = getTelemetryFromFailedQuery("from logs-*,cluster-a:no_such_index | stats sum (v)"); + assertThat(telemetry.getTotalCount(), equalTo(2L)); + assertThat(telemetry.getSuccessCount(), equalTo(0L)); + assertThat(telemetry.getByRemoteCluster().size(), equalTo(1)); + assertThat(telemetry.getRemotesPerSearchAvg(), equalTo(2.0)); + assertThat(telemetry.getRemotesPerSearchMax(), equalTo(2L)); + assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(1L)); + expectedFailure = Map.of(CCSUsageTelemetry.Result.NOT_FOUND.getName(), 2L); + assertThat(telemetry.getFailureReasons(), equalTo(expectedFailure)); + assertThat(telemetry.getByRemoteCluster().size(), equalTo(1)); + } + + // TODO: enable when skip-up patch is merged + // public void testSkipAllRemotes() throws Exception { + // var telemetry = getTelemetryFromQuery("from logs-*,c*:no_such_index | stats sum (v)", "unknown"); + // + // assertThat(telemetry.getTotalCount(), equalTo(1L)); + // assertThat(telemetry.getSuccessCount(), equalTo(1L)); + // assertThat(telemetry.getFailureReasons().size(), equalTo(0)); + // assertThat(telemetry.getTook().count(), equalTo(1L)); + // assertThat(telemetry.getTookMrtFalse().count(), equalTo(0L)); + // assertThat(telemetry.getTookMrtTrue().count(), equalTo(0L)); + // assertThat(telemetry.getRemotesPerSearchAvg(), equalTo(2.0)); + // assertThat(telemetry.getRemotesPerSearchMax(), equalTo(2L)); + // assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(1L)); + // assertThat(telemetry.getClientCounts().size(), equalTo(0)); + // + // var perCluster = telemetry.getByRemoteCluster(); + // assertThat(perCluster.size(), equalTo(3)); + // for (String clusterAlias : remoteClusterAlias()) { + // var clusterData = perCluster.get(clusterAlias); + // assertThat(clusterData.getCount(), equalTo(0L)); + // assertThat(clusterData.getSkippedCount(), equalTo(1L)); + // assertThat(clusterData.getTook().count(), equalTo(0L)); + // } + // assertPerClusterCount(perCluster.get(LOCAL_CLUSTER), 1L); + // } + + public void testRemoteOnly() throws Exception { + setupClusters(); + var telemetry = getTelemetryFromQuery("from c*:logs-* | stats sum (v)", "kibana"); + + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(1L)); + assertThat(telemetry.getFailureReasons().size(), equalTo(0)); + assertThat(telemetry.getTook().count(), equalTo(1L)); + 
assertThat(telemetry.getTookMrtFalse().count(), equalTo(0L)); + assertThat(telemetry.getTookMrtTrue().count(), equalTo(0L)); + assertThat(telemetry.getRemotesPerSearchAvg(), equalTo(2.0)); + assertThat(telemetry.getRemotesPerSearchMax(), equalTo(2L)); + assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(0L)); + assertThat(telemetry.getClientCounts().size(), equalTo(1)); + assertThat(telemetry.getClientCounts().get("kibana"), equalTo(1L)); + assertThat(telemetry.getFeatureCounts().get(ASYNC_FEATURE), equalTo(null)); + + var perCluster = telemetry.getByRemoteCluster(); + assertThat(perCluster.size(), equalTo(2)); + for (String clusterAlias : remoteClusterAlias()) { + assertPerClusterCount(perCluster.get(clusterAlias), 1L); + } + assertThat(telemetry.getByRemoteCluster().size(), equalTo(2)); + } + + public void testAsync() throws Exception { + setupClusters(); + var telemetry = getTelemetryFromAsyncQuery("from logs-*,c*:logs-* | stats sum (v)"); + + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(1L)); + assertThat(telemetry.getFailureReasons().size(), equalTo(0)); + assertThat(telemetry.getTook().count(), equalTo(1L)); + assertThat(telemetry.getTookMrtFalse().count(), equalTo(0L)); + assertThat(telemetry.getTookMrtTrue().count(), equalTo(0L)); + assertThat(telemetry.getRemotesPerSearchAvg(), equalTo(2.0)); + assertThat(telemetry.getRemotesPerSearchMax(), equalTo(2L)); + assertThat(telemetry.getSearchCountWithSkippedRemotes(), equalTo(0L)); + assertThat(telemetry.getClientCounts().size(), equalTo(0)); + assertThat(telemetry.getFeatureCounts().get(ASYNC_FEATURE), equalTo(1L)); + + var perCluster = telemetry.getByRemoteCluster(); + assertThat(perCluster.size(), equalTo(3)); + for (String clusterAlias : remoteClusterAlias()) { + assertPerClusterCount(perCluster.get(clusterAlias), 1L); + } + assertPerClusterCount(perCluster.get(LOCAL_CLUSTER), 1L); + + // do it again + telemetry = getTelemetryFromAsyncQuery("from logs-*,c*:logs-* | stats sum (v)"); + assertThat(telemetry.getTotalCount(), equalTo(2L)); + assertThat(telemetry.getFeatureCounts().get(ASYNC_FEATURE), equalTo(2L)); + perCluster = telemetry.getByRemoteCluster(); + assertThat(perCluster.size(), equalTo(3)); + for (String clusterAlias : remoteClusterAlias()) { + assertPerClusterCount(perCluster.get(clusterAlias), 2L); + } + assertPerClusterCount(perCluster.get(LOCAL_CLUSTER), 2L); + } + + public void testNoSuchCluster() throws Exception { + setupClusters(); + // This is not recognized as a cross-cluster search + var telemetry = getTelemetryFromFailedQuery("from c*:logs*, nocluster:nomatch | stats sum (v)"); + + assertThat(telemetry.getTotalCount(), equalTo(0L)); + assertThat(telemetry.getSuccessCount(), equalTo(0L)); + assertThat(telemetry.getByRemoteCluster().size(), equalTo(0)); + } + + @SkipUnavailableRule.NotSkipped(aliases = REMOTE1) + public void testDisconnect() throws Exception { + setupClusters(); + // Disconnect remote1 + cluster(REMOTE1).close(); + var telemetry = getTelemetryFromFailedQuery("from logs-*,cluster-a:logs-* | stats sum (v)"); + + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(0L)); + Map expectedFailure = Map.of(CCSUsageTelemetry.Result.REMOTES_UNAVAILABLE.getName(), 1L); + assertThat(telemetry.getFailureReasons(), equalTo(expectedFailure)); + } + +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersUsageTelemetryNoLicenseIT.java 
b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersUsageTelemetryNoLicenseIT.java new file mode 100644 index 000000000000..2b993e947406 --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersUsageTelemetryNoLicenseIT.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry; +import org.elasticsearch.plugins.Plugin; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class CrossClustersUsageTelemetryNoLicenseIT extends AbstractCrossClustersUsageTelemetryIT { + + @Override + protected Collection> nodePlugins(String clusterAlias) { + List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); + plugins.add(EsqlPluginWithNonEnterpriseOrExpiredLicense.class); + plugins.add(CrossClustersQueryIT.InternalExchangePlugin.class); + return plugins; + } + + public void testLicenseFailure() throws Exception { + setupClusters(); + var telemetry = getTelemetryFromFailedQuery("from logs-*,c*:logs-* | stats sum (v)"); + + assertThat(telemetry.getTotalCount(), equalTo(1L)); + assertThat(telemetry.getSuccessCount(), equalTo(0L)); + assertThat(telemetry.getTook().count(), equalTo(0L)); + assertThat(telemetry.getRemotesPerSearchAvg(), equalTo(2.0)); + assertThat(telemetry.getRemotesPerSearchMax(), equalTo(2L)); + Map expectedFailure = Map.of(CCSUsageTelemetry.Result.LICENSE.getName(), 1L); + assertThat(telemetry.getFailureReasons(), equalTo(expectedFailure)); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 22f7937ccf4f..5c259caa9c94 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -560,7 +560,7 @@ public class EsqlCapabilities { /** * LOOKUP JOIN */ - JOIN_LOOKUP_V9(Build.current().isSnapshot()), + JOIN_LOOKUP_V10(Build.current().isSnapshot()), /** * Fix for https://github.com/elastic/elasticsearch/issues/117054 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java index 9b21efc069e9..c1afa728bc37 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java @@ -206,6 +206,10 @@ public class EsqlExecutionInfo implements ChunkedToXContentObject, Writeable { return clusterInfo.get(clusterAlias); } + public Map getClusters() { + return clusterInfo; + } + /** * Utility to swap a Cluster object. Guidelines for the remapping function: *
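The PlanExecutor hunk below fixes a bookkeeping gap: session.execute could throw synchronously, bypassing the wrapped listener, so metrics.failed(clientId) and planningMetricsManager.publish(planningMetrics, false) never ran for such failures. Funneling the call through ActionListener.run routes synchronous exceptions into the same failure branch. A simplified stand-in for the pattern (deliberately not the Elasticsearch ActionListener API) is:

```java
import java.util.function.Consumer;

// Simplified stand-in illustrating the ActionListener.run(...) pattern: an
// exception thrown synchronously by the body is redirected to onFailure, so
// failure-side bookkeeping (query metrics, planning telemetry) always runs.
interface Listener<T> {
    void onResponse(T result);
    void onFailure(Exception e);

    static <T> void run(Listener<T> listener, Consumer<Listener<T>> body) {
        try {
            body.accept(listener); // the body may complete the listener asynchronously
        } catch (Exception e) {
            listener.onFailure(e); // a synchronous throw still reaches the failure path
        }
    }
}
```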
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java index dad63d25046d..974f029eab2e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/execution/PlanExecutor.java @@ -80,7 +80,8 @@ public class PlanExecutor { ); QueryMetric clientId = QueryMetric.fromString("rest"); metrics.total(clientId); - session.execute(request, executionInfo, planRunner, wrap(x -> { + + ActionListener<Result> executeListener = wrap(x -> { planningMetricsManager.publish(planningMetrics, true); listener.onResponse(x); }, ex -> { @@ -88,7 +89,10 @@ public class PlanExecutor { metrics.failed(clientId); planningMetricsManager.publish(planningMetrics, false); listener.onFailure(ex); - })); + }); + // Wrap it in a listener so that if we have any exceptions during execution, the listener picks it up + // and all the metrics are properly updated + ActionListener.run(executeListener, l -> session.execute(request, executionInfo, planRunner, l)); } public IndexResolver indexResolver() { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EstimatesRowSize.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EstimatesRowSize.java index cfb6cce2579a..f4ab546fa749 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EstimatesRowSize.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EstimatesRowSize.java @@ -103,9 +103,6 @@ public interface EstimatesRowSize { static int estimateSize(DataType dataType) { ElementType elementType = PlannerUtils.toElementType(dataType); - if (elementType == ElementType.DOC) { - throw new EsqlIllegalArgumentException("can't load a [doc] with field extraction"); - } if (elementType == ElementType.UNKNOWN) { throw new EsqlIllegalArgumentException("[unknown] can't be the result of field extraction"); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/TopNExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/TopNExec.java index 61e40b3fa469..bbf0c681bec7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/TopNExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/TopNExec.java @@ -10,9 +10,11 @@ package org.elasticsearch.xpack.esql.plan.physical; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.Order; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; @@ -95,6 +97,9 @@ public class TopNExec extends UnaryExec implements EstimatesRowSize { @Override public PhysicalPlan estimateRowSize(State state) { + final List<Attribute> output = output(); + final boolean needsSortedDocIds = output.stream().anyMatch(a -> a.dataType() == DataType.DOC_DATA_TYPE); + state.add(needsSortedDocIds, output); int size = 
state.consumeAllFields(true); return Objects.equals(this.estimatedRowSize, size) ? this : new TopNExec(source(), child(), order, limit, size); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java index 6d9cf38d3451..b1fe0e7a7cf5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java @@ -13,22 +13,16 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; -import org.elasticsearch.common.breaker.CircuitBreaker; -import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.logging.HeaderWarning; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.aggregation.GroupingAggregator; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.ElementType; -import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.lucene.LuceneCountOperator; import org.elasticsearch.compute.lucene.LuceneOperator; import org.elasticsearch.compute.lucene.LuceneSourceOperator; import org.elasticsearch.compute.lucene.LuceneTopNSourceOperator; import org.elasticsearch.compute.lucene.TimeSeriesSortedSourceOperatorFactory; import org.elasticsearch.compute.lucene.ValuesSourceReaderOperator; -import org.elasticsearch.compute.operator.DriverContext; -import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.OrdinalsGroupingOperator; import org.elasticsearch.compute.operator.SourceOperator; @@ -380,29 +374,13 @@ public class EsPhysicalOperationProviders extends AbstractPhysicalOperationProvi } } - static class TypeConvertingBlockLoader implements BlockLoader { - protected final BlockLoader delegate; - private final EvalOperator.ExpressionEvaluator convertEvaluator; + private static class TypeConvertingBlockLoader implements BlockLoader { + private final BlockLoader delegate; + private final TypeConverter typeConverter; protected TypeConvertingBlockLoader(BlockLoader delegate, AbstractConvertFunction convertFunction) { this.delegate = delegate; - DriverContext driverContext1 = new DriverContext( - BigArrays.NON_RECYCLING_INSTANCE, - new org.elasticsearch.compute.data.BlockFactory( - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - BigArrays.NON_RECYCLING_INSTANCE - ) - ); - this.convertEvaluator = convertFunction.toEvaluator(e -> driverContext -> new EvalOperator.ExpressionEvaluator() { - @Override - public org.elasticsearch.compute.data.Block eval(Page page) { - // This is a pass-through evaluator, since it sits directly on the source loading (no prior expressions) - return page.getBlock(0); - } - - @Override - public void close() {} - }).get(driverContext1); + this.typeConverter = TypeConverter.fromConvertFunction(convertFunction); } @Override @@ -413,8 +391,7 @@ public class EsPhysicalOperationProviders extends AbstractPhysicalOperationProvi @Override public Block convert(Block block) { - Page page = new Page((org.elasticsearch.compute.data.Block) block); - return convertEvaluator.eval(page); + return typeConverter.convert((org.elasticsearch.compute.data.Block) block); } 
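This file's hunks swap the hand-built pass-through evaluator for a small `TypeConverter` wrapper (the new class appears later in this diff), so block loading funnels every read through one conversion point. Reduced to its shape, with stand-in types rather than the real `BlockLoader` contract:

```java
import java.io.IOException;

// Stand-in types: these illustrate the delegate-and-convert shape of
// TypeConvertingBlockLoader, not the actual Elasticsearch interfaces.
interface Block {}
interface Converter { Block convert(Block in); }
interface Loader { Block read() throws IOException; }

record ConvertingLoader(Loader delegate, Converter converter) implements Loader {
    @Override
    public Block read() throws IOException {
        return converter.convert(delegate.read()); // single conversion point
    }

    @Override
    public String toString() {
        return "ConvertingLoader[delegate=" + delegate + ", converter=" + converter + "]";
    }
}
```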
@Override @@ -427,9 +404,7 @@ public class EsPhysicalOperationProviders extends AbstractPhysicalOperationProvi @Override public Block read(BlockFactory factory, Docs docs) throws IOException { Block block = reader.read(factory, docs); - Page page = new Page((org.elasticsearch.compute.data.Block) block); - org.elasticsearch.compute.data.Block converted = convertEvaluator.eval(page); - return converted; + return typeConverter.convert((org.elasticsearch.compute.data.Block) block); } @Override @@ -469,7 +444,7 @@ public class EsPhysicalOperationProviders extends AbstractPhysicalOperationProvi @Override public final String toString() { - return "TypeConvertingBlockLoader[delegate=" + delegate + ", convertEvaluator=" + convertEvaluator + "]"; + return "TypeConvertingBlockLoader[delegate=" + delegate + ", typeConverter=" + typeConverter + "]"; } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java index c40263baa656..af38551c1ad0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java @@ -345,6 +345,8 @@ public class LocalExecutionPlanner { } private PhysicalOperation planTopN(TopNExec topNExec, LocalExecutionPlannerContext context) { + final Integer rowSize = topNExec.estimatedRowSize(); + assert rowSize != null && rowSize > 0 : "estimated row size [" + rowSize + "] wasn't set"; PhysicalOperation source = plan(topNExec.child(), context); ElementType[] elementTypes = new ElementType[source.layout.numberOfChannels()]; @@ -385,24 +387,8 @@ public class LocalExecutionPlanner { } else { throw new EsqlIllegalArgumentException("limit only supported with literal values"); } - - // TODO Replace page size with passing estimatedRowSize down - /* - * The 2000 below is a hack to account for incoming size and to make - * sure the estimated row size is never 0 which'd cause a divide by 0. - * But we should replace this with passing the estimate into the real - * topn and letting it actually measure the size of rows it produces. - * That'll be more accurate. And we don't have a path for estimating - * incoming rows. And we don't need one because we can estimate. 
- */ return source.with( - new TopNOperatorFactory( - limit, - asList(elementTypes), - asList(encoders), - orders, - context.pageSize(2000 + topNExec.estimatedRowSize()) - ), + new TopNOperatorFactory(limit, asList(elementTypes), asList(encoders), orders, context.pageSize(rowSize)), source.layout ); }
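The planTopN hunk above now requires a positive estimated row size and derives the operator's page size from it via `context.pageSize(rowSize)` rather than the old 2000-byte fudge. A sketch of what such a helper plausibly does, with an invented target size (the real constant in `LocalExecutionPlannerContext` is not shown in this diff):

```java
final class PageSizing {
    // Assumed-behavior sketch: derive how many rows fit in a target page
    // from the per-row byte estimate. The 256 KiB target is illustrative.
    static int pageSize(int estimatedRowSizeInBytes) {
        if (estimatedRowSizeInBytes <= 0) {
            throw new IllegalArgumentException("estimated row size must be positive, got " + estimatedRowSizeInBytes);
        }
        final int targetPageBytes = 256 * 1024;
        return Math.max(1, targetPageBytes / estimatedRowSizeInBytes);
    }
}
```

This is also why the new assertion insists on a non-null, positive row size: a zero estimate would collapse exactly the division the removed 2000-byte hack was guarding against.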
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java index a312d048db0a..5325145a77ad 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java @@ -14,7 +14,6 @@ import org.elasticsearch.compute.aggregation.AggregatorMode; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.core.Tuple; -import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.SearchExecutionContext; @@ -27,13 +26,13 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.Holder; import org.elasticsearch.xpack.esql.core.util.Queries; -import org.elasticsearch.xpack.esql.index.EsIndex; import org.elasticsearch.xpack.esql.optimizer.LocalLogicalOptimizerContext; import org.elasticsearch.xpack.esql.optimizer.LocalLogicalPlanOptimizer; import org.elasticsearch.xpack.esql.optimizer.LocalPhysicalOptimizerContext; import org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizer; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; import org.elasticsearch.xpack.esql.plan.logical.Filter; +import org.elasticsearch.xpack.esql.plan.logical.join.Join; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.EsSourceExec; import org.elasticsearch.xpack.esql.plan.physical.EstimatesRowSize; @@ -110,27 +109,10 @@ public class PlannerUtils { return Set.of(); } var indices = new LinkedHashSet<String>(); - // TODO: This only works for LEFT join, we still need to support RIGHT join - forEachUpWithChildren(plan, node -> { - if (node instanceof FragmentExec f) { - f.fragment().forEachUp(EsRelation.class, r -> indices.addAll(r.index().concreteIndices())); - } - }, node -> node instanceof LookupJoinExec join ? List.of(join.left()) : node.children()); + forEachFromRelation(plan, relation -> indices.addAll(relation.index().concreteIndices())); return indices; } - /** - * Similar to {@link Node#forEachUp(Consumer)}, but with a custom callback to get the node children. - */ - private static <T extends Node<T>> void forEachUpWithChildren( - T node, - Consumer<? super T> action, - Function<T, List<T>> childrenGetter - ) { - childrenGetter.apply(node).forEach(c -> forEachUpWithChildren(c, action, childrenGetter)); - action.accept(node); - } - /** * Returns the original indices specified in the FROM command of the query. We need the original query to resolve alias filters. */ @@ -139,16 +121,41 @@ public class PlannerUtils { return Strings.EMPTY_ARRAY; } var indices = new LinkedHashSet<String>(); - plan.forEachUp( - FragmentExec.class, - f -> f.fragment().forEachUp(EsRelation.class, r -> addOriginalIndexIfNotLookup(indices, r.index())) - ); + forEachFromRelation(plan, relation -> indices.addAll(asList(Strings.commaDelimitedListToStringArray(relation.index().name())))); return indices.toArray(String[]::new); } - private static void addOriginalIndexIfNotLookup(Set<String> indices, EsIndex index) { - if (index.indexNameWithModes().get(index.name()) != IndexMode.LOOKUP) { - indices.addAll(asList(Strings.commaDelimitedListToStringArray(index.name())));
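The replacement traversal relies on the helper introduced just below: a bottom-up walk in which the caller chooses which children to descend into, so a join's right-hand side is simply never visited. A generic, self-contained rendering of the same idea (stand-in node type, not the esql-core `Node` class):

```java
import java.util.List;
import java.util.function.Consumer;
import java.util.function.Function;

// Stand-in tree node; the real code walks esql plan trees.
interface TreeNode {
    List<TreeNode> children();
}

final class Walks {
    // Bottom-up visit, but only through the children the getter exposes;
    // passing node -> (node instanceof Join j) ? List.of(j.left()) : node.children()
    // skips every join's right side, as forEachFromRelation does below.
    static void forEachUpWithChildren(TreeNode node, Consumer<TreeNode> action, Function<TreeNode, List<TreeNode>> childrenGetter) {
        for (TreeNode child : childrenGetter.apply(node)) {
            forEachUpWithChildren(child, action, childrenGetter); // recurse first: bottom-up order
        }
        action.accept(node); // visit the node after its (selected) children
    }
}
```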
+ /** + * Iterates over the plan and applies the action to each {@link EsRelation} node. + * <p> + * This method ignores the right side of joins. + * </p>
+ */ + private static void forEachFromRelation(PhysicalPlan plan, Consumer<EsRelation> action) { + // Take the non-join-side fragments + forEachUpWithChildren(plan, FragmentExec.class, fragment -> { + // Take the non-join-side relations + forEachUpWithChildren( + fragment.fragment(), + EsRelation.class, + action, + node -> node instanceof Join join ? List.of(join.left()) : node.children() + ); + }, node -> node instanceof LookupJoinExec join ? List.of(join.left()) : node.children()); + } + + /** + * Similar to {@link Node#forEachUp(Consumer)}, but with a custom callback to get the node children. + */ + private static <T extends Node<T>, E extends T> void forEachUpWithChildren( + T node, + Class<E> typeToken, + Consumer<? super E> action, + Function<T, List<T>> childrenGetter + ) { + childrenGetter.apply(node).forEach(c -> forEachUpWithChildren(c, typeToken, action, childrenGetter)); + if (typeToken.isInstance(node)) { + action.accept(typeToken.cast(node)); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/TypeConverter.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/TypeConverter.java new file mode 100644 index 000000000000..334875927eb9 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/TypeConverter.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.planner; + +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; + +class TypeConverter { + private final String evaluatorName; + private final ExpressionEvaluator convertEvaluator; + + private TypeConverter(String evaluatorName, ExpressionEvaluator convertEvaluator) { + this.evaluatorName = evaluatorName; + this.convertEvaluator = convertEvaluator; + } + + public static TypeConverter fromConvertFunction(AbstractConvertFunction convertFunction) { + DriverContext driverContext1 = new DriverContext( + BigArrays.NON_RECYCLING_INSTANCE, + new org.elasticsearch.compute.data.BlockFactory( + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + BigArrays.NON_RECYCLING_INSTANCE + ) + ); + return new TypeConverter( + convertFunction.functionName(), + convertFunction.toEvaluator(e -> driverContext -> new ExpressionEvaluator() { + @Override + public org.elasticsearch.compute.data.Block eval(Page page) { + // This is a pass-through evaluator, since it sits directly on the source loading (no prior expressions) + return page.getBlock(0); + } + + @Override + public void close() {} + }).get(driverContext1) + ); + } + + public Block convert(Block block) { + return convertEvaluator.eval(new Page(block)); + } + + @Override + public String toString() { + return evaluatorName; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java index 
50d5819688e4..b44e249e3800 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java @@ -9,6 +9,8 @@ package org.elasticsearch.xpack.esql.plugin; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.admin.cluster.stats.CCSUsage; +import org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.internal.Client; @@ -20,16 +22,20 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.operator.exchange.ExchangeService; +import org.elasticsearch.core.Nullable; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.search.SearchService; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.usage.UsageService; import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.async.AsyncExecutionId; +import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.action.ColumnInfoImpl; import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; import org.elasticsearch.xpack.esql.action.EsqlQueryAction; @@ -52,6 +58,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.xpack.core.ClientHelper.ASYNC_SEARCH_ORIGIN; @@ -71,6 +78,7 @@ public class TransportEsqlQueryAction extends HandledTransportAction asyncTaskManagementService; private final RemoteClusterService remoteClusterService; private final QueryBuilderResolver queryBuilderResolver; + private final UsageService usageService; @Inject @SuppressWarnings("this-escape") @@ -86,8 +94,8 @@ public class TransportEsqlQueryAction extends HandledTransportAction toResponse(task, request, configuration, result)) + ActionListener.wrap(result -> { + recordCCSTelemetry(task, executionInfo, request, null); + listener.onResponse(toResponse(task, request, configuration, result)); + }, ex -> { + recordCCSTelemetry(task, executionInfo, request, ex); + listener.onFailure(ex); + }) ); + + } + + private void recordCCSTelemetry(Task task, EsqlExecutionInfo executionInfo, EsqlQueryRequest request, @Nullable Exception exception) { + if (executionInfo.isCrossClusterSearch() == false) { + return; + } + + CCSUsage.Builder usageBuilder = new CCSUsage.Builder(); + usageBuilder.setClientFromTask(task); + if (exception != null) { + if (exception instanceof VerificationException ve) { + CCSUsageTelemetry.Result failureType = classifyVerificationException(ve); + if (failureType != CCSUsageTelemetry.Result.UNKNOWN) { + usageBuilder.setFailure(failureType); + } else { + usageBuilder.setFailure(exception); + } + } else { + usageBuilder.setFailure(exception); + } + } + var took = executionInfo.overallTook(); + if (took != null) { + 
usageBuilder.took(took.getMillis()); + } + if (request.async()) { + usageBuilder.setFeature(CCSUsageTelemetry.ASYNC_FEATURE); + } + + AtomicInteger remotesCount = new AtomicInteger(); + executionInfo.getClusters().forEach((clusterAlias, cluster) -> { + if (cluster.getStatus() == EsqlExecutionInfo.Cluster.Status.SKIPPED) { + usageBuilder.skippedRemote(clusterAlias); + } else { + usageBuilder.perClusterUsage(clusterAlias, cluster.getTook()); + } + if (clusterAlias.equals(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) == false) { + remotesCount.getAndIncrement(); + } + }); + assert remotesCount.get() > 0 : "Got cross-cluster search telemetry without any remote clusters"; + usageBuilder.setRemotesCount(remotesCount.get()); + usageService.getEsqlUsageHolder().updateUsage(usageBuilder.build()); + } + + private CCSUsageTelemetry.Result classifyVerificationException(VerificationException exception) { + if (exception.getDetailedMessage().contains("Unknown index")) { + return CCSUsageTelemetry.Result.NOT_FOUND; + } + return CCSUsageTelemetry.Result.UNKNOWN; } private EsqlExecutionInfo getOrCreateExecutionInfo(Task task, EsqlQueryRequest request) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index bd3b3bdb3483..eb5e8206e9e6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -312,7 +312,7 @@ public class EsqlSession { .collect(Collectors.toSet()); final List indices = preAnalysis.indices; - EsqlSessionCCSUtils.checkForCcsLicense(indices, indicesExpressionGrouper, verifier.licenseState()); + EsqlSessionCCSUtils.checkForCcsLicense(executionInfo, indices, indicesExpressionGrouper, verifier.licenseState()); final Set targetClusters = enrichPolicyResolver.groupIndicesPerCluster( indices.stream().flatMap(t -> Arrays.stream(Strings.commaDelimitedListToStringArray(t.id().index()))).toArray(String[]::new) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java index 662572c46651..95f7a37ce4d6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtils.java @@ -308,6 +308,7 @@ class EsqlSessionCCSUtils { * @throws org.elasticsearch.ElasticsearchStatusException if the license is not valid (or present) for ES|QL CCS search. 
*/ public static void checkForCcsLicense( + EsqlExecutionInfo executionInfo, List indices, IndicesExpressionGrouper indicesGrouper, XPackLicenseState licenseState @@ -326,6 +327,17 @@ class EsqlSessionCCSUtils { // check if it is a cross-cluster query if (groupedIndices.size() > 1 || groupedIndices.containsKey(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY) == false) { if (EsqlLicenseChecker.isCcsAllowed(licenseState) == false) { + // initialize the cluster entries in EsqlExecutionInfo before throwing the invalid license error + // so that the CCS telemetry handler can recognize that this error is CCS-related + for (Map.Entry entry : groupedIndices.entrySet()) { + executionInfo.swapCluster( + entry.getKey(), + (k, v) -> new EsqlExecutionInfo.Cluster( + entry.getKey(), + Strings.arrayToCommaDelimitedString(entry.getValue().indices()) + ) + ); + } throw EsqlLicenseChecker.invalidLicenseForCcsException(licenseState); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 76744957ff5f..fe9a5e569669 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -57,6 +57,7 @@ import org.elasticsearch.xpack.esql.analysis.PreAnalyzer; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.core.type.InvalidMappedField; import org.elasticsearch.xpack.esql.enrich.EnrichLookupService; import org.elasticsearch.xpack.esql.enrich.LookupFromIndexService; import org.elasticsearch.xpack.esql.enrich.ResolvedEnrichPolicy; @@ -97,11 +98,15 @@ import java.io.IOException; import java.net.URL; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.TreeMap; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import static org.elasticsearch.xpack.esql.CsvSpecReader.specParser; import static org.elasticsearch.xpack.esql.CsvTestUtils.ExpectedResults; @@ -245,10 +250,6 @@ public class CsvTests extends ESTestCase { testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.MATCH_OPERATOR_COLON.capabilityName()) ); assumeFalse("can't load metrics in csv tests", testCase.requiredCapabilities.contains(cap(EsqlFeatures.METRICS_SYNTAX))); - assumeFalse( - "multiple indices aren't supported", - testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.UNION_TYPES.capabilityName()) - ); assumeFalse( "can't use QSTR function in csv tests", testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.QSTR_FUNCTION.capabilityName()) @@ -263,7 +264,7 @@ public class CsvTests extends ESTestCase { ); assumeFalse( "lookup join disabled for csv tests", - testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.JOIN_LOOKUP_V9.capabilityName()) + testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.JOIN_LOOKUP_V10.capabilityName()) ); assumeFalse( "can't use TERM function in csv tests", @@ -317,7 +318,13 @@ public class CsvTests extends ESTestCase { } finally { Releasables.close(() -> Iterators.map(actualResults.pages().iterator(), p -> p::releaseBlocks)); // Give the breaker service some time to clear in case we got results before the rest of 
the driver had cleaned up - assertBusy(() -> assertThat(bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST).getUsed(), equalTo(0L))); + assertBusy( + () -> assertThat( + "Not all circuits were cleaned up", + bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST).getUsed(), + equalTo(0L) + ) + ); } } @@ -332,29 +339,71 @@ public class CsvTests extends ESTestCase { CsvAssert.assertResults(expected, actual, ignoreOrder, logger); } - private static IndexResolution loadIndexResolution(String mappingName, String indexName, Map<String, String> typeMapping) { - var mapping = new TreeMap<>(loadMapping(mappingName)); - if ((typeMapping == null || typeMapping.isEmpty()) == false) { - for (var entry : typeMapping.entrySet()) { - if (mapping.containsKey(entry.getKey())) { - DataType dataType = DataType.fromTypeName(entry.getValue()); - EsField field = mapping.get(entry.getKey()); - EsField editedField = new EsField(field.getName(), dataType, field.getProperties(), field.isAggregatable()); - mapping.put(entry.getKey(), editedField); - } + private static IndexResolution loadIndexResolution(CsvTestsDataLoader.MultiIndexTestDataset datasets) { + var indexNames = datasets.datasets().stream().map(CsvTestsDataLoader.TestDataset::indexName); + Map<String, IndexMode> indexModes = indexNames.collect(Collectors.toMap(x -> x, x -> IndexMode.STANDARD)); + List<MappingPerIndex> mappings = datasets.datasets() .stream() .map(ds -> new MappingPerIndex(ds.indexName(), createMappingForIndex(ds))) .toList(); + return IndexResolution.valid(new EsIndex(datasets.indexPattern(), mergeMappings(mappings), indexModes)); + } + + private static Map<String, EsField> createMappingForIndex(CsvTestsDataLoader.TestDataset dataset) { + var mapping = new TreeMap<>(loadMapping(dataset.mappingFileName())); + if (dataset.typeMapping() == null) { + return mapping; + } + for (var entry : dataset.typeMapping().entrySet()) { + if (mapping.containsKey(entry.getKey())) { + DataType dataType = DataType.fromTypeName(entry.getValue()); + EsField field = mapping.get(entry.getKey()); + EsField editedField = new EsField(field.getName(), dataType, field.getProperties(), field.isAggregatable()); + mapping.put(entry.getKey(), editedField); } } - return IndexResolution.valid(new EsIndex(indexName, mapping, Map.of(indexName, IndexMode.STANDARD))); + return mapping; + } + + record MappingPerIndex(String index, Map<String, EsField> mapping) {} + + private static Map<String, EsField> mergeMappings(List<MappingPerIndex> mappingsPerIndex) { + Map<String, Map<String, EsField>> columnNamesToFieldByIndices = new HashMap<>(); + for (var mappingPerIndex : mappingsPerIndex) { + for (var entry : mappingPerIndex.mapping().entrySet()) { + String columnName = entry.getKey(); + EsField field = entry.getValue(); + columnNamesToFieldByIndices.computeIfAbsent(columnName, k -> new HashMap<>()).put(mappingPerIndex.index(), field); + } + } + + return columnNamesToFieldByIndices.entrySet() .stream() .collect(Collectors.toMap(Map.Entry::getKey, e -> mergeFields(e.getKey(), e.getValue()))); + } + + private static EsField mergeFields(String index, Map<String, EsField> columnNameToField) { + var indexFields = columnNameToField.values(); + if (indexFields.stream().distinct().count() > 1) { + var typesToIndices = new HashMap<String, Set<String>>(); + for (var typeToIndex : columnNameToField.entrySet()) { + typesToIndices.computeIfAbsent(typeToIndex.getValue().getDataType().typeName(), k -> new HashSet<>()) .add(typeToIndex.getKey()); + } + return new InvalidMappedField(index, typesToIndices); + } else { + return indexFields.iterator().next(); + } }
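`mergeFields` above encodes the union-type rule for the new multi-index CSV tests: when every index agrees on a field's type, the field survives as-is; otherwise it collapses into an `InvalidMappedField` recording which indices carry which type. The same rule in isolation, with plain type names standing in for `EsField`:

```java
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

final class MergeRule {
    // Returns the common type name, or a conflict map (type name -> indices),
    // standing in for "keep the EsField" vs "new InvalidMappedField(...)".
    static Object merge(Map<String, String> typeNameByIndex) {
        Set<String> distinctTypes = new HashSet<>(typeNameByIndex.values());
        if (distinctTypes.size() == 1) {
            return distinctTypes.iterator().next(); // all indices agree: keep the field
        }
        Map<String, Set<String>> typesToIndices = new HashMap<>();
        typeNameByIndex.forEach((index, type) -> typesToIndices.computeIfAbsent(type, k -> new HashSet<>()).add(index));
        return typesToIndices; // conflicting: becomes the invalid-mapped marker
    }
}
```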
private static EnrichResolution loadEnrichPolicies() { EnrichResolution enrichResolution = new EnrichResolution(); for (CsvTestsDataLoader.EnrichConfig policyConfig : CsvTestsDataLoader.ENRICH_POLICIES) { EnrichPolicy policy = loadEnrichPolicyMapping(policyConfig.policyFileName()); - CsvTestsDataLoader.TestsDataset sourceIndex = CSV_DATASET_MAP.get(policy.getIndices().get(0)); + CsvTestsDataLoader.TestDataset sourceIndex = CSV_DATASET_MAP.get(policy.getIndices().get(0)); // this could practically work, but it's wrong: // EnrichPolicyResolution should contain the policy (system) index, not the source index - EsIndex esIndex = loadIndexResolution(sourceIndex.mappingFileName(), sourceIndex.indexName(), null).get(); + EsIndex esIndex = loadIndexResolution(CsvTestsDataLoader.MultiIndexTestDataset.of(sourceIndex.withTypeMapping(Map.of()))).get(); var concreteIndices = Map.of(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY, Iterables.get(esIndex.concreteIndices(), 0)); enrichResolution.addResolvedPolicy( policyConfig.policyName(), @@ -382,8 +431,8 @@ public class CsvTests extends ESTestCase { } } - private LogicalPlan analyzedPlan(LogicalPlan parsed, CsvTestsDataLoader.TestsDataset dataset) { - var indexResolution = loadIndexResolution(dataset.mappingFileName(), dataset.indexName(), dataset.typeMapping()); + private LogicalPlan analyzedPlan(LogicalPlan parsed, CsvTestsDataLoader.MultiIndexTestDataset datasets) { + var indexResolution = loadIndexResolution(datasets); var enrichPolicies = loadEnrichPolicies(); var analyzer = new Analyzer(new AnalyzerContext(configuration, functionRegistry, indexResolution, enrichPolicies), TEST_VERIFIER); LogicalPlan plan = analyzer.analyze(parsed); @@ -392,7 +441,7 @@ public class CsvTests extends ESTestCase { return plan; } - private static CsvTestsDataLoader.TestsDataset testsDataset(LogicalPlan parsed) { + private static CsvTestsDataLoader.MultiIndexTestDataset testDatasets(LogicalPlan parsed) { var preAnalysis = new PreAnalyzer().preAnalyze(parsed); var indices = preAnalysis.indices; if (indices.isEmpty()) { /* * If the data set doesn't matter we'll just grab one we know works. * Employees is fine. 
*/ - return CSV_DATASET_MAP.get("employees"); + return CsvTestsDataLoader.MultiIndexTestDataset.of(CSV_DATASET_MAP.get("employees")); } else if (preAnalysis.indices.size() > 1) { throw new IllegalArgumentException("unexpected index resolution to multiple entries [" + preAnalysis.indices.size() + "]"); } String indexName = indices.get(0).id().index(); - List datasets = new ArrayList<>(); + List datasets = new ArrayList<>(); if (indexName.endsWith("*")) { String indexPrefix = indexName.substring(0, indexName.length() - 1); for (var entry : CSV_DATASET_MAP.entrySet()) { @@ -415,25 +464,35 @@ public class CsvTests extends ESTestCase { } } } else { - var dataset = CSV_DATASET_MAP.get(indexName); - datasets.add(dataset); + for (String index : indexName.split(",")) { + var dataset = CSV_DATASET_MAP.get(index); + if (dataset == null) { + throw new IllegalArgumentException("unknown CSV dataset for table [" + index + "]"); + } + datasets.add(dataset); + } } if (datasets.isEmpty()) { throw new IllegalArgumentException("unknown CSV dataset for table [" + indexName + "]"); } - // TODO: Support multiple datasets - return datasets.get(0); + return new CsvTestsDataLoader.MultiIndexTestDataset(indexName, datasets); } - private static TestPhysicalOperationProviders testOperationProviders(CsvTestsDataLoader.TestsDataset dataset) throws Exception { - var testData = loadPageFromCsv(CsvTests.class.getResource("/data/" + dataset.dataFileName()), dataset.typeMapping()); - return new TestPhysicalOperationProviders(testData.v1(), testData.v2()); + private static TestPhysicalOperationProviders testOperationProviders(CsvTestsDataLoader.MultiIndexTestDataset datasets) + throws Exception { + var indexResolution = loadIndexResolution(datasets); + var indexPages = new ArrayList(); + for (CsvTestsDataLoader.TestDataset dataset : datasets.datasets()) { + var testData = loadPageFromCsv(CsvTests.class.getResource("/data/" + dataset.dataFileName()), dataset.typeMapping()); + indexPages.add(new TestPhysicalOperationProviders.IndexPage(dataset.indexName(), testData.v1(), testData.v2())); + } + return TestPhysicalOperationProviders.create(indexPages); } private ActualResults executePlan(BigArrays bigArrays) throws Exception { LogicalPlan parsed = parser.createStatement(testCase.query); - var testDataset = testsDataset(parsed); - LogicalPlan analyzed = analyzedPlan(parsed, testDataset); + var testDatasets = testDatasets(parsed); + LogicalPlan analyzed = analyzedPlan(parsed, testDatasets); EsqlSession session = new EsqlSession( getTestName(), @@ -449,7 +508,7 @@ public class CsvTests extends ESTestCase { null, EsqlTestUtils.MOCK_QUERY_BUILDER_RESOLVER ); - TestPhysicalOperationProviders physicalOperationProviders = testOperationProviders(testDataset); + TestPhysicalOperationProviders physicalOperationProviders = testOperationProviders(testDatasets); PlainActionFuture listener = new PlainActionFuture<>(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index be15bb7de8b4..dc4120f35772 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -2140,7 +2140,7 @@ public class AnalyzerTests extends ESTestCase { } public void testLookupJoinUnknownIndex() { - assumeTrue("requires LOOKUP JOIN capability", 
EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); String errorMessage = "Unknown index [foobar]"; IndexResolution missingLookupIndex = IndexResolution.invalid(errorMessage); @@ -2169,7 +2169,7 @@ public class AnalyzerTests extends ESTestCase { } public void testLookupJoinUnknownField() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); String query = "FROM test | LOOKUP JOIN languages_lookup ON last_name"; String errorMessage = "1:45: Unknown column [last_name] in right side of join"; @@ -2192,7 +2192,7 @@ public class AnalyzerTests extends ESTestCase { } public void testMultipleLookupJoinsGiveDifferentAttributes() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); // The field attributes that get contributed by different LOOKUP JOIN commands must have different name ids, // even if they have the same names. Otherwise, things like dependency analysis - like in PruneColumns - cannot work based on @@ -2222,7 +2222,7 @@ public class AnalyzerTests extends ESTestCase { } public void testLookupJoinIndexMode() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); var indexResolution = AnalyzerTestUtils.expandedDefaultIndexResolution(); var lookupResolution = AnalyzerTestUtils.defaultLookupResolution(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java index 2f6cf46f2e2b..180e32fb7c15 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java @@ -113,7 +113,7 @@ public class ParsingTests extends ESTestCase { } public void testJoinOnConstant() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); assertEquals( "1:55: JOIN ON clause only supports fields at the moment, found [123]", error("row languages = 1, gender = \"f\" | lookup join test on 123") @@ -129,7 +129,7 @@ public class ParsingTests extends ESTestCase { } public void testJoinOnMultipleFields() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); assertEquals( "1:35: JOIN ON clause only supports one field at the moment, found [2]", error("row languages = 1, gender = \"f\" | lookup join test on gender, languages") @@ -137,7 +137,7 @@ public class ParsingTests extends ESTestCase { } public void testJoinTwiceOnTheSameField() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); assertEquals( "1:35: JOIN ON clause only supports one field at the moment, found [2]", 
error("row languages = 1, gender = \"f\" | lookup join test on languages, languages") @@ -145,7 +145,7 @@ public class ParsingTests extends ESTestCase { } public void testJoinTwiceOnTheSameField_TwoLookups() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); assertEquals( "1:80: JOIN ON clause only supports one field at the moment, found [2]", error("row languages = 1, gender = \"f\" | lookup join test on languages | eval x = 1 | lookup join test on gender, gender") diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 533cc59b824c..fe6d1e00e5d2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -1974,7 +1974,7 @@ public class VerifierTests extends ESTestCase { } public void testLookupJoinDataTypeMismatch() { - assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("requires LOOKUP JOIN capability", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); query("FROM test | EVAL language_code = languages | LOOKUP JOIN languages_lookup ON language_code"); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index 672eef7076c6..d46572b7c856 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -4927,7 +4927,7 @@ public class LogicalPlanOptimizerTests extends ESTestCase { } public void testPlanSanityCheckWithBinaryPlans() throws Exception { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); var plan = optimizedPlan(""" FROM test @@ -6003,7 +6003,7 @@ public class LogicalPlanOptimizerTests extends ESTestCase { * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#18, language_name{f}#19] */ public void testLookupJoinPushDownFilterOnJoinKeyWithRename() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); String query = """ FROM test @@ -6045,7 +6045,7 @@ public class LogicalPlanOptimizerTests extends ESTestCase { * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#18, language_name{f}#19] */ public void testLookupJoinPushDownFilterOnLeftSideField() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); String query = """ FROM test @@ -6088,7 +6088,7 @@ public class LogicalPlanOptimizerTests extends ESTestCase { * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#18, language_name{f}#19] */ public void testLookupJoinPushDownDisabledForLookupField() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", 
EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); String query = """ FROM test @@ -6132,7 +6132,7 @@ public class LogicalPlanOptimizerTests extends ESTestCase { * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#19, language_name{f}#20] */ public void testLookupJoinPushDownSeparatedForConjunctionBetweenLeftAndRightField() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); String query = """ FROM test @@ -6183,7 +6183,7 @@ public class LogicalPlanOptimizerTests extends ESTestCase { * \_EsRelation[languages_lookup][LOOKUP][language_code{f}#19, language_name{f}#20] */ public void testLookupJoinPushDownDisabledForDisjunctionBetweenLeftAndRightField() { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); String query = """ FROM test diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index 80f2772945e9..ff710a90e815 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -143,6 +143,7 @@ import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -920,8 +921,8 @@ public class PhysicalPlanOptimizerTests extends ESTestCase { var optimized = optimizedPlan(plan); var topN = as(optimized, TopNExec.class); - // no fields are added after the top n - so 0 here - assertThat(topN.estimatedRowSize(), equalTo(0)); + // all fields + nullsum are loaded in the final TopN + assertThat(topN.estimatedRowSize(), equalTo(allFieldRowSize + Integer.BYTES)); var exchange = asRemoteExchange(topN.child()); var project = as(exchange.child(), ProjectExec.class); @@ -929,7 +930,7 @@ public class PhysicalPlanOptimizerTests extends ESTestCase { var eval = as(extract.child(), EvalExec.class); var source = source(eval.child()); // All fields loaded - assertThat(source.estimatedRowSize(), equalTo(allFieldRowSize + 3 * Integer.BYTES + Long.BYTES)); + assertThat(source.estimatedRowSize(), equalTo(allFieldRowSize + 3 * Integer.BYTES + 2 * Integer.BYTES)); } public void testPushAndInequalitiesFilter() { @@ -1141,8 +1142,8 @@ public class PhysicalPlanOptimizerTests extends ESTestCase { var project = as(exchange.child(), ProjectExec.class); var extract = as(project.child(), FieldExtractExec.class); var topNLocal = as(extract.child(), TopNExec.class); - // two extra ints for forwards and backwards map - assertThat(topNLocal.estimatedRowSize(), equalTo(allFieldRowSize + Integer.BYTES * 2)); + // all fields plus nullsum and shards, segments, docs and two extra ints for forwards and backwards map + assertThat(topNLocal.estimatedRowSize(), equalTo(allFieldRowSize + Integer.BYTES + Integer.BYTES * 2 + Integer.BYTES * 3)); var eval = as(topNLocal.child(), EvalExec.class); var source = source(eval.child()); @@ -2615,7 +2616,7 @@ public class PhysicalPlanOptimizerTests extends ESTestCase { } public void testVerifierOnMissingReferencesWithBinaryPlans() throws Exception { - assumeTrue("Requires LOOKUP JOIN", 
EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); // Do not assert serialization: // This will have a LookupJoinExec, which is not serializable because it doesn't leave the coordinator. @@ -7298,7 +7299,7 @@ public class PhysicalPlanOptimizerTests extends ESTestCase { } public void testLookupJoinFieldLoading() throws Exception { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); TestDataSource data = dataSetWithLookupIndices(Map.of("lookup_index", List.of("first_name", "foo", "bar", "baz"))); @@ -7375,7 +7376,7 @@ public class PhysicalPlanOptimizerTests extends ESTestCase { } public void testLookupJoinFieldLoadingTwoLookups() throws Exception { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); TestDataSource data = dataSetWithLookupIndices( Map.of( @@ -7429,7 +7430,7 @@ public class PhysicalPlanOptimizerTests extends ESTestCase { @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/119082") public void testLookupJoinFieldLoadingTwoLookupsProjectInBetween() throws Exception { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); TestDataSource data = dataSetWithLookupIndices( Map.of( @@ -7470,7 +7471,7 @@ public class PhysicalPlanOptimizerTests extends ESTestCase { @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/118778") public void testLookupJoinFieldLoadingDropAllFields() throws Exception { - assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("Requires LOOKUP JOIN", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); TestDataSource data = dataSetWithLookupIndices(Map.of("lookup_index", List.of("first_name", "foo", "bar", "baz"))); @@ -7650,6 +7651,31 @@ public class PhysicalPlanOptimizerTests extends ESTestCase { assertTrue(esRelation.output().stream().anyMatch(a -> a.name().equals(MetadataAttribute.SCORE) && a instanceof MetadataAttribute)); } + public void testReductionPlanForTopN() { + int limit = between(1, 100); + var plan = physicalPlan(String.format(Locale.ROOT, """ + FROM test + | sort emp_no + | LIMIT %d + """, limit)); + Tuple plans = PlannerUtils.breakPlanBetweenCoordinatorAndDataNode(plan, config); + PhysicalPlan reduction = PlannerUtils.reductionPlan(plans.v2()); + TopNExec reductionTopN = as(reduction, TopNExec.class); + assertThat(reductionTopN.estimatedRowSize(), equalTo(allFieldRowSize)); + assertThat(reductionTopN.limit().fold(), equalTo(limit)); + } + + public void testReductionPlanForAggs() { + var plan = physicalPlan(""" + FROM test + | stats x = sum(salary) BY first_name + """); + Tuple plans = PlannerUtils.breakPlanBetweenCoordinatorAndDataNode(plan, config); + PhysicalPlan reduction = PlannerUtils.reductionPlan(plans.v2()); + AggregateExec reductionAggs = as(reduction, AggregateExec.class); + assertThat(reductionAggs.estimatedRowSize(), equalTo(58)); // double and keyword + } + @SuppressWarnings("SameParameterValue") private static void assertFilterCondition( Filter filter, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java index 78512636b57e..01dd4db123ee 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java @@ -30,16 +30,22 @@ import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.OrdinalsGroupingOperator; import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.compute.operator.SourceOperator.SourceOperatorFactory; +import org.elasticsearch.core.Nullable; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; -import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.analysis.AnalysisRegistry; +import org.elasticsearch.index.mapper.MappedFieldType.FieldExtractPreference; import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.plugins.scanners.StablePluginsRegistry; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.type.MultiTypeEsField; import org.elasticsearch.xpack.esql.core.util.SpatialCoordinateTypes; +import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec; @@ -48,8 +54,12 @@ import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.PhysicalOperat import org.elasticsearch.xpack.ml.MachineLearning; import java.io.IOException; +import java.util.ArrayList; import java.util.List; +import java.util.OptionalInt; import java.util.Random; +import java.util.function.BiFunction; +import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.IntStream; @@ -57,26 +67,33 @@ import java.util.stream.IntStream; import static com.carrotsearch.randomizedtesting.generators.RandomNumbers.randomIntBetween; import static java.util.stream.Collectors.joining; import static org.apache.lucene.tests.util.LuceneTestCase.createTempDir; -import static org.elasticsearch.index.mapper.MappedFieldType.FieldExtractPreference.DOC_VALUES; -import static org.elasticsearch.index.mapper.MappedFieldType.FieldExtractPreference.NONE; public class TestPhysicalOperationProviders extends AbstractPhysicalOperationProviders { + private final List indexPages; - private final Page testData; - private final List columnNames; + private TestPhysicalOperationProviders(List indexPages, AnalysisRegistry analysisRegistry) { + super(analysisRegistry); + this.indexPages = indexPages; + } - public TestPhysicalOperationProviders(Page testData, List columnNames) throws IOException { - super( - new AnalysisModule( - TestEnvironment.newEnvironment( - Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build() - ), - List.of(new MachineLearning(Settings.EMPTY), new CommonAnalysisPlugin()), - new 
StablePluginsRegistry() - ).getAnalysisRegistry() - ); - this.testData = testData; - this.columnNames = columnNames; + public static TestPhysicalOperationProviders create(List indexPages) throws IOException { + return new TestPhysicalOperationProviders(indexPages, createAnalysisRegistry()); + } + + public record IndexPage(String index, Page page, List columnNames) { + OptionalInt columnIndex(String columnName) { + return IntStream.range(0, columnNames.size()).filter(i -> columnNames.get(i).equals(columnName)).findFirst(); + } + } + + private static AnalysisRegistry createAnalysisRegistry() throws IOException { + return new AnalysisModule( + TestEnvironment.newEnvironment( + Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build() + ), + List.of(new MachineLearning(Settings.EMPTY), new CommonAnalysisPlugin()), + new StablePluginsRegistry() + ).getAnalysisRegistry(); } @Override @@ -118,13 +135,12 @@ public class TestPhysicalOperationProviders extends AbstractPhysicalOperationPro aggregatorFactories, groupElementType, context.bigArrays(), - attrSource.name() + attrSource ); } private class TestSourceOperator extends SourceOperator { - - boolean finished = false; + private int index = 0; private final DriverContext driverContext; TestSourceOperator(DriverContext driverContext) { @@ -133,28 +149,29 @@ public class TestPhysicalOperationProviders extends AbstractPhysicalOperationPro @Override public Page getOutput() { - if (finished == false) { - finish(); - } - + var pageIndex = indexPages.get(index); + var page = pageIndex.page; BlockFactory blockFactory = driverContext.blockFactory(); DocVector docVector = new DocVector( - blockFactory.newConstantIntVector(0, testData.getPositionCount()), - blockFactory.newConstantIntVector(0, testData.getPositionCount()), - blockFactory.newIntArrayVector(IntStream.range(0, testData.getPositionCount()).toArray(), testData.getPositionCount()), + // The shard ID is used to encode the index ID. 
+ blockFactory.newConstantIntVector(index, page.getPositionCount()), + blockFactory.newConstantIntVector(0, page.getPositionCount()), + blockFactory.newIntArrayVector(IntStream.range(0, page.getPositionCount()).toArray(), page.getPositionCount()), true ); - return new Page(docVector.asBlock()); + var block = docVector.asBlock(); + index++; + return new Page(block); } @Override public boolean isFinished() { - return finished; + return index == indexPages.size(); } @Override public void finish() { - finished = true; + index = indexPages.size(); } @Override @@ -177,24 +194,19 @@ public class TestPhysicalOperationProviders extends AbstractPhysicalOperationPro } private class TestFieldExtractOperator implements Operator { - + private final Attribute attribute; private Page lastPage; boolean finished; - String columnName; - private final DataType dataType; - private final MappedFieldType.FieldExtractPreference extractPreference; + private final FieldExtractPreference extractPreference; - TestFieldExtractOperator(String columnName, DataType dataType, MappedFieldType.FieldExtractPreference extractPreference) { - assert columnNames.contains(columnName); - this.columnName = columnName; - this.dataType = dataType; + TestFieldExtractOperator(Attribute attr, FieldExtractPreference extractPreference) { + this.attribute = attr; this.extractPreference = extractPreference; } @Override public void addInput(Page page) { - Block block = extractBlockForColumn(page, columnName, dataType, extractPreference); - lastPage = page.appendBlock(block); + lastPage = page.appendBlock(getBlock(page.getBlock(0), attribute, extractPreference)); } @Override @@ -226,12 +238,12 @@ public class TestPhysicalOperationProviders extends AbstractPhysicalOperationPro } private class TestFieldExtractOperatorFactory implements Operator.OperatorFactory { - final Operator op; - private String columnName; + private final Operator op; + private final Attribute attribute; - TestFieldExtractOperatorFactory(Attribute attr, MappedFieldType.FieldExtractPreference extractPreference) { - this.op = new TestFieldExtractOperator(attr.name(), attr.dataType(), extractPreference); - this.columnName = attr.name(); + TestFieldExtractOperatorFactory(Attribute attr, FieldExtractPreference extractPreference) { + this.op = new TestFieldExtractOperator(attr, extractPreference); + this.attribute = attr; } @Override @@ -241,27 +253,88 @@ public class TestPhysicalOperationProviders extends AbstractPhysicalOperationPro @Override public String describe() { - return "TestFieldExtractOperator(" + columnName + ")"; + return "TestFieldExtractOperator(" + attribute.name() + ")"; + } + } + + private Block getBlock(DocBlock docBlock, Attribute attribute, FieldExtractPreference extractPreference) { + if (attribute instanceof UnsupportedAttribute) { + return docBlock.blockFactory().newConstantNullBlock(docBlock.getPositionCount()); + } + return extractBlockForColumn( + docBlock, + attribute.dataType(), + extractPreference, + attribute instanceof FieldAttribute fa && fa.field() instanceof MultiTypeEsField multiTypeEsField + ? 
(indexDoc, blockCopier) -> getBlockForMultiType(indexDoc, multiTypeEsField, blockCopier) + : (indexDoc, blockCopier) -> extractBlockForSingleDoc(indexDoc, attribute.name(), blockCopier) + ); + } + + private Block getBlockForMultiType(DocBlock indexDoc, MultiTypeEsField multiTypeEsField, TestBlockCopier blockCopier) { + var indexId = indexDoc.asVector().shards().getInt(0); + var indexPage = indexPages.get(indexId); + var conversion = (AbstractConvertFunction) multiTypeEsField.getConversionExpressionForIndex(indexPage.index); + Supplier<Block> nulls = () -> indexDoc.blockFactory().newConstantNullBlock(indexDoc.getPositionCount()); + if (conversion == null) { + return nulls.get(); + } + var field = (FieldAttribute) conversion.field(); + return indexPage.columnIndex(field.fieldName()).isEmpty() ? nulls.get() : TypeConverter.fromConvertFunction(conversion).convert(extractBlockForSingleDoc(indexDoc, field.fieldName(), blockCopier)); + } + + private Block extractBlockForSingleDoc(DocBlock docBlock, String columnName, TestBlockCopier blockCopier) { + var indexId = docBlock.asVector().shards().getInt(0); + var indexPage = indexPages.get(indexId); + int columnIndex = indexPage.columnIndex(columnName) + .orElseThrow(() -> new EsqlIllegalArgumentException("Cannot find column named [{}] in {}", columnName, indexPage.columnNames)); + var originalData = indexPage.page.getBlock(columnIndex); + return blockCopier.copyBlock(originalData); + } + + private static void foreachIndexDoc(DocBlock docBlock, Consumer<DocBlock> indexDocConsumer) { + var currentIndex = -1; + List<Integer> currentList = null; + DocVector vector = docBlock.asVector(); + for (int i = 0; i < docBlock.getPositionCount(); i++) { + int indexId = vector.shards().getInt(i); + if (indexId != currentIndex) { + consumeIndexDoc(indexDocConsumer, vector, currentList); + currentList = new ArrayList<>(); + currentIndex = indexId; + } + currentList.add(i); + } + consumeIndexDoc(indexDocConsumer, vector, currentList); + } + + private static void consumeIndexDoc(Consumer<DocBlock> indexDocConsumer, DocVector vector, @Nullable List<Integer> currentList) { + if (currentList != null) { + try (DocVector indexDocVector = vector.filter(currentList.stream().mapToInt(Integer::intValue).toArray())) { + indexDocConsumer.accept(indexDocVector.asBlock()); + } } } private class TestHashAggregationOperator extends HashAggregationOperator { - private final String columnName; + private final Attribute attribute; TestHashAggregationOperator( List<GroupingAggregator.Factory> aggregators, Supplier<BlockHash> blockHash, - String columnName, + Attribute attribute, DriverContext driverContext ) { super(aggregators, blockHash, driverContext); - this.columnName = columnName; + this.attribute = attribute; } @Override protected Page wrapPage(Page page) { - return page.appendBlock(extractBlockForColumn(page, columnName, null, NONE)); + return page.appendBlock(getBlock(page.getBlock(0), attribute, FieldExtractPreference.NONE)); } } @@ -270,24 +343,24 @@ public class TestPhysicalOperationProviders extends AbstractPhysicalOperationPro * {@link HashAggregationOperator}. 
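* The grouping block is re-extracted from each page by {@link TestHashAggregationOperator#wrapPage}.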
*/ private class TestOrdinalsGroupingAggregationOperatorFactory implements Operator.OperatorFactory { - private int groupByChannel; - private List<GroupingAggregator.Factory> aggregators; - private ElementType groupElementType; - private BigArrays bigArrays; - private String columnName; + private final int groupByChannel; + private final List<GroupingAggregator.Factory> aggregators; + private final ElementType groupElementType; + private final BigArrays bigArrays; + private final Attribute attribute; TestOrdinalsGroupingAggregationOperatorFactory( int channelIndex, List<GroupingAggregator.Factory> aggregatorFactories, ElementType groupElementType, BigArrays bigArrays, - String name + Attribute attribute ) { this.groupByChannel = channelIndex; this.aggregators = aggregatorFactories; this.groupElementType = groupElementType; this.bigArrays = bigArrays; - this.columnName = name; + this.attribute = attribute; } @Override @@ -302,7 +375,7 @@ public class TestPhysicalOperationProviders extends AbstractPhysicalOperationPro pageSize, false ), - columnName, + attribute, driverContext ); } @@ -318,32 +391,34 @@ public class TestPhysicalOperationProviders extends AbstractPhysicalOperationPro } private Block extractBlockForColumn( - Page page, - String columnName, + DocBlock docBlock, DataType dataType, - MappedFieldType.FieldExtractPreference extractPreference + FieldExtractPreference extractPreference, + BiFunction<DocBlock, TestBlockCopier, Block> extractBlock ) { - var columnIndex = -1; - // locate the block index corresponding to "columnName" - for (int i = 0, size = columnNames.size(); i < size && columnIndex < 0; i++) { - if (columnNames.get(i).equals(columnName)) { - columnIndex = i; - } + BlockFactory blockFactory = docBlock.blockFactory(); + boolean mapToDocValues = shouldMapToDocValues(dataType, extractPreference); + try ( + Block.Builder blockBuilder = mapToDocValues + ? blockFactory.newLongBlockBuilder(docBlock.getPositionCount()) + : blockBuilder(dataType, docBlock.getPositionCount(), TestBlockFactory.getNonBreakingInstance()) + ) { + foreachIndexDoc(docBlock, indexDoc -> { + TestBlockCopier blockCopier = mapToDocValues + ? TestSpatialPointStatsBlockCopier.create(indexDoc.asVector().docs(), dataType) + : new TestBlockCopier(indexDoc.asVector().docs()); + Block blockForIndex = extractBlock.apply(indexDoc, blockCopier); + blockBuilder.copyFrom(blockForIndex, 0, blockForIndex.getPositionCount()); + }); + var result = blockBuilder.build(); + assert result.getPositionCount() == docBlock.getPositionCount() : "Expected " + docBlock.getPositionCount() + " rows, got " + result.getPositionCount(); + return result; } - if (columnIndex < 0) { - throw new EsqlIllegalArgumentException("Cannot find column named [{}] in {}", columnName, columnNames); - } - DocBlock docBlock = page.getBlock(0); - IntVector docIndices = docBlock.asVector().docs(); - Block originalData = testData.getBlock(columnIndex); - var blockCopier = shouldMapToDocValues(dataType, extractPreference) - ? 
TestSpatialPointStatsBlockCopier.create(docIndices, dataType) - : new TestBlockCopier(docIndices); - return blockCopier.copyBlock(originalData); } - private boolean shouldMapToDocValues(DataType dataType, MappedFieldType.FieldExtractPreference extractPreference) { - return extractPreference == DOC_VALUES && DataType.isSpatialPoint(dataType); + private boolean shouldMapToDocValues(DataType dataType, FieldExtractPreference extractPreference) { + return extractPreference == FieldExtractPreference.DOC_VALUES && DataType.isSpatialPoint(dataType); } private static class TestBlockCopier { @@ -409,9 +484,9 @@ public class TestPhysicalOperationProviders extends AbstractPhysicalOperationPro } private static TestSpatialPointStatsBlockCopier create(IntVector docIndices, DataType dataType) { - Function<BytesRef, Long> encoder = switch (dataType.esType()) { - case "geo_point" -> SpatialCoordinateTypes.GEO::wkbAsLong; - case "cartesian_point" -> SpatialCoordinateTypes.CARTESIAN::wkbAsLong; + Function<BytesRef, Long> encoder = switch (dataType) { + case GEO_POINT -> SpatialCoordinateTypes.GEO::wkbAsLong; + case CARTESIAN_POINT -> SpatialCoordinateTypes.CARTESIAN::wkbAsLong; default -> throw new IllegalArgumentException("Unsupported spatial data type: " + dataType); }; return new TestSpatialPointStatsBlockCopier(docIndices) { @@ -422,4 +497,13 @@ public class TestPhysicalOperationProviders extends AbstractPhysicalOperationPro }; } } + + private static Block.Builder blockBuilder(DataType dataType, int estimatedSize, BlockFactory blockFactory) { + ElementType elementType = switch (dataType) { + case SHORT -> ElementType.INT; + case FLOAT, HALF_FLOAT, SCALED_FLOAT -> ElementType.DOUBLE; + default -> PlannerUtils.toElementType(dataType); + }; + return elementType.newBlockBuilder(estimatedSize, blockFactory); + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java index 1000c05282fd..6b01010ffa5f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/EsqlSessionCCSUtilsTests.java @@ -644,6 +644,7 @@ public class EsqlSessionCCSUtilsTests extends ESTestCase { public void testCheckForCcsLicense() { final TestIndicesExpressionGrouper indicesGrouper = new TestIndicesExpressionGrouper(); + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); // this seems to be used only for tracking usage of features, not for checking if a license is expired final LongSupplier currTime = () -> System.currentTimeMillis(); @@ -671,22 +672,22 @@ public class EsqlSessionCCSUtilsTests extends ESTestCase { List<TableInfo> indices = new ArrayList<>(); indices.add(new TableInfo(new TableIdentifier(EMPTY, null, randomFrom("idx", "idx1,idx2*")))); - checkForCcsLicense(indices, indicesGrouper, enterpriseLicenseValid); - checkForCcsLicense(indices, indicesGrouper, platinumLicenseValid); - checkForCcsLicense(indices, indicesGrouper, goldLicenseValid); - checkForCcsLicense(indices, indicesGrouper, trialLicenseValid); - checkForCcsLicense(indices, indicesGrouper, basicLicenseValid); - checkForCcsLicense(indices, indicesGrouper, standardLicenseValid); - checkForCcsLicense(indices, indicesGrouper, missingLicense); - checkForCcsLicense(indices, indicesGrouper, nullLicense); + checkForCcsLicense(executionInfo, indices, indicesGrouper, enterpriseLicenseValid); + 
checkForCcsLicense(executionInfo, indices, indicesGrouper, platinumLicenseValid); + checkForCcsLicense(executionInfo, indices, indicesGrouper, goldLicenseValid); + checkForCcsLicense(executionInfo, indices, indicesGrouper, trialLicenseValid); + checkForCcsLicense(executionInfo, indices, indicesGrouper, basicLicenseValid); + checkForCcsLicense(executionInfo, indices, indicesGrouper, standardLicenseValid); + checkForCcsLicense(executionInfo, indices, indicesGrouper, missingLicense); + checkForCcsLicense(executionInfo, indices, indicesGrouper, nullLicense); - checkForCcsLicense(indices, indicesGrouper, enterpriseLicenseInactive); - checkForCcsLicense(indices, indicesGrouper, platinumLicenseInactive); - checkForCcsLicense(indices, indicesGrouper, goldLicenseInactive); - checkForCcsLicense(indices, indicesGrouper, trialLicenseInactive); - checkForCcsLicense(indices, indicesGrouper, basicLicenseInactive); - checkForCcsLicense(indices, indicesGrouper, standardLicenseInactive); - checkForCcsLicense(indices, indicesGrouper, missingLicenseInactive); + checkForCcsLicense(executionInfo, indices, indicesGrouper, enterpriseLicenseInactive); + checkForCcsLicense(executionInfo, indices, indicesGrouper, platinumLicenseInactive); + checkForCcsLicense(executionInfo, indices, indicesGrouper, goldLicenseInactive); + checkForCcsLicense(executionInfo, indices, indicesGrouper, trialLicenseInactive); + checkForCcsLicense(executionInfo, indices, indicesGrouper, basicLicenseInactive); + checkForCcsLicense(executionInfo, indices, indicesGrouper, standardLicenseInactive); + checkForCcsLicense(executionInfo, indices, indicesGrouper, missingLicenseInactive); } // cross-cluster search requires a valid (active, non-expired) enterprise license OR a valid trial license @@ -701,8 +702,8 @@ public class EsqlSessionCCSUtilsTests extends ESTestCase { } // licenses that work - checkForCcsLicense(indices, indicesGrouper, enterpriseLicenseValid); - checkForCcsLicense(indices, indicesGrouper, trialLicenseValid); + checkForCcsLicense(executionInfo, indices, indicesGrouper, enterpriseLicenseValid); + checkForCcsLicense(executionInfo, indices, indicesGrouper, trialLicenseValid); // all others fail --- @@ -739,9 +740,10 @@ public class EsqlSessionCCSUtilsTests extends ESTestCase { XPackLicenseState licenseState, String expectedErrorMessageSuffix ) { + EsqlExecutionInfo executionInfo = new EsqlExecutionInfo(true); ElasticsearchStatusException e = expectThrows( ElasticsearchStatusException.class, - () -> checkForCcsLicense(indices, indicesGrouper, licenseState) + () -> checkForCcsLicense(executionInfo, indices, indicesGrouper, licenseState) ); assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); assertThat( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java index b344bd6b6325..4db4f7925d4f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java @@ -1365,7 +1365,7 @@ public class IndexResolverFieldNamesTests extends ESTestCase { } public void testLookupJoin() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); assertFieldNames( "FROM 
employees | KEEP languages | RENAME languages AS language_code | LOOKUP JOIN languages_lookup ON language_code", Set.of("languages", "languages.*", "language_code", "language_code.*"), @@ -1374,7 +1374,7 @@ public class IndexResolverFieldNamesTests extends ESTestCase { } public void testLookupJoinKeep() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); assertFieldNames( """ FROM employees @@ -1388,7 +1388,7 @@ public class IndexResolverFieldNamesTests extends ESTestCase { } public void testLookupJoinKeepWildcard() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); assertFieldNames( """ FROM employees @@ -1402,7 +1402,7 @@ public class IndexResolverFieldNamesTests extends ESTestCase { } public void testMultiLookupJoin() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1415,7 +1415,7 @@ public class IndexResolverFieldNamesTests extends ESTestCase { } public void testMultiLookupJoinKeepBefore() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1429,7 +1429,7 @@ public class IndexResolverFieldNamesTests extends ESTestCase { } public void testMultiLookupJoinKeepBetween() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1454,7 +1454,7 @@ public class IndexResolverFieldNamesTests extends ESTestCase { } public void testMultiLookupJoinKeepAfter() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1481,7 +1481,7 @@ public class IndexResolverFieldNamesTests extends ESTestCase { } public void testMultiLookupJoinKeepAfterWildcard() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1495,7 +1495,7 @@ public class IndexResolverFieldNamesTests extends ESTestCase { } public void testMultiLookupJoinSameIndex() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1509,7 +1509,7 @@ public class IndexResolverFieldNamesTests extends ESTestCase { } public void testMultiLookupJoinSameIndexKeepBefore() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", 
EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1524,7 +1524,7 @@ public class IndexResolverFieldNamesTests extends ESTestCase { } public void testMultiLookupJoinSameIndexKeepBetween() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); assertFieldNames( """ FROM sample_data @@ -1550,7 +1550,7 @@ public class IndexResolverFieldNamesTests extends ESTestCase { } public void testMultiLookupJoinSameIndexKeepAfter() { - assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V9.isEnabled()); + assumeTrue("LOOKUP JOIN available as snapshot only", EsqlCapabilities.Cap.JOIN_LOOKUP_V10.isEnabled()); assertFieldNames( """ FROM sample_data diff --git a/x-pack/plugin/identity-provider/src/main/plugin-metadata/entitlement-policy.yaml b/x-pack/plugin/identity-provider/src/main/plugin-metadata/entitlement-policy.yaml new file mode 100644 index 000000000000..d826de8ca872 --- /dev/null +++ b/x-pack/plugin/identity-provider/src/main/plugin-metadata/entitlement-policy.yaml @@ -0,0 +1,2 @@ +ALL-UNNAMED: + - set_https_connection_properties # potentially required by apache.httpcomponents diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java index 876ff0181206..62c302e97815 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferenceFeatures.java @@ -9,6 +9,7 @@ package org.elasticsearch.xpack.inference; import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.xpack.inference.mapper.SemanticInferenceMetadataFieldsMapper; import org.elasticsearch.xpack.inference.mapper.SemanticTextFieldMapper; import org.elasticsearch.xpack.inference.queries.SemanticQueryBuilder; import org.elasticsearch.xpack.inference.rank.random.RandomRankRetrieverBuilder; @@ -48,7 +49,8 @@ public class InferenceFeatures implements FeatureSpecification { SemanticTextFieldMapper.SEMANTIC_TEXT_ALWAYS_EMIT_INFERENCE_ID_FIX, SEMANTIC_TEXT_HIGHLIGHTER, SEMANTIC_MATCH_QUERY_REWRITE_INTERCEPTION_SUPPORTED, - SEMANTIC_SPARSE_VECTOR_QUERY_REWRITE_INTERCEPTION_SUPPORTED + SEMANTIC_SPARSE_VECTOR_QUERY_REWRITE_INTERCEPTION_SUPPORTED, + SemanticInferenceMetadataFieldsMapper.EXPLICIT_NULL_FIXES ); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java index 44643bd24566..59444cddfd7f 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilter.java @@ -40,6 +40,7 @@ import org.elasticsearch.inference.Model; import org.elasticsearch.inference.UnparsedModel; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; +import org.elasticsearch.xcontent.XContent; import org.elasticsearch.xpack.core.inference.results.ChunkedInferenceError; import 
org.elasticsearch.xpack.inference.mapper.SemanticTextField; import org.elasticsearch.xpack.inference.mapper.SemanticTextFieldMapper; @@ -51,6 +52,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; +import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -68,6 +70,8 @@ import java.util.stream.Collectors; */ public class ShardBulkInferenceActionFilter implements MappedActionFilter { protected static final int DEFAULT_BATCH_SIZE = 512; + private static final Object EXPLICIT_NULL = new Object(); + private static final ChunkedInference EMPTY_CHUNKED_INFERENCE = new EmptyChunkedInference(); private final ClusterService clusterService; private final InferenceServiceRegistry inferenceServiceRegistry; @@ -395,11 +399,22 @@ public class ShardBulkInferenceActionFilter implements MappedActionFilter { for (var entry : response.responses.entrySet()) { var fieldName = entry.getKey(); var responses = entry.getValue(); - var model = responses.get(0).model(); + Model model = null; + + InferenceFieldMetadata inferenceFieldMetadata = fieldInferenceMap.get(fieldName); + if (inferenceFieldMetadata == null) { + throw new IllegalStateException("No inference field metadata for field [" + fieldName + "]"); + } + // ensure that the order in the original field is consistent in case of multiple inputs Collections.sort(responses, Comparator.comparingInt(FieldInferenceResponse::inputOrder)); Map<String, List<SemanticTextField.Chunk>> chunkMap = new LinkedHashMap<>(); for (var resp : responses) { + // Get the first non-null model from the response list + if (model == null) { + model = resp.model; + } + var lst = chunkMap.computeIfAbsent(resp.sourceField, k -> new ArrayList<>()); lst.addAll( SemanticTextField.toSemanticTextFieldChunks( @@ -411,21 +426,26 @@ public class ShardBulkInferenceActionFilter implements MappedActionFilter { ) ); } + List<String> inputs = responses.stream() .filter(r -> r.sourceField().equals(fieldName)) .map(r -> r.input) .collect(Collectors.toList()); + + // The model can be null if we are only processing update requests that clear inference results. This is ok because we will + // merge in the field's existing model settings on the data node. var result = new SemanticTextField( useLegacyFormat, fieldName, useLegacyFormat ? inputs : null, new SemanticTextField.InferenceResult( - model.getInferenceEntityId(), - new SemanticTextField.ModelSettings(model), + inferenceFieldMetadata.getInferenceId(), + model != null ? new SemanticTextField.ModelSettings(model) : null, chunkMap ), indexRequest.getContentType() ); + if (useLegacyFormat) { SemanticTextUtils.insertValue(fieldName, newDocMap, result); } else { @@ -492,7 +512,8 @@ public class ShardBulkInferenceActionFilter implements MappedActionFilter { } else { var inferenceMetadataFieldsValue = XContentMapValues.extractValue( InferenceMetadataFieldsMapper.NAME + "." 
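+ // EXPLICIT_NULL is returned when the key is present but mapped to null, so an explicit null can be told apart from a missing field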
+ field, - docMap + docMap, + EXPLICIT_NULL ); if (inferenceMetadataFieldsValue != null) { // Inference has already been computed @@ -502,9 +523,22 @@ public class ShardBulkInferenceActionFilter implements MappedActionFilter { int order = 0; for (var sourceField : entry.getSourceFields()) { - // TODO: Detect when the field is provided with an explicit null value - var valueObj = XContentMapValues.extractValue(sourceField, docMap); - if (valueObj == null) { + var valueObj = XContentMapValues.extractValue(sourceField, docMap, EXPLICIT_NULL); + if (useLegacyFormat == false && isUpdateRequest && valueObj == EXPLICIT_NULL) { + /** + * It's an update request, and the source field is explicitly set to null, + * so we need to propagate this information to the inference fields metadata + * to overwrite any inference previously computed on the field. + * This ensures that the field is treated as intentionally cleared, + * preventing any unintended carryover of prior inference results. + */ + var slot = ensureResponseAccumulatorSlot(itemIndex); + slot.addOrUpdateResponse( + new FieldInferenceResponse(field, sourceField, null, order++, 0, null, EMPTY_CHUNKED_INFERENCE) + ); + continue; + } + if (valueObj == null || valueObj == EXPLICIT_NULL) { if (isUpdateRequest && useLegacyFormat) { addInferenceResponseFailure( item.id(), @@ -554,4 +588,11 @@ public class ShardBulkInferenceActionFilter implements MappedActionFilter { return null; } } + + private static class EmptyChunkedInference implements ChunkedInference { + @Override + public Iterator<Chunk> chunksAsMatchedTextAndByteReference(XContent xcontent) { + return Collections.emptyIterator(); + } + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldsMapper.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldsMapper.java index 7a1a9b056d0a..3f49973d6e35 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldsMapper.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldsMapper.java @@ -12,6 +12,7 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.join.BitSetProducer; import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.InferenceMetadataFieldsMapper; @@ -38,6 +39,8 @@ import java.util.function.Function; public class SemanticInferenceMetadataFieldsMapper extends InferenceMetadataFieldsMapper { private static final SemanticInferenceMetadataFieldsMapper INSTANCE = new SemanticInferenceMetadataFieldsMapper(); + public static final NodeFeature EXPLICIT_NULL_FIXES = new NodeFeature("semantic_text.inference_metadata_fields.explicit_null_fixes"); + public static final TypeParser PARSER = new FixedTypeParser( c -> InferenceMetadataFieldsMapper.isEnabled(c.getSettings()) ? 
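+ // resolves to null (mapper not installed) for indices that still use the legacy semantic_text format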
INSTANCE : null ); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextField.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextField.java index cfd05cb29ca0..fddff17dab4c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextField.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextField.java @@ -338,16 +338,13 @@ public record SemanticTextField( static { SEMANTIC_TEXT_FIELD_PARSER.declareStringArray(optionalConstructorArg(), new ParseField(TEXT_FIELD)); - SEMANTIC_TEXT_FIELD_PARSER.declareObject( - constructorArg(), - (p, c) -> INFERENCE_RESULT_PARSER.parse(p, c), - new ParseField(INFERENCE_FIELD) - ); + SEMANTIC_TEXT_FIELD_PARSER.declareObject(constructorArg(), INFERENCE_RESULT_PARSER, new ParseField(INFERENCE_FIELD)); INFERENCE_RESULT_PARSER.declareString(constructorArg(), new ParseField(INFERENCE_ID_FIELD)); - INFERENCE_RESULT_PARSER.declareObject( + INFERENCE_RESULT_PARSER.declareObjectOrNull( constructorArg(), (p, c) -> MODEL_SETTINGS_PARSER.parse(p, null), + null, new ParseField(MODEL_SETTINGS_FIELD) ); INFERENCE_RESULT_PARSER.declareField(constructorArg(), (p, c) -> { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java index b47c55c30227..690a136c566e 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java @@ -384,6 +384,17 @@ public class SemanticTextFieldMapper extends FieldMapper implements InferenceFie mapper = this; } + if (mapper.fieldType().getModelSettings() == null) { + for (var chunkList : field.inference().chunks().values()) { + if (chunkList.isEmpty() == false) { + throw new DocumentParsingException( + xContentLocation, + "[" + MODEL_SETTINGS_FIELD + "] must be set for field [" + fullFieldName + "] when chunks are provided" + ); + } + } + } + var chunksField = mapper.fieldType().getChunksField(); var embeddingsField = mapper.fieldType().getEmbeddingsField(); var offsetsField = mapper.fieldType().getOffsetsField(); @@ -895,7 +906,7 @@ public class SemanticTextFieldMapper extends FieldMapper implements InferenceFie if (Objects.equals(previous, current)) { return true; } - if (previous == null) { + if (previous == null || current == null) { return true; } conflicts.addConflict("model_settings", ""); diff --git a/x-pack/plugin/inference/src/main/plugin-metadata/entitlement-policy.yaml b/x-pack/plugin/inference/src/main/plugin-metadata/entitlement-policy.yaml new file mode 100644 index 000000000000..41383d0b6736 --- /dev/null +++ b/x-pack/plugin/inference/src/main/plugin-metadata/entitlement-policy.yaml @@ -0,0 +1,2 @@ +com.google.api.client: + - set_https_connection_properties diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java index 3a668cb96604..3552a9d209f2 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java +++ 
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/filter/ShardBulkInferenceActionFilterTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.action.bulk.TransportShardBulkAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionFilterChain; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -68,6 +69,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXC import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.awaitLatch; import static org.elasticsearch.xpack.inference.action.filter.ShardBulkInferenceActionFilter.DEFAULT_BATCH_SIZE; import static org.elasticsearch.xpack.inference.action.filter.ShardBulkInferenceActionFilter.getIndexRequestOrNull; +import static org.elasticsearch.xpack.inference.mapper.SemanticTextField.getChunksFieldName; +import static org.elasticsearch.xpack.inference.mapper.SemanticTextField.getOriginalTextFieldName; import static org.elasticsearch.xpack.inference.mapper.SemanticTextFieldTests.randomChunkedInferenceEmbeddingSparse; import static org.elasticsearch.xpack.inference.mapper.SemanticTextFieldTests.randomSemanticText; import static org.elasticsearch.xpack.inference.mapper.SemanticTextFieldTests.randomSemanticTextInput; @@ -76,6 +79,7 @@ import static org.elasticsearch.xpack.inference.mapper.SemanticTextFieldTests.to import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; @@ -84,6 +88,8 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; public class ShardBulkInferenceActionFilterTests extends ESTestCase { + private static final Object EXPLICIT_NULL = new Object(); + private final boolean useLegacyFormat; private ThreadPool threadPool; @@ -208,6 +214,11 @@ public class ShardBulkInferenceActionFilterTests extends ESTestCase { XContentMapValues.extractValue(useLegacyFormat ? 
"field1.text" : "field1", actualRequest.sourceAsMap()), equalTo("I am a success") ); + if (useLegacyFormat == false) { + assertNotNull( + XContentMapValues.extractValue(InferenceMetadataFieldsMapper.NAME + ".field1", actualRequest.sourceAsMap()) + ); + } // item 2 is a failure assertNotNull(bulkShardRequest.items()[2].getPrimaryResponse()); @@ -235,6 +246,79 @@ public class ShardBulkInferenceActionFilterTests extends ESTestCase { awaitLatch(chainExecuted, 10, TimeUnit.SECONDS); } + @SuppressWarnings({ "unchecked", "rawtypes" }) + public void testExplicitNull() throws Exception { + StaticModel model = StaticModel.createRandomInstance(); + model.putResult("I am a failure", new ChunkedInferenceError(new IllegalArgumentException("boom"))); + model.putResult("I am a success", randomChunkedInferenceEmbeddingSparse(List.of("I am a success"))); + + ShardBulkInferenceActionFilter filter = createFilter( + threadPool, + Map.of(model.getInferenceEntityId(), model), + randomIntBetween(1, 10), + useLegacyFormat + ); + + CountDownLatch chainExecuted = new CountDownLatch(1); + ActionFilterChain actionFilterChain = (task, action, request, listener) -> { + try { + BulkShardRequest bulkShardRequest = (BulkShardRequest) request; + assertNull(bulkShardRequest.getInferenceFieldMap()); + assertThat(bulkShardRequest.items().length, equalTo(5)); + + // item 0 + assertNull(bulkShardRequest.items()[0].getPrimaryResponse()); + IndexRequest actualRequest = getIndexRequestOrNull(bulkShardRequest.items()[0].request()); + assertThat(XContentMapValues.extractValue("obj.field1", actualRequest.sourceAsMap(), EXPLICIT_NULL), is(EXPLICIT_NULL)); + assertNull(XContentMapValues.extractValue(InferenceMetadataFieldsMapper.NAME, actualRequest.sourceAsMap(), EXPLICIT_NULL)); + + // item 1 is a success + assertNull(bulkShardRequest.items()[1].getPrimaryResponse()); + actualRequest = getIndexRequestOrNull(bulkShardRequest.items()[1].request()); + assertInferenceResults(useLegacyFormat, actualRequest, "obj.field1", "I am a success", 1); + + // item 2 is a failure + assertNotNull(bulkShardRequest.items()[2].getPrimaryResponse()); + assertTrue(bulkShardRequest.items()[2].getPrimaryResponse().isFailed()); + var failure = bulkShardRequest.items()[2].getPrimaryResponse().getFailure(); + assertThat(failure.getCause().getCause().getMessage(), containsString("boom")); + + // item 3 + assertNull(bulkShardRequest.items()[3].getPrimaryResponse()); + actualRequest = getIndexRequestOrNull(bulkShardRequest.items()[3].request()); + assertInferenceResults(useLegacyFormat, actualRequest, "obj.field1", EXPLICIT_NULL, 0); + + // item 4 + assertNull(bulkShardRequest.items()[4].getPrimaryResponse()); + actualRequest = getIndexRequestOrNull(bulkShardRequest.items()[4].request()); + assertNull(XContentMapValues.extractValue("obj.field1", actualRequest.sourceAsMap(), EXPLICIT_NULL)); + assertNull(XContentMapValues.extractValue(InferenceMetadataFieldsMapper.NAME, actualRequest.sourceAsMap(), EXPLICIT_NULL)); + } finally { + chainExecuted.countDown(); + } + }; + ActionListener actionListener = mock(ActionListener.class); + Task task = mock(Task.class); + + Map inferenceFieldMap = Map.of( + "obj.field1", + new InferenceFieldMetadata("obj.field1", model.getInferenceEntityId(), new String[] { "obj.field1" }) + ); + Map sourceWithNull = new HashMap<>(); + sourceWithNull.put("field1", null); + + BulkItemRequest[] items = new BulkItemRequest[5]; + items[0] = new BulkItemRequest(0, new IndexRequest("index").source(Map.of("obj", sourceWithNull))); + items[1] = new 
BulkItemRequest(1, new IndexRequest("index").source("obj.field1", "I am a success")); + items[2] = new BulkItemRequest(2, new IndexRequest("index").source("obj.field1", "I am a failure")); + items[3] = new BulkItemRequest(3, new UpdateRequest().doc(new IndexRequest("index").source(Map.of("obj", sourceWithNull)))); + items[4] = new BulkItemRequest(4, new UpdateRequest().doc(new IndexRequest("index").source(Map.of("field2", "value")))); + BulkShardRequest request = new BulkShardRequest(new ShardId("test", "test", 0), WriteRequest.RefreshPolicy.NONE, items); + request.setInferenceFieldMap(inferenceFieldMap); + filter.apply(task, TransportShardBulkAction.ACTION_NAME, request, actionListener, actionFilterChain); + awaitLatch(chainExecuted, 10, TimeUnit.SECONDS); + } @SuppressWarnings({ "unchecked", "rawtypes" }) public void testManyRandomDocs() throws Exception { Map<String, StaticModel> inferenceModelMap = new HashMap<>(); @@ -441,6 +525,53 @@ public class ShardBulkInferenceActionFilterTests extends ESTestCase { new BulkItemRequest(requestId, new IndexRequest("index").source(expectedDocMap, requestContentType)) }; } + @SuppressWarnings({ "unchecked" }) + private static void assertInferenceResults( + boolean useLegacyFormat, + IndexRequest request, + String fieldName, + Object expectedOriginalValue, + int expectedChunkCount + ) { + final Map<String, Object> requestMap = request.sourceAsMap(); + if (useLegacyFormat) { + assertThat( + XContentMapValues.extractValue(getOriginalTextFieldName(fieldName), requestMap, EXPLICIT_NULL), + equalTo(expectedOriginalValue) + ); + + List<Map<String, Object>> chunks = (List<Map<String, Object>>) XContentMapValues.extractValue(getChunksFieldName(fieldName), requestMap); + if (expectedChunkCount > 0) { + assertNotNull(chunks); + assertThat(chunks.size(), equalTo(expectedChunkCount)); + } else { + // If the expected chunk count is 0, we expect that no inference has been performed. In this case, the source should not be + // transformed, and thus the semantic text field structure should not be created. + assertNull(chunks); + } + } else { + assertThat(XContentMapValues.extractValue(fieldName, requestMap, EXPLICIT_NULL), equalTo(expectedOriginalValue)); + + Map<String, Object> inferenceMetadataFields = (Map<String, Object>) XContentMapValues.extractValue( + InferenceMetadataFieldsMapper.NAME, + requestMap, + EXPLICIT_NULL + ); + assertNotNull(inferenceMetadataFields); + + // When using the inference metadata fields format, chunks are mapped by source field. We handle clearing inference results for + // a field by emitting an empty chunk list for it. This is done to prevent the clear operation from clearing inference results + // for other source fields. + List<Map<String, Object>> chunks = (List<Map<String, Object>>) XContentMapValues.extractValue( + getChunksFieldName(fieldName) + "." 
+ fieldName, + inferenceMetadataFields, + EXPLICIT_NULL + ); + assertNotNull(chunks); + assertThat(chunks.size(), equalTo(expectedChunkCount)); + } + } + private static class StaticModel extends TestModel { private final Map<String, ChunkedInference> resultMap; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldMapperTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldMapperTests.java index 6504ccc4dd39..8fcc0df0093c 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldMapperTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticInferenceMetadataFieldMapperTests.java @@ -9,10 +9,15 @@ package org.elasticsearch.xpack.inference.mapper; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldInfos; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.mapper.InferenceMetadataFieldsMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperServiceTestCase; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xpack.inference.InferencePlugin; import java.util.Collection; @@ -24,6 +29,32 @@ public class SemanticInferenceMetadataFieldMapperTests extends MapperServiceTest return Collections.singletonList(new InferencePlugin(Settings.EMPTY)); } + public void testIsEnabled() { + var settings = Settings.builder() + .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), getRandomCompatibleIndexVersion(true)) + .put(InferenceMetadataFieldsMapper.USE_LEGACY_SEMANTIC_TEXT_FORMAT.getKey(), true) + .build(); + assertFalse(InferenceMetadataFieldsMapper.isEnabled(settings)); + + settings = Settings.builder() + .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), getRandomCompatibleIndexVersion(true)) + .put(InferenceMetadataFieldsMapper.USE_LEGACY_SEMANTIC_TEXT_FORMAT.getKey(), false) + .build(); + assertFalse(InferenceMetadataFieldsMapper.isEnabled(settings)); + + settings = Settings.builder() + .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), getRandomCompatibleIndexVersion(false)) + .put(InferenceMetadataFieldsMapper.USE_LEGACY_SEMANTIC_TEXT_FORMAT.getKey(), true) + .build(); + assertFalse(InferenceMetadataFieldsMapper.isEnabled(settings)); + + settings = Settings.builder() + .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), getRandomCompatibleIndexVersion(false)) + .put(InferenceMetadataFieldsMapper.USE_LEGACY_SEMANTIC_TEXT_FORMAT.getKey(), false) + .build(); + assertTrue(InferenceMetadataFieldsMapper.isEnabled(settings)); + } + @Override public void testFieldHasValue() { assertTrue( @@ -42,4 +73,26 @@ public class SemanticInferenceMetadataFieldMapperTests extends MapperServiceTest public MappedFieldType getMappedFieldType() { return new SemanticInferenceMetadataFieldsMapper.FieldType(); } + + static IndexVersion getRandomCompatibleIndexVersion(boolean useLegacyFormat) { + if (useLegacyFormat) { + if (randomBoolean()) { + return IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.UPGRADE_TO_LUCENE_10_0_0, + IndexVersionUtils.getPreviousVersion(IndexVersions.INFERENCE_METADATA_FIELDS) + ); + } + return 
IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.INFERENCE_METADATA_FIELDS_BACKPORT); + } else { + if (randomBoolean()) { + return IndexVersionUtils.randomVersionBetween(random(), IndexVersions.INFERENCE_METADATA_FIELDS, IndexVersion.current()); + } + return IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.INFERENCE_METADATA_FIELDS_BACKPORT, + IndexVersionUtils.getPreviousVersion(IndexVersions.UPGRADE_TO_LUCENE_10_0_0) + ); + } + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java index 11362c3cedd0..e6d68c8343d8 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java @@ -24,6 +24,7 @@ import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.search.join.QueryBitSetProducer; import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.CheckedBiFunction; import org.elasticsearch.common.Strings; @@ -112,6 +113,10 @@ public class SemanticTextFieldMapperTests extends MapperTestCase { private MapperService createMapperService(XContentBuilder mappings, boolean useLegacyFormat) throws IOException { var settings = Settings.builder() + .put( + IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), + SemanticInferenceMetadataFieldMapperTests.getRandomCompatibleIndexVersion(useLegacyFormat) + ) .put(InferenceMetadataFieldsMapper.USE_LEGACY_SEMANTIC_TEXT_FORMAT.getKey(), useLegacyFormat) .build(); return createMapperService(settings, mappings); @@ -770,6 +775,35 @@ public class SemanticTextFieldMapperTests extends MapperTestCase { assertMapperService.accept(byteMapperService, DenseVectorFieldMapper.ElementType.BYTE); } + public void testModelSettingsRequiredWithChunks() throws IOException { + // Create inference results where model settings are set to null and chunks are provided + Model model = TestModel.createRandomInstance(TaskType.SPARSE_EMBEDDING); + SemanticTextField randomSemanticText = randomSemanticText(useLegacyFormat, "field", model, List.of("a"), XContentType.JSON); + SemanticTextField inferenceResults = new SemanticTextField( + randomSemanticText.useLegacyFormat(), + randomSemanticText.fieldName(), + randomSemanticText.originalValues(), + new SemanticTextField.InferenceResult( + randomSemanticText.inference().inferenceId(), + null, + randomSemanticText.inference().chunks() + ), + randomSemanticText.contentType() + ); + + MapperService mapperService = createMapperService( + mapping(b -> addSemanticTextMapping(b, "field", model.getInferenceEntityId(), null)), + useLegacyFormat + ); + SourceToParse source = source(b -> addSemanticTextInferenceResults(useLegacyFormat, b, List.of(inferenceResults))); + DocumentParsingException ex = expectThrows( + DocumentParsingException.class, + DocumentParsingException.class, + () -> mapperService.documentMapper().parse(source) + ); + assertThat(ex.getMessage(), containsString("[model_settings] must be set for field [field] when chunks are provided")); + } + private MapperService mapperServiceForFieldWithModelSettings( 
String fieldName, String inferenceId, diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/60_semantic_text_inference_update.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/60_semantic_text_inference_update.yml index 660d3e37f424..27c405f6c23b 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/60_semantic_text_inference_update.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/60_semantic_text_inference_update.yml @@ -819,84 +819,210 @@ setup: - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field.0.start_offset: 0 } - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field.0.end_offset: 30 } -# TODO: Uncomment this test once we implement a fix -#--- -#"Bypass inference on bulk update operation": -# # Update as upsert -# - do: -# bulk: -# body: -# - '{"update": {"_index": "test-index", "_id": "doc_1"}}' -# - '{"doc": { "sparse_field": "inference test", "dense_field": "another inference test", "non_inference_field": "non inference test" }, "doc_as_upsert": true}' -# -# - match: { errors: false } -# - match: { items.0.update.result: "created" } -# -# - do: -# bulk: -# body: -# - '{"update": {"_index": "test-index", "_id": "doc_1"}}' -# - '{"doc": { "non_inference_field": "another value" }, "doc_as_upsert": true}' -# refresh: true -# -# - match: { errors: false } -# - match: { items.0.update.result: "updated" } -# -# - do: -# search: -# index: test-index -# body: -# fields: [ _inference_fields ] -# query: -# match_all: { } -# -# - match: { hits.total.value: 1 } -# - match: { hits.total.relation: eq } -# -# - length: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks: 1 } -# - length: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field: 1 } -# - exists: hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field.0.embeddings -# - match: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field.0.start_offset: 0 } -# - match: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field.0.end_offset: 14 } -# -# - length: { hits.hits.0._source._inference_fields.dense_field.inference.chunks: 1 } -# - length: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field: 1 } -# - exists: hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field.0.embeddings -# - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field.0.start_offset: 0 } -# - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field.0.end_offset: 22 } -# -# - match: { hits.hits.0._source.sparse_field: "inference test" } -# - match: { hits.hits.0._source.dense_field: "another inference test" } -# - match: { hits.hits.0._source.non_inference_field: "another value" } -# -# - do: -# bulk: -# body: -# - '{"update": {"_index": "test-index", "_id": "doc_1"}}' -# - '{"doc": { "sparse_field": null, "dense_field": null, "non_inference_field": "updated value" }, "doc_as_upsert": true}' -# refresh: true -# -# - match: { errors: false } -# - match: { items.0.update.result: "updated" } -# -# - do: -# search: -# index: test-index -# body: -# fields: [ _inference_fields ] -# query: -# match_all: { } -# -# - match: { hits.total.value: 1 } -# - match: { hits.total.relation: eq } -# -# # TODO: BUG! 
Setting sparse_field & dense_field to null does not clear _inference_fields -# - length: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks: 1 } -# - length: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field: 0 } -# -# - length: { hits.hits.0._source._inference_fields.dense_field.inference.chunks: 1 } -# - length: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field: 0 } -# -# - not_exists: hits.hits.0._source.sparse_field -# - not_exists: hits.hits.0._source.dense_field -# - match: { hits.hits.0._source.non_inference_field: "updated value" } +--- +"Bypass inference on bulk update operation": + # Update as upsert + - do: + bulk: + body: + - '{"update": {"_index": "test-index", "_id": "doc_1"}}' + - '{"doc": { "sparse_field": "inference test", "dense_field": "another inference test", "non_inference_field": "non inference test" }, "doc_as_upsert": true}' + + - match: { errors: false } + - match: { items.0.update.result: "created" } + + - do: + bulk: + body: + - '{"update": {"_index": "test-index", "_id": "doc_1"}}' + - '{"doc": { "non_inference_field": "another value" }, "doc_as_upsert": true}' + refresh: true + + - match: { errors: false } + - match: { items.0.update.result: "updated" } + + - do: + search: + index: test-index + body: + fields: [ _inference_fields ] + query: + match_all: { } + + - match: { hits.total.value: 1 } + - match: { hits.total.relation: eq } + + - length: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks: 1 } + - length: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field: 1 } + - exists: hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field.0.embeddings + - match: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field.0.start_offset: 0 } + - match: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field.0.end_offset: 14 } + + - length: { hits.hits.0._source._inference_fields.dense_field.inference.chunks: 1 } + - length: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field: 1 } + - exists: hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field.0.embeddings + - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field.0.start_offset: 0 } + - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field.0.end_offset: 22 } + + - match: { hits.hits.0._source.sparse_field: "inference test" } + - match: { hits.hits.0._source.dense_field: "another inference test" } + - match: { hits.hits.0._source.non_inference_field: "another value" } + +--- +"Explicit nulls clear inference results on bulk update operation": + - requires: + cluster_features: "semantic_text.inference_metadata_fields.explicit_null_fixes" + reason: Fixes explicit null handling when using the _inference_fields metafield + + - skip: + features: [ "headers" ] + + - do: + indices.create: + index: test-copy-to-index + body: + settings: + index: + mapping: + semantic_text: + use_legacy_format: false + mappings: + properties: + sparse_field: + type: semantic_text + inference_id: sparse-inference-id + sparse_source_field: + type: text + copy_to: sparse_field + dense_field: + type: semantic_text + inference_id: dense-inference-id + dense_source_field: + type: text + copy_to: dense_field + non_inference_field: + type: text + + - do: + index: + index: test-copy-to-index + id: doc_1 + body: + sparse_field: 
"inference test" + sparse_source_field: "sparse source test" + dense_field: "another inference test" + dense_source_field: "dense source test" + non_inference_field: "non inference test" + refresh: true + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the embeddings as doubles + Content-Type: application/json + search: + index: test-copy-to-index + body: + fields: [ _inference_fields ] + query: + match_all: { } + + - match: { hits.total.value: 1 } + - match: { hits.total.relation: eq } + + - length: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks: 2 } + - length: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field: 1 } + - exists: hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field.0.embeddings + - match: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field.0.start_offset: 0 } + - match: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_field.0.end_offset: 14 } + - length: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_source_field: 1 } + - exists: hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_source_field.0.embeddings + - set: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_source_field.0.embeddings: sparse_source_field_embeddings } + - match: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_source_field.0.start_offset: 0 } + - match: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_source_field.0.end_offset: 18 } + + - length: { hits.hits.0._source._inference_fields.dense_field.inference.chunks: 2 } + - length: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field: 1 } + - exists: hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field.0.embeddings + - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field.0.start_offset: 0 } + - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_field.0.end_offset: 22 } + - length: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_source_field: 1 } + - exists: hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_source_field.0.embeddings + - set: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_source_field.0.embeddings: dense_source_field_embeddings } + - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_source_field.0.start_offset: 0 } + - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_source_field.0.end_offset: 17 } + + - match: { hits.hits.0._source.sparse_field: "inference test" } + - match: { hits.hits.0._source.sparse_source_field: "sparse source test" } + - match: { hits.hits.0._source.dense_field: "another inference test" } + - match: { hits.hits.0._source.dense_source_field: "dense source test" } + - match: { hits.hits.0._source.non_inference_field: "non inference test" } + + - do: + bulk: + body: + - '{"update": {"_index": "test-copy-to-index", "_id": "doc_1"}}' + - '{"doc": { "sparse_field": null, "dense_field": null, "non_inference_field": "updated value" }, "doc_as_upsert": true}' + refresh: true + + - match: { errors: false } + - match: { items.0.update.result: "updated" } + + - do: + headers: + # Force JSON content type so that we use a parser that interprets the embeddings 
as doubles + Content-Type: application/json + search: + index: test-copy-to-index + body: + fields: [ _inference_fields ] + query: + match_all: { } + + - match: { hits.total.value: 1 } + - match: { hits.total.relation: eq } + + - length: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks: 1 } + - length: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_source_field: 1 } + - match: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_source_field.0.embeddings: $sparse_source_field_embeddings } + - match: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_source_field.0.start_offset: 0 } + - match: { hits.hits.0._source._inference_fields.sparse_field.inference.chunks.sparse_source_field.0.end_offset: 18 } + + - length: { hits.hits.0._source._inference_fields.dense_field.inference.chunks: 1 } + - length: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_source_field: 1 } + - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_source_field.0.embeddings: $dense_source_field_embeddings } + - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_source_field.0.start_offset: 0 } + - match: { hits.hits.0._source._inference_fields.dense_field.inference.chunks.dense_source_field.0.end_offset: 17 } + + - not_exists: hits.hits.0._source.sparse_field + - match: { hits.hits.0._source.sparse_source_field: "sparse source test" } + - not_exists: hits.hits.0._source.dense_field + - match: { hits.hits.0._source.dense_source_field: "dense source test" } + - match: { hits.hits.0._source.non_inference_field: "updated value" } + + - do: + bulk: + body: + - '{"update": {"_index": "test-copy-to-index", "_id": "doc_1"}}' + - '{"doc": { "sparse_source_field": null, "dense_source_field": null, "non_inference_field": "another value" }, "doc_as_upsert": true}' + refresh: true + + - match: { errors: false } + - match: { items.0.update.result: "updated" } + + - do: + search: + index: test-copy-to-index + body: + fields: [ _inference_fields ] + query: + match_all: { } + + - match: { hits.total.value: 1 } + - match: { hits.total.relation: eq } + + - not_exists: hits.hits.0._source._inference_fields + - not_exists: hits.hits.0._source.sparse_field + - not_exists: hits.hits.0._source.sparse_source_field + - not_exists: hits.hits.0._source.dense_field + - not_exists: hits.hits.0._source.dense_source_field + - match: { hits.hits.0._source.non_inference_field: "another value" } diff --git a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/60_semantic_text_inference_update_bwc.yml b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/60_semantic_text_inference_update_bwc.yml index 6b494d531b2d..912cdb5a85d3 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/60_semantic_text_inference_update_bwc.yml +++ b/x-pack/plugin/inference/src/yamlRestTest/resources/rest-api-spec/test/inference/60_semantic_text_inference_update_bwc.yml @@ -632,6 +632,31 @@ setup: - match: { _source.dense_field.inference.chunks.0.text: "another inference test" } - match: { _source.non_inference_field: "another value" } +--- +"Explicit nulls clear inference results on bulk update operation": + # Update as upsert + - do: + bulk: + body: + - '{"update": {"_index": "test-index", "_id": "doc_1"}}' + - '{"doc": { "sparse_field": "inference test", "dense_field": "another inference test", 
"non_inference_field": "non inference test" }, "doc_as_upsert": true}' + + - match: { errors: false } + - match: { items.0.update.result: "created" } + + - do: + get: + index: test-index + id: doc_1 + + - match: { _source.sparse_field.text: "inference test" } + - exists: _source.sparse_field.inference.chunks.0.embeddings + - match: { _source.sparse_field.inference.chunks.0.text: "inference test" } + - match: { _source.dense_field.text: "another inference test" } + - exists: _source.dense_field.inference.chunks.0.embeddings + - match: { _source.dense_field.inference.chunks.0.text: "another inference test" } + - match: { _source.non_inference_field: "non inference test" } + - do: bulk: body: diff --git a/x-pack/plugin/monitoring/src/main/plugin-metadata/entitlement-policy.yaml b/x-pack/plugin/monitoring/src/main/plugin-metadata/entitlement-policy.yaml new file mode 100644 index 000000000000..d826de8ca872 --- /dev/null +++ b/x-pack/plugin/monitoring/src/main/plugin-metadata/entitlement-policy.yaml @@ -0,0 +1,2 @@ +ALL-UNNAMED: + - set_https_connection_properties # potentially required by apache.httpcomponents diff --git a/x-pack/plugin/otel-data/src/main/resources/component-templates/logs-otel@mappings.yaml b/x-pack/plugin/otel-data/src/main/resources/component-templates/logs-otel@mappings.yaml index 5f4dcbd41672..9f19e2e04d2c 100644 --- a/x-pack/plugin/otel-data/src/main/resources/component-templates/logs-otel@mappings.yaml +++ b/x-pack/plugin/otel-data/src/main/resources/component-templates/logs-otel@mappings.yaml @@ -39,6 +39,8 @@ template: log.level: type: alias path: severity_text + event_name: + type: keyword body: type: object properties: diff --git a/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml index 95a42b137df5..635ba386f739 100644 --- a/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml +++ b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml @@ -105,6 +105,7 @@ Event body: service.name: my-service attributes: event.name: foo + event_name: foo body: structured: foo: @@ -119,6 +120,7 @@ Event body: index: $datastream-backing-index - is_true: $datastream-backing-index - match: { .$datastream-backing-index.mappings.properties.body.properties.structured.properties.foo\.bar.type: "keyword" } + - match: { .$datastream-backing-index.mappings.properties.event_name.type: "keyword" } --- Structured log body: - do: diff --git a/x-pack/plugin/security/src/main/plugin-metadata/entitlement-policy.yaml b/x-pack/plugin/security/src/main/plugin-metadata/entitlement-policy.yaml new file mode 100644 index 000000000000..98c6b8155357 --- /dev/null +++ b/x-pack/plugin/security/src/main/plugin-metadata/entitlement-policy.yaml @@ -0,0 +1,2 @@ +org.elasticsearch.security: + - set_https_connection_properties # for CommandLineHttpClient diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/190_lookup_join.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/190_lookup_join.yml index 57d2dac23026..1567b6b556bd 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/190_lookup_join.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/190_lookup_join.yml @@ -6,7 +6,7 @@ setup: - method: POST path: /_query parameters: [] - capabilities: [join_lookup_v9] + capabilities: [join_lookup_v10] reason: "uses LOOKUP JOIN" - do: 
      indices.create:
diff --git a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java
index 108efaa0f769..0b31e96ece84 100644
--- a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java
+++ b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java
@@ -1093,7 +1093,7 @@ public class WildcardFieldMapperTests extends MapperTestCase {
             IndexFieldData.Builder builder = fieldType.fielddataBuilder(fdc);
             return builder.build(new IndexFieldDataCache.None(), null);
         };
-        MappingLookup lookup = MappingLookup.fromMapping(Mapping.EMPTY, null);
+        MappingLookup lookup = MappingLookup.fromMapping(Mapping.EMPTY);
         return new SearchExecutionContext(
             0,
             0,
diff --git a/x-pack/qa/repository-old-versions-compatibility/build.gradle b/x-pack/qa/repository-old-versions-compatibility/build.gradle
new file mode 100644
index 000000000000..37e5eea85a08
--- /dev/null
+++ b/x-pack/qa/repository-old-versions-compatibility/build.gradle
@@ -0,0 +1,25 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+apply plugin: 'elasticsearch.internal-java-rest-test'
+apply plugin: 'elasticsearch.internal-test-artifact'
+apply plugin: 'elasticsearch.bwc-test'
+
+buildParams.bwcVersions.withLatestReadOnlyIndexCompatible { bwcVersion ->
+    tasks.named("javaRestTest").configure {
+        systemProperty("tests.minimum.index.compatible", bwcVersion)
+        usesBwcDistribution(bwcVersion)
+        enabled = true
+    }
+}
+
+tasks.withType(Test).configureEach {
+    // CI doesn't like it when there are multiple clusters running at once
+    maxParallelForks = 1
+}
+
diff --git a/x-pack/qa/repository-old-versions-compatibility/src/javaRestTest/java/org/elasticsearch/oldrepos/AbstractUpgradeCompatibilityTestCase.java b/x-pack/qa/repository-old-versions-compatibility/src/javaRestTest/java/org/elasticsearch/oldrepos/AbstractUpgradeCompatibilityTestCase.java
new file mode 100644
index 000000000000..4ff2b80aa29c
--- /dev/null
+++ b/x-pack/qa/repository-old-versions-compatibility/src/javaRestTest/java/org/elasticsearch/oldrepos/AbstractUpgradeCompatibilityTestCase.java
@@ -0,0 +1,211 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.oldrepos;
+
+import com.carrotsearch.randomizedtesting.TestMethodAndParams;
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering;
+
+import org.apache.http.util.EntityUtils;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.repositories.fs.FsRepository;
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider;
+import org.elasticsearch.test.cluster.local.distribution.DistributionType;
+import org.elasticsearch.test.cluster.util.Version;
+import org.elasticsearch.test.rest.ESRestTestCase;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TemporaryFolder;
+import org.junit.rules.TestRule;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.URISyntaxException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Comparator;
+import java.util.Objects;
+import java.util.stream.Stream;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipInputStream;
+
+import static org.elasticsearch.test.cluster.util.Version.CURRENT;
+import static org.elasticsearch.test.cluster.util.Version.fromString;
+import static org.elasticsearch.test.rest.ObjectPath.createFromResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+@TestCaseOrdering(AbstractUpgradeCompatibilityTestCase.TestCaseOrdering.class)
+public abstract class AbstractUpgradeCompatibilityTestCase extends ESRestTestCase {
+
+    protected static final Version VERSION_MINUS_2 = fromString(System.getProperty("tests.minimum.index.compatible"));
+    protected static final Version VERSION_MINUS_1 = fromString(System.getProperty("tests.minimum.wire.compatible"));
+    protected static final Version VERSION_CURRENT = CURRENT;
+
+    protected static TemporaryFolder REPOSITORY_PATH = new TemporaryFolder();
+
+    protected static LocalClusterConfigProvider clusterConfig = c -> {};
+    private static ElasticsearchCluster cluster = ElasticsearchCluster.local()
+        .distribution(DistributionType.DEFAULT)
+        .version(VERSION_MINUS_1)
+        .nodes(2)
+        .setting("xpack.security.enabled", "false")
+        .setting("xpack.ml.enabled", "false")
+        .setting("path.repo", () -> REPOSITORY_PATH.getRoot().getPath())
+        .apply(() -> clusterConfig)
+        .build();
+
+    @ClassRule
+    public static TestRule ruleChain = RuleChain.outerRule(REPOSITORY_PATH).around(cluster);
+
+    private static boolean upgradeFailed = false;
+
+    private final Version clusterVersion;
+
+    public AbstractUpgradeCompatibilityTestCase(@Name("cluster") Version clusterVersion) {
+        this.clusterVersion = clusterVersion;
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() {
+        return Stream.of(VERSION_MINUS_1, CURRENT).map(v -> new Object[] { v }).toList();
+    }
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+
+    @Override
+    protected boolean preserveClusterUponCompletion() {
+        return true;
+    }
+
+    /**
+     * Verifies the current cluster version against the target {@code clusterVersion} and performs a "full cluster restart"
+     * upgrade if the current version is behind it. The target cluster version is provided externally and is controlled by
+     * the Gradle setup.
+     *
+     * @throws Exception if the upgrade or the subsequent client re-initialization fails
+     */
+    @Before
+    public void maybeUpgrade() throws Exception {
+        // We want to use this test suite for the V9 upgrade, but we are not fully committed to necessarily having N-2 support
+        // in V10, so we add a check here to ensure we'll revisit this decision once V10 exists.
+        assertThat("Explicit check that N-2 version is Elasticsearch 7", VERSION_MINUS_2.getMajor(), equalTo(7));
+
+        var currentVersion = clusterVersion();
+        if (currentVersion.before(clusterVersion)) {
+            try {
+                cluster.upgradeToVersion(clusterVersion);
+                closeClients();
+                initClient();
+            } catch (Exception e) {
+                upgradeFailed = true;
+                throw e;
+            }
+        }
+
+        // Skip remaining tests if upgrade failed
+        assumeFalse("Cluster upgrade failed", upgradeFailed);
+    }
+
+    protected static Version clusterVersion() throws Exception {
+        var response = assertOK(client().performRequest(new Request("GET", "/")));
+        var responseBody = createFromResponse(response);
+        var version = Version.fromString(responseBody.evaluate("version.number").toString());
+        assertThat("Failed to retrieve cluster version", version, notNullValue());
+        return version;
+    }
+
+    /**
+     * Executes the test suite with the parameters provided by {@link #parameters()} in version order.
+     */
+    public static class TestCaseOrdering implements Comparator<TestMethodAndParams> {
+        @Override
+        public int compare(TestMethodAndParams o1, TestMethodAndParams o2) {
+            var version1 = (Version) o1.getInstanceArguments().get(0);
+            var version2 = (Version) o2.getInstanceArguments().get(0);
+            return version1.compareTo(version2);
+        }
+    }
+
+    public final void verifyCompatibility(String version) throws Exception {
+        final String repository = "repository";
+        final String snapshot = "snapshot";
+        final String index = "index";
+        final int numDocs = 5;
+
+        String repositoryPath = REPOSITORY_PATH.getRoot().getPath();
+
+        if (VERSION_MINUS_1.equals(clusterVersion())) {
+            assertEquals(VERSION_MINUS_1, clusterVersion());
+            assertTrue(getIndices(client()).isEmpty());
+
+            // Copy a snapshot of an index with 5 documents
+            copySnapshotFromResources(repositoryPath, version);
+            registerRepository(client(), repository, FsRepository.TYPE, true, Settings.builder().put("location", repositoryPath).build());
+            recover(client(), repository, snapshot, index);
+
+            assertTrue(getIndices(client()).contains(index));
+            assertDocCount(client(), index, numDocs);
+
+            return;
+        }
+
+        if (VERSION_CURRENT.equals(clusterVersion())) {
+            assertEquals(VERSION_CURRENT, clusterVersion());
+            assertTrue(getIndices(client()).contains(index));
+            assertDocCount(client(), index, numDocs);
+        }
+    }
+
+    public abstract void recover(RestClient restClient, String repository, String snapshot, String index) throws Exception;
+
+    private static String getIndices(RestClient client) throws IOException {
+        final Request request = new Request("GET", "_cat/indices");
+        Response response = client.performRequest(request);
+        return EntityUtils.toString(response.getEntity());
+    }
+
+    private static void copySnapshotFromResources(String repositoryPath, String version) throws IOException, URISyntaxException {
+        Path zipFilePath = Paths.get(
+            Objects.requireNonNull(AbstractUpgradeCompatibilityTestCase.class.getClassLoader().getResource("snapshot_v" + version + ".zip"))
+                .toURI()
+        );
+        unzip(zipFilePath, Paths.get(repositoryPath));
+    }
+
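+    /**
+     * Extracts a zipped snapshot repository (bundled as a test resource) into the target directory,
+     * recreating directories and streaming each file entry to disk.
+     */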
+    private static void unzip(Path zipFilePath, Path outputDir) throws IOException {
+        try (ZipInputStream zipIn = new ZipInputStream(Files.newInputStream(zipFilePath))) {
+            ZipEntry entry;
+            while ((entry = zipIn.getNextEntry()) != null) {
+                Path outputPath = outputDir.resolve(entry.getName());
+                if (entry.isDirectory()) {
+                    Files.createDirectories(outputPath);
+                } else {
+                    Files.createDirectories(outputPath.getParent());
+                    try (OutputStream out = Files.newOutputStream(outputPath)) {
+                        byte[] buffer = new byte[1024];
+                        int len;
+                        while ((len = zipIn.read(buffer)) > 0) {
+                            out.write(buffer, 0, len);
+                        }
+                    }
+                }
+                zipIn.closeEntry();
+            }
+        }
+    }
+}
diff --git a/x-pack/qa/repository-old-versions-compatibility/src/javaRestTest/java/org/elasticsearch/oldrepos/archiveindex/ArchiveIndexTestCase.java b/x-pack/qa/repository-old-versions-compatibility/src/javaRestTest/java/org/elasticsearch/oldrepos/archiveindex/ArchiveIndexTestCase.java
new file mode 100644
index 000000000000..17bdb76e0eae
--- /dev/null
+++ b/x-pack/qa/repository-old-versions-compatibility/src/javaRestTest/java/org/elasticsearch/oldrepos/archiveindex/ArchiveIndexTestCase.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.oldrepos.archiveindex;
+
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.oldrepos.AbstractUpgradeCompatibilityTestCase;
+import org.elasticsearch.test.cluster.util.Version;
+
+import static org.elasticsearch.test.rest.ObjectPath.createFromResponse;
+
+/**
+ * Test suite for archive-index backward compatibility with N-2 versions.
+ * The suite creates a cluster on the N-1 version, where N is the current version, restores snapshots taken
+ * on old clusters (version 5/6), and then upgrades the cluster to the current version. Test methods are
+ * executed after each upgrade.
+ *
+ * For example, the suite creates a version 8 cluster and restores a snapshot of an index created on an
+ * ES version 5/6 deployment. The cluster is then upgraded to version 9, verifying that the archive index
+ * is still successfully restored.
+ */
+public class ArchiveIndexTestCase extends AbstractUpgradeCompatibilityTestCase {
+
+    static {
+        clusterConfig = config -> config.setting("xpack.license.self_generated.type", "trial");
+    }
+
+    public ArchiveIndexTestCase(Version version) {
+        super(version);
+    }
+
+    /**
+     * Overrides the snapshot-restore operation for the archive-indices scenario.
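+     * The restore request waits for completion and excludes both global state and aliases.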
+ */ + @Override + public void recover(RestClient client, String repository, String snapshot, String index) throws Exception { + var request = new Request("POST", "/_snapshot/" + repository + "/" + snapshot + "/_restore"); + request.addParameter("wait_for_completion", "true"); + request.setJsonEntity(Strings.format(""" + { + "indices": "%s", + "include_global_state": false, + "rename_pattern": "(.+)", + "include_aliases": false + }""", index)); + createFromResponse(client.performRequest(request)); + } +} diff --git a/x-pack/qa/repository-old-versions-compatibility/src/javaRestTest/java/org/elasticsearch/oldrepos/archiveindex/RestoreFromVersion5IT.java b/x-pack/qa/repository-old-versions-compatibility/src/javaRestTest/java/org/elasticsearch/oldrepos/archiveindex/RestoreFromVersion5IT.java new file mode 100644 index 000000000000..9f62d65592a3 --- /dev/null +++ b/x-pack/qa/repository-old-versions-compatibility/src/javaRestTest/java/org/elasticsearch/oldrepos/archiveindex/RestoreFromVersion5IT.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.oldrepos.archiveindex; + +import org.elasticsearch.test.cluster.util.Version; + +public class RestoreFromVersion5IT extends ArchiveIndexTestCase { + + public RestoreFromVersion5IT(Version version) { + super(version); + } + + public void testArchiveIndex() throws Exception { + verifyCompatibility("5"); + } +} diff --git a/x-pack/qa/repository-old-versions-compatibility/src/javaRestTest/java/org/elasticsearch/oldrepos/archiveindex/RestoreFromVersion6IT.java b/x-pack/qa/repository-old-versions-compatibility/src/javaRestTest/java/org/elasticsearch/oldrepos/archiveindex/RestoreFromVersion6IT.java new file mode 100644 index 000000000000..b3cca45c205f --- /dev/null +++ b/x-pack/qa/repository-old-versions-compatibility/src/javaRestTest/java/org/elasticsearch/oldrepos/archiveindex/RestoreFromVersion6IT.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.oldrepos.archiveindex;
+
+import org.elasticsearch.test.cluster.util.Version;
+
+public class RestoreFromVersion6IT extends ArchiveIndexTestCase {
+
+    public RestoreFromVersion6IT(Version version) {
+        super(version);
+    }
+
+    public void testArchiveIndex() throws Exception {
+        verifyCompatibility("6");
+    }
+}
diff --git a/x-pack/qa/repository-old-versions-compatibility/src/javaRestTest/resources/README.md b/x-pack/qa/repository-old-versions-compatibility/src/javaRestTest/resources/README.md
new file mode 100644
index 000000000000..c937448e9723
--- /dev/null
+++ b/x-pack/qa/repository-old-versions-compatibility/src/javaRestTest/resources/README.md
@@ -0,0 +1,147 @@
+
+### Create data structure and config file
+```
+mkdir /tmp/sharedESData
+mkdir /tmp/sharedESData/config
+mkdir /tmp/sharedESData/data
+mkdir /tmp/sharedESData/snapshots
+```
+
+```
+touch /tmp/sharedESData/config/elasticsearch.yml
+
+cat <<EOF >> /tmp/sharedESData/config/elasticsearch.yml
+cluster.name: "archive-indices-test"
+node.name: "node-1"
+path.repo: ["/usr/share/elasticsearch/snapshots"]
+network.host: 0.0.0.0
+http.port: 9200
+
+discovery.type: single-node
+xpack.security.enabled: false
+EOF
+```
+
+### Define path
+```
+SHARED_FOLDER=/tmp/sharedESData
+```
+
+### Deploy container
+```
+docker run -d --name es \
+-p 9200:9200 -p 9300:9300 \
+-v ${SHARED_FOLDER}/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
+-v ${SHARED_FOLDER}/data:/usr/share/elasticsearch/data \
+-v ${SHARED_FOLDER}/snapshots:/usr/share/elasticsearch/snapshots \
+--env "discovery.type=single-node" \
+docker.elastic.co/elasticsearch/elasticsearch:5.6.16
+
+// Version 6
+docker.elastic.co/elasticsearch/elasticsearch:6.8.23
+```
+
+### Create Index Version 5
+```
+PUT /index
+{
+  "settings": {
+    "number_of_shards": 1,
+    "number_of_replicas": 1
+  },
+  "mappings": {
+    "my_type": {
+      "properties": {
+        "title": {
+          "type": "text"
+        },
+        "created_at": {
+          "type": "date"
+        },
+        "views": {
+          "type": "integer"
+        }
+      }
+    }
+  }
+}
+```
+
+### Create Index Version 6
+```
+PUT /index
+{
+  "settings": {
+    "number_of_shards": 1,
+    "number_of_replicas": 1
+  },
+  "mappings": {
+    "_doc": {
+      "properties": {
+        "title": {
+          "type": "text"
+        },
+        "content": {
+          "type": "text"
+        },
+        "created_at": {
+          "type": "date"
+        }
+      }
+    }
+  }
+}
+```
+
+### Add documents Version 5
+```
+POST /index/my_type
+{
+  "title": "Title 5",
+  "content": "Elasticsearch is a powerful search engine.",
+  "created_at": "2024-12-16"
+}
+```
+
+### Add documents Version 6
+```
+POST /index/_doc
+{
+  "title": "Title 5",
+  "content": "Elasticsearch is a powerful search engine.",
+  "created_at": "2024-12-16"
+}
+```
+
+### Register repository
+```
+PUT /_snapshot/repository
+{
+  "type": "fs",
+  "settings": {
+    "location": "/usr/share/elasticsearch/snapshots",
+    "compress": true
+  }
+}
+```
+
+### Create a snapshot
+```
+PUT /_snapshot/repository/snapshot
+{
+  "indices": "index",
+  "ignore_unavailable": "true",
+  "include_global_state": false
+}
+```
+
+### Create zip file
+```
+zip -r snapshot.zip /tmp/sharedESData/snapshots/*
+```
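+
+### Verify the snapshot (optional)
+Before creating the zip file, it can be useful to confirm that the snapshot completed and covered all shards; the response should report a `SUCCESS` state:
+```
+GET /_snapshot/repository/snapshot
+```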
+
+### Cleanup
+```
+docker rm -f es
+rm -rf /tmp/sharedESData/
+```
diff --git a/x-pack/qa/repository-old-versions-compatibility/src/javaRestTest/resources/snapshot_v5.zip b/x-pack/qa/repository-old-versions-compatibility/src/javaRestTest/resources/snapshot_v5.zip
new file mode 100644
index 000000000000..54dcf4f6182c
Binary files /dev/null and b/x-pack/qa/repository-old-versions-compatibility/src/javaRestTest/resources/snapshot_v5.zip differ
diff --git a/x-pack/qa/repository-old-versions-compatibility/src/javaRestTest/resources/snapshot_v6.zip b/x-pack/qa/repository-old-versions-compatibility/src/javaRestTest/resources/snapshot_v6.zip
new file mode 100644
index 000000000000..d83152fb71c6
Binary files /dev/null and b/x-pack/qa/repository-old-versions-compatibility/src/javaRestTest/resources/snapshot_v6.zip differ