diff --git a/LICENSE.txt b/LICENSE.txt index e601d4382ad6..c1b552919e4f 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,13 +1,10 @@ -Source code in this repository is variously licensed under the Apache License -Version 2.0, an Apache compatible license, or the Elastic License. Outside of -the "x-pack" folder, source code in a given file is licensed under the Apache -License Version 2.0, unless otherwise noted at the beginning of the file or a -LICENSE file present in the directory subtree declares a separate license. -Within the "x-pack" folder, source code in a given file is licensed under the -Elastic License, unless otherwise noted at the beginning of the file or a -LICENSE file present in the directory subtree declares a separate license. +Source code in this repository is covered by one of three licenses: (i) the +Apache License 2.0, (ii) an Apache License 2.0 compatible license, or (iii) the +Elastic License. The default license throughout the repository is the Apache License +2.0, unless the file header specifies another license. Elastic Licensed code is found +only in the x-pack directory. The build produces two sets of binaries - one set that falls under the Elastic -License and another set that falls under Apache License Version 2.0. The -binaries that contain `-oss` in the artifact name are licensed under the Apache -License Version 2.0. +License and another set that falls under the Apache License 2.0. The binaries that +contain `-oss` in the artifact name are licensed under the Apache License 2.0, and +these binaries do not package any code from the x-pack directory. diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 022ebc47f7ec..cb94ad4a8a96 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -76,28 +76,21 @@ In order to set an Elasticsearch setting, provide a setting with the following p === Test case filtering. -- `tests.class` is a class-filtering shell-like glob pattern, -- `tests.method` is a method-filtering glob pattern. +You can run a single test, provided that you specify the Gradle project. See the documentation on +https://docs.gradle.org/current/userguide/userguide_single.html#simple_name_pattern[simple name pattern filtering]. -Run a single test case (variants) +Run a single test case in the `server` project: ---------------------------------------------------------- -./gradlew test -Dtests.class=org.elasticsearch.package.ClassName -./gradlew test "-Dtests.class=*.ClassName" +./gradlew :server:test --tests org.elasticsearch.package.ClassName ---------------------------------------------------------- -Run all tests in a package and its sub-packages +Run all tests in a package and its sub-packages: ---------------------------------------------------- -./gradlew test "-Dtests.class=org.elasticsearch.package.*" +./gradlew :server:test --tests 'org.elasticsearch.package.*' ---------------------------------------------------- -Run any test methods that contain 'esi' (like: ...r*esi*ze...) - -------------------------------- -./gradlew test "-Dtests.method=*esi*" -------------------------------- - Run all tests that are waiting for a bugfix (disabled by default) ------------------------------------------------ @@ -118,7 +111,7 @@ Every test repetition will have a different method seed (derived from a single random master seed).
-------------------------------------------------- -./gradlew test -Dtests.iters=N -Dtests.class=*.ClassName +./gradlew :server:test -Dtests.iters=N --tests org.elasticsearch.package.ClassName -------------------------------------------------- === Repeats _all_ tests of ClassName N times. @@ -127,7 +120,7 @@ Every test repetition will have exactly the same master (0xdead) and method-level (0xbeef) seed. ------------------------------------------------------------------------ -./gradlew test -Dtests.iters=N -Dtests.class=*.ClassName -Dtests.seed=DEAD:BEEF +./gradlew :server:test -Dtests.iters=N -Dtests.seed=DEAD:BEEF --tests org.elasticsearch.package.ClassName ------------------------------------------------------------------------ === Repeats a given test N times @@ -137,14 +130,14 @@ ie: testFoo[0], testFoo[1], etc... so using testmethod or tests.method ending in a glob is necessary to ensure iterations are run). ------------------------------------------------------------------------- -./gradlew test -Dtests.iters=N -Dtests.class=*.ClassName -Dtests.method=mytest* +./gradlew :server:test -Dtests.iters=N --tests org.elasticsearch.package.ClassName.methodName ------------------------------------------------------------------------- Repeats N times but skips any tests after the first failure or M initial failures. ------------------------------------------------------------- -./gradlew test -Dtests.iters=N -Dtests.failfast=true -Dtestcase=... -./gradlew test -Dtests.iters=N -Dtests.maxfailures=M -Dtestcase=... +./gradlew test -Dtests.iters=N -Dtests.failfast=true ... +./gradlew test -Dtests.iters=N -Dtests.maxfailures=M ... ------------------------------------------------------------- === Test groups. @@ -175,7 +168,7 @@ systemProp.tests.jvms=8 ---------------------------- It's difficult to pick the "right" number here. Hypercores don't count for CPU -intensive tests and you should leave some slack for JVM-interal threads like +intensive tests and you should leave some slack for JVM-internal threads like the garbage collector. And you have to have enough RAM to handle each JVM. === Test compatibility. @@ -553,10 +546,10 @@ When running `./gradlew check`, minimal bwc checks are also run against compatib Sometimes a backward compatibility change spans two versions. A common case is new functionality that needs a BWC bridge in an unreleased version of a release branch (for example, 5.x). To test the changes, you can instruct Gradle to build the BWC version from another remote/branch combination instead of -pulling the release branch from GitHub. You do so using the `tests.bwc.remote` and `tests.bwc.refspec.BRANCH` system properties: +pulling the release branch from GitHub. You do so using the `bwc.remote` and `bwc.refspec.BRANCH` system properties: ------------------------------------------------- -./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec.5.x=index_req_bwc_5.x +./gradlew check -Dbwc.remote=${remote} -Dbwc.refspec.5.x=index_req_bwc_5.x ------------------------------------------------- The branch needs to be available on the remote that the BWC makes of the @@ -571,7 +564,7 @@ will need to: will contain your change. . Create a branch called `index_req_bwc_5.x` off `5.x`. This will contain your bwc layer. . Push both branches to your remote repository. -. Run the tests with `./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec.5.x=index_req_bwc_5.x`. +. Run the tests with `./gradlew check -Dbwc.remote=${remote} -Dbwc.refspec.5.x=index_req_bwc_5.x`.
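As an illustration only (not part of this patch), the bwc steps above can be strung together roughly as follows. `index_req_change` is a hypothetical branch name for the change itself, mirroring the `index_req_bwc_5.x` naming used above, and `${remote}` is the name of your remote repository, as in the earlier example:

-------------------------------------------------
# hypothetical branch names; adapt them to your own change
git checkout -b index_req_change master
git checkout -b index_req_bwc_5.x 5.x
git push ${remote} index_req_change index_req_bwc_5.x
./gradlew check -Dbwc.remote=${remote} -Dbwc.refspec.5.x=index_req_bwc_5.x
-------------------------------------------------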
==== Skip fetching latest diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 7107f6f96426..75adcbea2f16 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -881,6 +881,9 @@ class BuildPlugin implements Plugin { // TODO: remove this once ctx isn't added to update script params in 7.0 test.systemProperty 'es.scripting.update.ctx_in_params', 'false' + // TODO: remove this once cname is prepended to transport.publish_address by default in 8.0 + test.systemProperty 'es.transport.cname_in_publish_address', 'true' + test.testLogging { TestLoggingContainer logging -> logging.showExceptions = true logging.showCauses = true diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index f70f258b80a7..524d4fd9e1de 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -21,9 +21,9 @@ package org.elasticsearch.gradle.test import org.apache.tools.ant.DefaultLogger import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.BuildPlugin +import org.elasticsearch.gradle.BwcVersions import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.BwcVersions import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.plugin.PluginBuildPlugin import org.elasticsearch.gradle.plugin.PluginPropertiesExtension @@ -39,11 +39,13 @@ import org.gradle.api.logging.Logger import org.gradle.api.tasks.Copy import org.gradle.api.tasks.Delete import org.gradle.api.tasks.Exec +import org.gradle.internal.jvm.Jvm import java.nio.charset.StandardCharsets import java.nio.file.Paths import java.util.concurrent.TimeUnit import java.util.stream.Collectors + /** * A helper for creating tasks to build a cluster that is used by a task, and tear down the cluster when the task is finished. 
*/ @@ -887,15 +889,7 @@ class ClusterFormationTasks { onlyIf { node.pidFile.exists() } // the pid file won't actually be read until execution time, since the read is wrapped within an inner closure of the GString ext.pid = "${ -> node.pidFile.getText('UTF-8').trim()}" - File jps - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - jps = getJpsExecutableByName(project, "jps.exe") - } else { - jps = getJpsExecutableByName(project, "jps") - } - if (!jps.exists()) { - throw new GradleException("jps executable not found; ensure that you're running Gradle with the JDK rather than the JRE") - } + final File jps = Jvm.forHome(project.runtimeJavaHome).getExecutable('jps') commandLine jps, '-l' standardOutput = new ByteArrayOutputStream() doLast { @@ -914,10 +908,6 @@ class ClusterFormationTasks { } } - private static File getJpsExecutableByName(Project project, String jpsExecutableName) { - return Paths.get(project.runtimeJavaHome.toString(), "bin/" + jpsExecutableName).toFile() - } - /** Adds a task to kill an elasticsearch node with the given pidfile */ static Task configureStopTask(String name, Project project, Object depends, NodeInfo node) { return project.tasks.create(name: name, type: LoggedExec, dependsOn: depends) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java index 3aa52f896cd4..63ad809ac5c4 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java @@ -36,6 +36,7 @@ import org.elasticsearch.gradle.vagrant.VagrantExtension; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; import org.gradle.api.Project; +import org.gradle.api.Task; import org.gradle.api.artifacts.Configuration; import org.gradle.api.file.Directory; import org.gradle.api.plugins.ExtraPropertiesExtension; @@ -75,6 +76,7 @@ public class DistroTestPlugin implements Plugin { private static final String COPY_UPGRADE_TASK = "copyUpgradePackages"; private static final String COPY_PLUGINS_TASK = "copyPlugins"; private static final String IN_VM_SYSPROP = "tests.inVM"; + private static final String DISTRIBUTION_SYSPROP = "tests.distribution"; @Override public void apply(Project project) { @@ -89,14 +91,15 @@ public class DistroTestPlugin implements Plugin { Provider upgradeDir = project.getLayout().getBuildDirectory().dir("packaging/upgrade"); Provider pluginsDir = project.getLayout().getBuildDirectory().dir("packaging/plugins"); - configureDistributions(project, upgradeVersion); + List distributions = configureDistributions(project, upgradeVersion); TaskProvider copyDistributionsTask = configureCopyDistributionsTask(project, distributionsDir); TaskProvider copyUpgradeTask = configureCopyUpgradeTask(project, upgradeVersion, upgradeDir); TaskProvider copyPluginsTask = configureCopyPluginsTask(project, pluginsDir); - Map> distroTests = new HashMap<>(); Map> batsTests = new HashMap<>(); - distroTests.put("distribution", configureDistroTest(project, distributionsDir, copyDistributionsTask)); + for (ElasticsearchDistribution distribution : distributions) { + configureDistroTest(project, distribution); + } batsTests.put("bats oss", configureBatsTest(project, "oss", distributionsDir, copyDistributionsTask)); batsTests.put("bats default", configureBatsTest(project, "default", distributionsDir, copyDistributionsTask)); configureBatsTest(project, "plugins",distributionsDir, 
copyDistributionsTask, copyPluginsTask).configure(t -> @@ -109,17 +112,27 @@ public class DistroTestPlugin implements Plugin { vmProject.getPluginManager().apply(VagrantBasePlugin.class); vmProject.getPluginManager().apply(JdkDownloadPlugin.class); List vmDependencies = new ArrayList<>(configureVM(vmProject)); - // a hack to ensure the parent task has already been run. this will not be necessary once tests are per distribution - // which will eliminate the copy distributions task altogether - vmDependencies.add(copyDistributionsTask); vmDependencies.add(project.getConfigurations().getByName("testRuntimeClasspath")); - distroTests.forEach((desc, task) -> configureVMWrapperTask(vmProject, desc, task.getName(), vmDependencies)); - VagrantExtension vagrant = vmProject.getExtensions().getByType(VagrantExtension.class); + TaskProvider distroTest = vmProject.getTasks().register("distroTest"); + for (ElasticsearchDistribution distribution : distributions) { + String destructiveTaskName = destructiveDistroTestTaskName(distribution); + Platform platform = distribution.getPlatform(); + // this condition ensures windows boxes get windows distributions, and linux boxes get linux distributions + if (isWindows(vmProject) == (platform == Platform.WINDOWS)) { + TaskProvider vmTask = + configureVMWrapperTask(vmProject, distribution.getName() + " distribution", destructiveTaskName, vmDependencies); + vmTask.configure(t -> t.dependsOn(distribution)); + distroTest.configure(t -> t.dependsOn(vmTask)); + } + } + + batsTests.forEach((desc, task) -> { configureVMWrapperTask(vmProject, desc, task.getName(), vmDependencies).configure(t -> { t.setProgressHandler(new BatsProgressLogger(project.getLogger())); - t.onlyIf(spec -> vagrant.isWindowsVM() == false); // bats doesn't run on windows + t.onlyIf(spec -> isWindows(vmProject) == false); // bats doesn't run on windows + t.dependsOn(copyDistributionsTask); }); }); }); @@ -169,7 +182,7 @@ public class DistroTestPlugin implements Plugin { vagrant.setBox(box); vagrant.vmEnv("SYSTEM_JAVA_HOME", convertPath(project, vagrant, systemJdk, "", "")); vagrant.vmEnv("PATH", convertPath(project, vagrant, gradleJdk, "/bin:$PATH", "\\bin;$Env:PATH")); - vagrant.setIsWindowsVM(box.contains("windows")); + vagrant.setIsWindowsVM(isWindows(project)); return Arrays.asList(systemJdk, gradleJdk); } @@ -269,15 +282,14 @@ public class DistroTestPlugin implements Plugin { }); } - private static TaskProvider configureDistroTest(Project project, Provider distributionsDir, - TaskProvider copyPackagingArchives) { - // TODO: don't run with security manager... 
- return project.getTasks().register("destructiveDistroTest", Test.class, + private static TaskProvider configureDistroTest(Project project, ElasticsearchDistribution distribution) { + return project.getTasks().register(destructiveDistroTestTaskName(distribution), Test.class, t -> { t.setMaxParallelForks(1); - t.setWorkingDir(distributionsDir); + t.setWorkingDir(project.getProjectDir()); + t.systemProperty(DISTRIBUTION_SYSPROP, distribution.toString()); if (System.getProperty(IN_VM_SYSPROP) == null) { - t.dependsOn(copyPackagingArchives); + t.dependsOn(distribution); } }); } @@ -297,7 +309,7 @@ public class DistroTestPlugin implements Plugin { }); } - private void configureDistributions(Project project, Version upgradeVersion) { + private List configureDistributions(Project project, Version upgradeVersion) { NamedDomainObjectContainer distributions = DistributionDownloadPlugin.getContainer(project); List currentDistros = new ArrayList<>(); List upgradeDistros = new ArrayList<>(); @@ -335,13 +347,15 @@ public class DistroTestPlugin implements Plugin { List distroUpgradeConfigs = upgradeDistros.stream().map(ElasticsearchDistribution::getConfiguration) .collect(Collectors.toList()); packagingUpgradeConfig.setExtendsFrom(distroUpgradeConfigs); + + return currentDistros; } private static void addDistro(NamedDomainObjectContainer distributions, Type type, Platform platform, Flavor flavor, boolean bundledJdk, String version, List container) { - String name = flavor + "-" + (type == Type.ARCHIVE ? platform + "-" : "") + type + (bundledJdk ? "" : "-no-jdk") + "-" + version; + String name = distroId(type, platform, flavor, bundledJdk) + "-" + version; if (distributions.findByName(name) != null) { return; } @@ -356,4 +370,17 @@ public class DistroTestPlugin implements Plugin { }); container.add(distro); } + + // return true if the project is for a windows VM, false otherwise + private static boolean isWindows(Project project) { + return project.getName().contains("windows"); + } + + private static String distroId(Type type, Platform platform, Flavor flavor, boolean bundledJdk) { + return flavor + "-" + (type == Type.ARCHIVE ? platform + "-" : "") + type + (bundledJdk ? "" : "-no-jdk"); + } + + private static String destructiveDistroTestTaskName(ElasticsearchDistribution distro) { + return "destructiveDistroTest." 
+ distroId(distro.getType(), distro.getPlatform(), distro.getFlavor(), distro.getBundledJdk()); + } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index 1c2f5ab3441b..aef41d0a16ae 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -62,6 +62,7 @@ class RestIntegTestTask extends DefaultTask { boolean usesTestclusters = project.plugins.hasPlugin(TestClustersPlugin.class) if (usesTestclusters == false) { clusterConfig = project.extensions.create("${name}Cluster", ClusterConfiguration.class, project) + runner.outputs.doNotCacheIf("Caching is disabled when using ClusterFormationTasks", { true }) } else { project.testClusters { "$name" { diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalBuildInfoPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalBuildInfoPlugin.java index 29d9f23c0eb4..d97f5fb2cd32 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalBuildInfoPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalBuildInfoPlugin.java @@ -93,7 +93,7 @@ public class GlobalBuildInfoPlugin implements Plugin { ext.set("minimumCompilerVersion", minimumCompilerVersion); ext.set("minimumRuntimeVersion", minimumRuntimeVersion); ext.set("gradleJavaVersion", Jvm.current().getJavaVersion()); - ext.set("gitRevision", gitRevision(project)); + ext.set("gitRevision", gitRevision(project.getRootProject().getRootDir())); ext.set("buildDate", ZonedDateTime.now(ZoneOffset.UTC)); }); } @@ -204,7 +204,7 @@ public class GlobalBuildInfoPlugin implements Plugin { return _defaultParallel; } - private String gitRevision(final Project project) { + public static String gitRevision(File rootDir) { try { /* * We want to avoid forking another process to run git rev-parse HEAD. Instead, we will read the refs manually. The @@ -222,7 +222,7 @@ public class GlobalBuildInfoPlugin implements Plugin { * In the case of a worktree, we read the gitdir from the plain text .git file. This resolves to a directory from which we read * the HEAD file and resolve commondir to the plain git repository. 
*/ - final Path dotGit = project.getRootProject().getRootDir().toPath().resolve(".git"); + final Path dotGit = rootDir.toPath().resolve(".git"); final String revision; if (Files.exists(dotGit) == false) { return "unknown"; @@ -259,7 +259,7 @@ public class GlobalBuildInfoPlugin implements Plugin { } } - private String readFirstLine(final Path path) throws IOException { + private static String readFirstLine(final Path path) throws IOException { return Files.lines(path, StandardCharsets.UTF_8) .findFirst() .orElseThrow(() -> new IOException("file [" + path + "] is empty")); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java index c20f0128f237..826ec1211a66 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.gradle.testclusters; -import org.elasticsearch.gradle.ElasticsearchDistribution; import org.elasticsearch.gradle.FileSupplier; import org.elasticsearch.gradle.PropertyNormalization; import org.elasticsearch.gradle.ReaperService; @@ -59,24 +58,23 @@ public class ElasticsearchCluster implements TestClusterConfiguration, Named { private final String clusterName; private final NamedDomainObjectContainer nodes; private final File workingDirBase; - private final Function distributionFactory; private final LinkedHashMap> waitConditions = new LinkedHashMap<>(); private final Project project; private final ReaperService reaper; + private int nodeIndex = 0; - public ElasticsearchCluster(String path, String clusterName, Project project, ReaperService reaper, - Function distributionFactory, File workingDirBase) { + public ElasticsearchCluster(String path, String clusterName, Project project, + ReaperService reaper, File workingDirBase) { this.path = path; this.clusterName = clusterName; this.project = project; this.reaper = reaper; - this.distributionFactory = distributionFactory; this.workingDirBase = workingDirBase; this.nodes = project.container(ElasticsearchNode.class); this.nodes.add( new ElasticsearchNode( path, clusterName + "-0", - project, reaper, workingDirBase, distributionFactory.apply(0) + project, reaper, workingDirBase ) ); // configure the cluster name eagerly so nodes know about it @@ -100,7 +98,7 @@ public class ElasticsearchCluster implements TestClusterConfiguration, Named { for (int i = nodes.size() ; i < numberOfNodes; i++) { this.nodes.add(new ElasticsearchNode( - path, clusterName + "-" + i, project, reaper, workingDirBase, distributionFactory.apply(i) + path, clusterName + "-" + i, project, reaper, workingDirBase )); } } @@ -126,6 +124,11 @@ public class ElasticsearchCluster implements TestClusterConfiguration, Named { nodes.all(each -> each.setVersion(version)); } + @Override + public void setVersions(List version) { + nodes.all(each -> each.setVersions(version)); + } + @Override public void setTestDistribution(TestDistribution distribution) { nodes.all(each -> each.setTestDistribution(distribution)); @@ -249,8 +252,8 @@ public class ElasticsearchCluster implements TestClusterConfiguration, Named { if (nodes.stream().map(ElasticsearchNode::getName).anyMatch( name -> name == null)) { nodeNames = null; } else { - nodeNames = nodes.stream().map(ElasticsearchNode::getName).collect(Collectors.joining(",")); - }; + nodeNames = 
nodes.stream().map(ElasticsearchNode::getName).map(this::safeName).collect(Collectors.joining(",")); + } for (ElasticsearchNode node : nodes) { if (nodeNames != null) { // Can only configure master nodes if we have node names defined @@ -269,6 +272,19 @@ public class ElasticsearchCluster implements TestClusterConfiguration, Named { nodes.forEach(ElasticsearchNode::restart); } + @Override + public void goToNextVersion() { + nodes.all(ElasticsearchNode::goToNextVersion); + } + + public void nextNodeToNextVersion() { + if (nodeIndex + 1 > nodes.size()) { + throw new TestClustersException("Ran out of nodes to take to the next version"); + } + nodes.getByName(clusterName + "-" + nodeIndex).goToNextVersion(); + nodeIndex += 1; + } + @Override public void extraConfigFile(String destination, File from) { nodes.all(node -> node.extraConfigFile(destination, from)); @@ -363,7 +379,6 @@ public class ElasticsearchCluster implements TestClusterConfiguration, Named { nodes.size() ); if (httpSslEnabled) { - getFirstNode().configureHttpWait(wait); } List> credentials = getFirstNode().getCredentials(); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index 900a0f99ed9f..2cc7fbe24dc6 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.gradle.testclusters; +import org.elasticsearch.gradle.DistributionDownloadPlugin; import org.elasticsearch.gradle.ElasticsearchDistribution; import org.elasticsearch.gradle.FileSupplier; import org.elasticsearch.gradle.LazyPropertyList; @@ -31,8 +32,8 @@ import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.http.WaitForHttpResource; import org.gradle.api.Action; import org.gradle.api.Named; +import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Project; -import org.gradle.api.file.FileCollection; import org.gradle.api.logging.Logger; import org.gradle.api.logging.Logging; import org.gradle.api.tasks.Classpath; @@ -71,6 +72,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.TreeSet; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -135,23 +137,23 @@ public class ElasticsearchNode implements TestClusterConfiguration { private final Path esStdoutFile; private final Path esStderrFile; private final Path tmpDir; + private final Path distroDir; - private String version; + private int currentDistro = 0; private TestDistribution testDistribution; - private ElasticsearchDistribution distribution; + private List distributions = new ArrayList<>(); private File javaHome; private volatile Process esProcess; private Function nameCustomization = Function.identity(); private boolean isWorkingDirConfigured = false; - ElasticsearchNode(String path, String name, Project project, ReaperService reaper, File workingDirBase, - ElasticsearchDistribution distribution) { + ElasticsearchNode(String path, String name, Project project, ReaperService reaper, File workingDirBase) { this.path = path; this.name = name; this.project = project; this.reaper = reaper; - this.workingDir = workingDirBase.toPath().resolve(safeName(name)).toAbsolutePath(); - this.distribution = distribution; + workingDir = 
workingDirBase.toPath().resolve(safeName(name)).toAbsolutePath(); + distroDir = workingDir.resolve("distro"); confPathRepo = workingDir.resolve("repo"); configFile = workingDir.resolve("config/elasticsearch.yml"); confPathData = workingDir.resolve("data"); @@ -173,15 +175,31 @@ public class ElasticsearchNode implements TestClusterConfiguration { @Internal public Version getVersion() { - return distribution.getVersion(); + return distributions.get(currentDistro).getVersion(); } @Override public void setVersion(String version) { requireNonNull(version, "null version passed when configuring test cluster `" + this + "`"); + String distroName = "testclusters" + path.replace(":", "-") + "-" + this.name + "-" + version + "-"; + NamedDomainObjectContainer container = DistributionDownloadPlugin.getContainer(project); + if (container.findByName(distroName) == null){ + container.create(distroName); + } + ElasticsearchDistribution distro = container.getByName(distroName); + distro.setVersion(version); + setDistributionType(distro, testDistribution); + distributions.add(distro); + } + + @Override + public void setVersions(List versions) { + requireNonNull(versions, "null version list passed when configuring test cluster `" + this + "`"); checkFrozen(); - this.version = version; - this.distribution.setVersion(version); + distributions.clear(); + for (String version : versions) { + setVersion(version); + } } @Internal @@ -191,8 +209,8 @@ public class ElasticsearchNode implements TestClusterConfiguration { // package private just so test clusters plugin can access to wire up task dependencies @Internal - ElasticsearchDistribution getDistribution() { - return distribution; + List getDistributions() { + return distributions; } @Override @@ -200,14 +218,20 @@ public class ElasticsearchNode implements TestClusterConfiguration { requireNonNull(testDistribution, "null distribution passed when configuring test cluster `" + this + "`"); checkFrozen(); this.testDistribution = testDistribution; + for (ElasticsearchDistribution distribution : distributions) { + setDistributionType(distribution, testDistribution); + } + } + + private void setDistributionType(ElasticsearchDistribution distribution, TestDistribution testDistribution) { if (testDistribution == TestDistribution.INTEG_TEST) { - this.distribution.setType(ElasticsearchDistribution.Type.INTEG_TEST_ZIP); + distribution.setType(ElasticsearchDistribution.Type.INTEG_TEST_ZIP); } else { - this.distribution.setType(ElasticsearchDistribution.Type.ARCHIVE); + distribution.setType(ElasticsearchDistribution.Type.ARCHIVE); if (testDistribution == TestDistribution.DEFAULT) { - this.distribution.setFlavor(ElasticsearchDistribution.Flavor.DEFAULT); + distribution.setFlavor(ElasticsearchDistribution.Flavor.DEFAULT); } else { - this.distribution.setFlavor(ElasticsearchDistribution.Flavor.OSS); + distribution.setFlavor(ElasticsearchDistribution.Flavor.OSS); } } } @@ -317,8 +341,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { @Override public void freeze() { - requireNonNull(distribution, "null distribution passed when configuring test cluster `" + this + "`"); - requireNonNull(getVersion(), "null version passed when configuring test cluster `" + this + "`"); + requireNonNull(distributions, "null distribution passed when configuring test cluster `" + this + "`"); requireNonNull(javaHome, "null javaHome passed when configuring test cluster `" + this + "`"); LOGGER.info("Locking configuration of `{}`", this); configurationFrozen.set(true); @@ -361,10 
+384,13 @@ public class ElasticsearchNode implements TestClusterConfiguration { try { if (isWorkingDirConfigured == false) { logToProcessStdout("Configuring working directory: " + workingDir); - // Only configure working dir once so we don't lose data on restarts + // make sure we always start fresh + if (Files.exists(workingDir)) { + project.delete(workingDir); + } isWorkingDirConfigured = true; - createWorkingDir(getExtractedDistributionDir()); } + createWorkingDir(getExtractedDistributionDir()); } catch (IOException e) { throw new UncheckedIOException("Failed to create working directory for " + this, e); } @@ -446,6 +472,18 @@ public class ElasticsearchNode implements TestClusterConfiguration { start(); } + @Override + public void goToNextVersion() { + if (currentDistro + 1 >= distributions.size()) { + throw new TestClustersException("Ran out of versions to go to for " + this); + } + LOGGER.info("Switch version from {} to {} for {}", + getVersion(), distributions.get(currentDistro + 1).getVersion(), this + ); + currentDistro += 1; + restart(); + } + private boolean isSettingMissingOrTrue(String name) { return Boolean.valueOf(settings.getOrDefault(name, "false").toString()); } @@ -474,8 +512,9 @@ public class ElasticsearchNode implements TestClusterConfiguration { if (testDistribution == TestDistribution.INTEG_TEST) { logToProcessStdout("Installing " + modules.size() + "modules"); for (File module : modules) { - Path destination = workingDir.resolve("modules").resolve(module.getName().replace(".zip", "") - .replace("-" + version, "")); + Path destination = distroDir.resolve("modules").resolve(module.getName().replace(".zip", "") + .replace("-" + getVersion(), "") + .replace("-SNAPSHOT", "")); // only install modules that are not already bundled with the integ-test distribution if (Files.exists(destination) == false) { @@ -492,7 +531,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { } } } else { - LOGGER.info("Not installing " + modules.size() + "(s) since the " + distribution + " distribution already " + + LOGGER.info("Not installing " + modules.size() + "(s) since the " + distributions + " distribution already " + "has them"); } } @@ -533,8 +572,8 @@ public class ElasticsearchNode implements TestClusterConfiguration { private void runElaticsearchBinScriptWithInput(String input, String tool, String... args) { if ( - Files.exists(workingDir.resolve("bin").resolve(tool)) == false && - Files.exists(workingDir.resolve("bin").resolve(tool + ".bat")) == false + Files.exists(distroDir.resolve("bin").resolve(tool)) == false && + Files.exists(distroDir.resolve("bin").resolve(tool + ".bat")) == false ) { throw new TestClustersException("Can't run bin script: `" + tool + "` does not exist. 
" + "Is this the distribution you expect it to be ?"); @@ -542,7 +581,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { try (InputStream byteArrayInputStream = new ByteArrayInputStream(input.getBytes(StandardCharsets.UTF_8))) { LoggedExec.exec(project, spec -> { spec.setEnvironment(getESEnvironment()); - spec.workingDir(workingDir); + spec.workingDir(distroDir); spec.executable( OS.conditionalString() .onUnix(() -> "./bin/" + tool) @@ -620,8 +659,8 @@ public class ElasticsearchNode implements TestClusterConfiguration { final ProcessBuilder processBuilder = new ProcessBuilder(); List command = OS.>conditional() - .onUnix(() -> Arrays.asList("./bin/elasticsearch")) - .onWindows(() -> Arrays.asList("cmd", "/c", "bin\\elasticsearch.bat")) + .onUnix(() -> Arrays.asList(distroDir.getFileName().resolve("./bin/elasticsearch").toString())) + .onWindows(() -> Arrays.asList("cmd", "/c", distroDir.getFileName().resolve("bin\\elasticsearch.bat").toString())) .supply(); processBuilder.command(command); processBuilder.directory(workingDir.toFile()); @@ -821,7 +860,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { } private void createWorkingDir(Path distroExtractDir) throws IOException { - syncWithLinks(distroExtractDir, workingDir); + syncWithLinks(distroExtractDir, distroDir); Files.createDirectories(configFile.getParent()); Files.createDirectories(confPathRepo); Files.createDirectories(confPathData); @@ -844,7 +883,14 @@ public class ElasticsearchNode implements TestClusterConfiguration { try (Stream stream = Files.walk(sourceRoot)) { stream.forEach(source -> { - Path destination = destinationRoot.resolve(sourceRoot.relativize(source)); + Path relativeDestination = sourceRoot.relativize(source); + if (relativeDestination.getNameCount() <= 1) { + return; + } + // Throw away the first name as the archives have everything in a single top level folder we are not interested in + relativeDestination = relativeDestination.subpath(1, relativeDestination.getNameCount()); + + Path destination = destinationRoot.resolve(relativeDestination); if (Files.isDirectory(source)) { try { Files.createDirectories(destination); @@ -920,9 +966,6 @@ public class ElasticsearchNode implements TestClusterConfiguration { .forEach(defaultConfig::remove); try { - // We create hard links for the distribution, so we need to remove the config file before writing it - // to prevent the changes to reflect across all copies. 
- Files.delete(configFile); Files.write( configFile, Stream.concat( @@ -931,8 +974,21 @@ public class ElasticsearchNode implements TestClusterConfiguration { ) .map(entry -> entry.getKey() + ": " + entry.getValue()) .collect(Collectors.joining("\n")) - .getBytes(StandardCharsets.UTF_8) + .getBytes(StandardCharsets.UTF_8), + StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE ); + + final List configFiles; + try (Stream stream = Files.list(distroDir.resolve("config"))) { + configFiles = stream.collect(Collectors.toList()); + } + logToProcessStdout("Copying additional config files from distro " + configFiles); + for (Path file : configFiles) { + Path dest = configFile.getParent().resolve(file.getFileName()); + if (Files.exists(dest) == false) { + Files.copy(file, dest); + } + } } catch (IOException e) { throw new UncheckedIOException("Could not write config file: " + configFile, e); } @@ -972,7 +1028,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { } private Path getExtractedDistributionDir() { - return Paths.get(distribution.getExtracted().toString()).resolve("elasticsearch-" + version); + return Paths.get(distributions.get(currentDistro).getExtracted().toString()); } private List getInstalledFileSet(Action filter) { @@ -1007,19 +1063,26 @@ public class ElasticsearchNode implements TestClusterConfiguration { } @Classpath - private List getDistributionClasspath() { - ArrayList files = new ArrayList<>(project.fileTree(getExtractedDistributionDir()) - .matching(filter -> filter.include("**/*.jar")) - .getFiles()); - files.sort(Comparator.comparing(File::getName)); - - return files; + private Set getDistributionClasspath() { + return getDistributionFiles(filter -> filter.include("**/*.jar")); } @InputFiles @PathSensitive(PathSensitivity.RELATIVE) - private FileCollection getDistributionFiles() { - return project.fileTree(getExtractedDistributionDir()).minus(project.files(getDistributionClasspath())); + private Set getDistributionFiles() { + return getDistributionFiles(filter -> filter.exclude("**/*.jar")); + } + + private Set getDistributionFiles(Action patternFilter) { + Set files = new TreeSet<>(); + for (ElasticsearchDistribution distribution : distributions) { + files.addAll( + project.fileTree(Paths.get(distribution.getExtracted().toString())) + .matching(patternFilter) + .getFiles() + ); + } + return files; } @Nested diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java index 8b8c980f523f..1b2eb44e66b6 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java @@ -38,6 +38,8 @@ public interface TestClusterConfiguration { void setVersion(String version); + void setVersions(List version); + void setTestDistribution(TestDistribution distribution); void plugin(URI plugin); @@ -86,6 +88,8 @@ public interface TestClusterConfiguration { void restart(); + void goToNextVersion(); + void extraConfigFile(String destination, File from); void extraConfigFile(String destination, File from, PropertyNormalization normalization); @@ -165,7 +169,7 @@ public interface TestClusterConfiguration { default String safeName(String name) { return name .replaceAll("^[^a-zA-Z0-9]+", "") - .replaceAll("[^a-zA-Z0-9]+", "-"); + .replaceAll("[^a-zA-Z0-9\\.]+", "-"); } boolean isProcessAlive(); diff --git 
a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java index 72703399c95a..1669a62d57b5 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java @@ -18,9 +18,9 @@ interface TestClustersAware extends Task { ); } - for (ElasticsearchNode node : cluster.getNodes()) { - this.dependsOn(node.getDistribution().getExtracted()); - } + cluster.getNodes().stream().flatMap(node -> node.getDistributions().stream()).forEach( distro -> + dependsOn(distro.getExtracted()) + ); getClusters().add(cluster); } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java index b6c8c39e2edf..5b6d6b4194a7 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java @@ -19,7 +19,6 @@ package org.elasticsearch.gradle.testclusters; import org.elasticsearch.gradle.DistributionDownloadPlugin; -import org.elasticsearch.gradle.ElasticsearchDistribution; import org.elasticsearch.gradle.ReaperPlugin; import org.elasticsearch.gradle.ReaperService; import org.gradle.api.NamedDomainObjectContainer; @@ -76,8 +75,6 @@ public class TestClustersPlugin implements Plugin { } private NamedDomainObjectContainer createTestClustersContainerExtension(Project project) { - NamedDomainObjectContainer distros = DistributionDownloadPlugin.getContainer(project); - // Create an extensions that allows describing clusters NamedDomainObjectContainer container = project.container( ElasticsearchCluster.class, @@ -86,7 +83,6 @@ public class TestClustersPlugin implements Plugin { name, project, reaper, - i -> distros.create(name + "-" + i), new File(project.getBuildDir(), "testclusters") ) ); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java index 35f6fffc39bf..fe1c75cdd44b 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersRegistry.java @@ -48,7 +48,7 @@ public class TestClustersRegistry { } } } else { - cluster.stop(false); + cluster.stop(true); runningClusters.remove(cluster); } } else { diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/VagrantShellTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/VagrantShellTask.java index 538759b31ffc..89d95de5d441 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/VagrantShellTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/vagrant/VagrantShellTask.java @@ -104,7 +104,7 @@ public abstract class VagrantShellTask extends DefaultTask { spec.setProgressHandler(progressHandler); }); } catch (Exception e) { - getLogger().error("Failed command, dumping dmesg", e); + /*getLogger().error("Failed command, dumping dmesg", e); service.execute(spec -> { spec.setCommand("ssh"); spec.setArgs("--command", "dmesg"); @@ -112,7 +112,7 @@ public abstract class VagrantShellTask extends DefaultTask { getLogger().error(line); return null; }); - }); + });*/ throw e; } } diff --git 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java index e49a8539753e..9df33b410a79 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java @@ -39,7 +39,6 @@ import org.elasticsearch.client.ml.DeleteFilterRequest; import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; import org.elasticsearch.client.ml.DeleteModelSnapshotRequest; -import org.elasticsearch.client.ml.EstimateMemoryUsageRequest; import org.elasticsearch.client.ml.EvaluateDataFrameRequest; import org.elasticsearch.client.ml.FindFileStructureRequest; import org.elasticsearch.client.ml.FlushJobRequest; @@ -701,7 +700,7 @@ final class MLRequestConverters { return request; } - static Request estimateMemoryUsage(EstimateMemoryUsageRequest estimateRequest) throws IOException { + static Request estimateMemoryUsage(PutDataFrameAnalyticsRequest estimateRequest) throws IOException { String endpoint = new EndpointBuilder() .addPathPartAsIs("_ml", "data_frame", "analytics", "_estimate_memory_usage") .build(); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java index c39c11c7e15e..ef078cf52dbd 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java @@ -34,7 +34,6 @@ import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; import org.elasticsearch.client.ml.DeleteJobResponse; import org.elasticsearch.client.ml.DeleteModelSnapshotRequest; -import org.elasticsearch.client.ml.EstimateMemoryUsageRequest; import org.elasticsearch.client.ml.EstimateMemoryUsageResponse; import org.elasticsearch.client.ml.EvaluateDataFrameRequest; import org.elasticsearch.client.ml.EvaluateDataFrameResponse; @@ -2195,14 +2194,15 @@ public final class MachineLearningClient { * see * Estimate Memory Usage for Data Frame Analytics documentation * - * @param request The {@link EstimateMemoryUsageRequest} + * @param request The {@link PutDataFrameAnalyticsRequest} * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return {@link EstimateMemoryUsageResponse} response object * @throws IOException when there is a serialization issue sending the request or receiving the response */ - public EstimateMemoryUsageResponse estimateMemoryUsage(EstimateMemoryUsageRequest request, + public EstimateMemoryUsageResponse estimateMemoryUsage(PutDataFrameAnalyticsRequest request, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(request, + return restHighLevelClient.performRequestAndParseEntity( + request, MLRequestConverters::estimateMemoryUsage, options, EstimateMemoryUsageResponse::fromXContent, @@ -2216,13 +2216,14 @@ public final class MachineLearningClient { * see * Estimate Memory Usage for Data Frame Analytics documentation * - * @param request The {@link EstimateMemoryUsageRequest} + * @param request The {@link PutDataFrameAnalyticsRequest} * @param options Additional request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion */ - public void estimateMemoryUsageAsync(EstimateMemoryUsageRequest request, RequestOptions options, + public void estimateMemoryUsageAsync(PutDataFrameAnalyticsRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + restHighLevelClient.performRequestAsyncAndParseEntity( + request, MLRequestConverters::estimateMemoryUsage, options, EstimateMemoryUsageResponse::fromXContent, diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 6adc287645c7..78a84b33ff83 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -554,7 +554,8 @@ final class RequestConverters { .withRefresh(reindexRequest.isRefresh()) .withTimeout(reindexRequest.getTimeout()) .withWaitForActiveShards(reindexRequest.getWaitForActiveShards()) - .withRequestsPerSecond(reindexRequest.getRequestsPerSecond()); + .withRequestsPerSecond(reindexRequest.getRequestsPerSecond()) + .withSlices(reindexRequest.getSlices()); if (reindexRequest.getScrollTime() != null) { params.putParam("scroll", reindexRequest.getScrollTime()); @@ -897,6 +898,10 @@ final class RequestConverters { return putParam("routing", routing); } + Params withSlices(int slices) { + return putParam("slices", String.valueOf(slices)); + } + Params withStoredFields(String[] storedFields) { if (storedFields != null && storedFields.length > 0) { return putParam("stored_fields", String.join(",", storedFields)); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java index f3a49f064596..d3b2ea466f45 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java @@ -20,6 +20,8 @@ package org.elasticsearch.client; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; @@ -170,6 +172,35 @@ public final class SnapshotClient { VerifyRepositoryResponse::fromXContent, listener, emptySet()); } + /** + * Cleans up a snapshot repository. + * See Snapshot and Restore + * API on elastic.co + * @param cleanupRepositoryRequest the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public CleanupRepositoryResponse cleanupRepository(CleanupRepositoryRequest cleanupRepositoryRequest, RequestOptions options) + throws IOException { + return restHighLevelClient.performRequestAndParseEntity(cleanupRepositoryRequest, SnapshotRequestConverters::cleanupRepository, + options, CleanupRepositoryResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously cleans up a snapshot repository. + * See Snapshot and Restore + * API on elastic.co + * @param cleanupRepositoryRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void cleanupRepositoryAsync(CleanupRepositoryRequest cleanupRepositoryRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(cleanupRepositoryRequest, SnapshotRequestConverters::cleanupRepository, + options, CleanupRepositoryResponse::fromXContent, listener, emptySet()); + } + /** * Creates a snapshot. *

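Editor's sketch (not part of the patch): one way the `cleanupRepository`/`cleanupRepositoryAsync` methods added to `SnapshotClient` above might be used. It assumes an already-constructed `RestHighLevelClient` named `client`, uses the placeholder repository name "my_repository", and assumes `CleanupRepositoryRequest` accepts the repository name in its constructor, consistent with the `cleanupRepositoryRequest.name()` call in the request converter below:

-------------------------------------------------
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse;
import org.elasticsearch.client.RequestOptions;

// Blocking variant: returns once the cleanup request has completed on the cluster.
CleanupRepositoryRequest cleanupRequest = new CleanupRepositoryRequest("my_repository");
CleanupRepositoryResponse cleanupResponse =
    client.snapshot().cleanupRepository(cleanupRequest, RequestOptions.DEFAULT);

// Non-blocking variant: the listener is notified on completion or failure.
client.snapshot().cleanupRepositoryAsync(cleanupRequest, RequestOptions.DEFAULT,
    ActionListener.wrap(
        response -> { /* inspect the CleanupRepositoryResponse */ },
        exception -> { /* handle the failure */ }));
-------------------------------------------------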
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotRequestConverters.java index 406470ea52cd..703aa0d67255 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotRequestConverters.java @@ -23,6 +23,7 @@ import org.apache.http.client.methods.HttpDelete; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; @@ -94,6 +95,20 @@ final class SnapshotRequestConverters { return request; } + static Request cleanupRepository(CleanupRepositoryRequest cleanupRepositoryRequest) { + String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_snapshot") + .addPathPart(cleanupRepositoryRequest.name()) + .addPathPartAsIs("_cleanup") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + + RequestConverters.Params parameters = new RequestConverters.Params(); + parameters.withMasterTimeout(cleanupRepositoryRequest.masterNodeTimeout()); + parameters.withTimeout(cleanupRepositoryRequest.timeout()); + request.addParameters(parameters.asMap()); + return request; + } + static Request createSnapshot(CreateSnapshotRequest createSnapshotRequest) throws IOException { String endpoint = new RequestConverters.EndpointBuilder().addPathPart("_snapshot") .addPathPart(createSnapshotRequest.repository()) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/EstimateMemoryUsageRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/EstimateMemoryUsageRequest.java deleted file mode 100644 index 1de435647a32..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/EstimateMemoryUsageRequest.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.client.ml; - -import org.elasticsearch.client.Validatable; -import org.elasticsearch.client.ml.dataframe.DataFrameAnalyticsConfig; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; - -import java.io.IOException; -import java.util.Objects; - -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; - -public class EstimateMemoryUsageRequest implements ToXContentObject, Validatable { - - private static final ParseField DATA_FRAME_ANALYTICS_CONFIG = new ParseField("data_frame_analytics_config"); - - private static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>( - "estimate_memory_usage_request", - true, - args -> { - DataFrameAnalyticsConfig config = (DataFrameAnalyticsConfig) args[0]; - return new EstimateMemoryUsageRequest(config); - }); - - static { - PARSER.declareObject(constructorArg(), (p, c) -> DataFrameAnalyticsConfig.fromXContent(p), DATA_FRAME_ANALYTICS_CONFIG); - } - - public static EstimateMemoryUsageRequest fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - - private final DataFrameAnalyticsConfig config; - - public EstimateMemoryUsageRequest(DataFrameAnalyticsConfig config) { - this.config = Objects.requireNonNull(config); - } - - public DataFrameAnalyticsConfig getConfig() { - return config; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(DATA_FRAME_ANALYTICS_CONFIG.getPreferredName(), config); - builder.endObject(); - return builder; - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } - if (other == null || getClass() != other.getClass()) { - return false; - } - - EstimateMemoryUsageRequest that = (EstimateMemoryUsageRequest) other; - return Objects.equals(config, that.config); - } - - @Override - public int hashCode() { - return Objects.hash(config); - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/EstimateMemoryUsageResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/EstimateMemoryUsageResponse.java index a007a46cc63a..c97cc545cdb7 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/EstimateMemoryUsageResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/EstimateMemoryUsageResponse.java @@ -35,10 +35,8 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optiona public class EstimateMemoryUsageResponse implements ToXContentObject { - public static final ParseField EXPECTED_MEMORY_USAGE_WITH_ONE_PARTITION = - new ParseField("expected_memory_usage_with_one_partition"); - public static final ParseField EXPECTED_MEMORY_USAGE_WITH_MAX_PARTITIONS = - new ParseField("expected_memory_usage_with_max_partitions"); + public static final ParseField EXPECTED_MEMORY_WITHOUT_DISK = new ParseField("expected_memory_without_disk"); + public static final ParseField EXPECTED_MEMORY_WITH_DISK = new ParseField("expected_memory_with_disk"); static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( @@ -49,13 +47,13 @@ public class EstimateMemoryUsageResponse implements ToXContentObject { static { PARSER.declareField( 
optionalConstructorArg(), - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), EXPECTED_MEMORY_USAGE_WITH_ONE_PARTITION.getPreferredName()), - EXPECTED_MEMORY_USAGE_WITH_ONE_PARTITION, + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), EXPECTED_MEMORY_WITHOUT_DISK.getPreferredName()), + EXPECTED_MEMORY_WITHOUT_DISK, ObjectParser.ValueType.VALUE); PARSER.declareField( optionalConstructorArg(), - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), EXPECTED_MEMORY_USAGE_WITH_MAX_PARTITIONS.getPreferredName()), - EXPECTED_MEMORY_USAGE_WITH_MAX_PARTITIONS, + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), EXPECTED_MEMORY_WITH_DISK.getPreferredName()), + EXPECTED_MEMORY_WITH_DISK, ObjectParser.ValueType.VALUE); } @@ -63,33 +61,30 @@ public class EstimateMemoryUsageResponse implements ToXContentObject { return PARSER.apply(parser, null); } - private final ByteSizeValue expectedMemoryUsageWithOnePartition; - private final ByteSizeValue expectedMemoryUsageWithMaxPartitions; + private final ByteSizeValue expectedMemoryWithoutDisk; + private final ByteSizeValue expectedMemoryWithDisk; - public EstimateMemoryUsageResponse(@Nullable ByteSizeValue expectedMemoryUsageWithOnePartition, - @Nullable ByteSizeValue expectedMemoryUsageWithMaxPartitions) { - this.expectedMemoryUsageWithOnePartition = expectedMemoryUsageWithOnePartition; - this.expectedMemoryUsageWithMaxPartitions = expectedMemoryUsageWithMaxPartitions; + public EstimateMemoryUsageResponse(@Nullable ByteSizeValue expectedMemoryWithoutDisk, @Nullable ByteSizeValue expectedMemoryWithDisk) { + this.expectedMemoryWithoutDisk = expectedMemoryWithoutDisk; + this.expectedMemoryWithDisk = expectedMemoryWithDisk; } - public ByteSizeValue getExpectedMemoryUsageWithOnePartition() { - return expectedMemoryUsageWithOnePartition; + public ByteSizeValue getExpectedMemoryWithoutDisk() { + return expectedMemoryWithoutDisk; } - public ByteSizeValue getExpectedMemoryUsageWithMaxPartitions() { - return expectedMemoryUsageWithMaxPartitions; + public ByteSizeValue getExpectedMemoryWithDisk() { + return expectedMemoryWithDisk; } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - if (expectedMemoryUsageWithOnePartition != null) { - builder.field( - EXPECTED_MEMORY_USAGE_WITH_ONE_PARTITION.getPreferredName(), expectedMemoryUsageWithOnePartition.getStringRep()); + if (expectedMemoryWithoutDisk != null) { + builder.field(EXPECTED_MEMORY_WITHOUT_DISK.getPreferredName(), expectedMemoryWithoutDisk.getStringRep()); } - if (expectedMemoryUsageWithMaxPartitions != null) { - builder.field( - EXPECTED_MEMORY_USAGE_WITH_MAX_PARTITIONS.getPreferredName(), expectedMemoryUsageWithMaxPartitions.getStringRep()); + if (expectedMemoryWithDisk != null) { + builder.field(EXPECTED_MEMORY_WITH_DISK.getPreferredName(), expectedMemoryWithDisk.getStringRep()); } builder.endObject(); return builder; @@ -105,12 +100,12 @@ public class EstimateMemoryUsageResponse implements ToXContentObject { } EstimateMemoryUsageResponse that = (EstimateMemoryUsageResponse) other; - return Objects.equals(expectedMemoryUsageWithOnePartition, that.expectedMemoryUsageWithOnePartition) - && Objects.equals(expectedMemoryUsageWithMaxPartitions, that.expectedMemoryUsageWithMaxPartitions); + return Objects.equals(expectedMemoryWithoutDisk, that.expectedMemoryWithoutDisk) + && Objects.equals(expectedMemoryWithDisk, that.expectedMemoryWithDisk); } @Override public int hashCode() { - return 
Objects.hash(expectedMemoryUsageWithOnePartition, expectedMemoryUsageWithMaxPartitions); + return Objects.hash(expectedMemoryWithoutDisk, expectedMemoryWithDisk); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java index f593eef5f589..5cefeccb91ea 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java @@ -35,7 +35,6 @@ import org.elasticsearch.client.ml.DeleteFilterRequest; import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; import org.elasticsearch.client.ml.DeleteModelSnapshotRequest; -import org.elasticsearch.client.ml.EstimateMemoryUsageRequest; import org.elasticsearch.client.ml.EvaluateDataFrameRequest; import org.elasticsearch.client.ml.FindFileStructureRequest; import org.elasticsearch.client.ml.FindFileStructureRequestTests; @@ -797,13 +796,13 @@ public class MLRequestConvertersTests extends ESTestCase { } public void testEstimateMemoryUsage() throws IOException { - EstimateMemoryUsageRequest estimateRequest = new EstimateMemoryUsageRequest(randomDataFrameAnalyticsConfig()); + PutDataFrameAnalyticsRequest estimateRequest = new PutDataFrameAnalyticsRequest(randomDataFrameAnalyticsConfig()); Request request = MLRequestConverters.estimateMemoryUsage(estimateRequest); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals("/_ml/data_frame/analytics/_estimate_memory_usage", request.getEndpoint()); try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { - EstimateMemoryUsageRequest parsedRequest = EstimateMemoryUsageRequest.fromXContent(parser); - assertThat(parsedRequest, equalTo(estimateRequest)); + DataFrameAnalyticsConfig parsedConfig = DataFrameAnalyticsConfig.fromXContent(parser); + assertThat(parsedConfig, equalTo(estimateRequest.getConfig())); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index 68eaad59cabe..2111b314c9b4 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -44,7 +44,6 @@ import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; import org.elasticsearch.client.ml.DeleteJobResponse; import org.elasticsearch.client.ml.DeleteModelSnapshotRequest; -import org.elasticsearch.client.ml.EstimateMemoryUsageRequest; import org.elasticsearch.client.ml.EstimateMemoryUsageResponse; import org.elasticsearch.client.ml.EvaluateDataFrameRequest; import org.elasticsearch.client.ml.EvaluateDataFrameResponse; @@ -1701,8 +1700,8 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { highLevelClient().bulk(bulk1, RequestOptions.DEFAULT); MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); - EstimateMemoryUsageRequest estimateMemoryUsageRequest = - new EstimateMemoryUsageRequest( + PutDataFrameAnalyticsRequest estimateMemoryUsageRequest = + new PutDataFrameAnalyticsRequest( DataFrameAnalyticsConfig.builder() .setSource(DataFrameAnalyticsSource.builder().setIndex(indexName).build()) 
.setAnalysis(OutlierDetection.createDefault()) @@ -1716,8 +1715,8 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { EstimateMemoryUsageResponse response1 = execute( estimateMemoryUsageRequest, machineLearningClient::estimateMemoryUsage, machineLearningClient::estimateMemoryUsageAsync); - assertThat(response1.getExpectedMemoryUsageWithOnePartition(), allOf(greaterThan(lowerBound), lessThan(upperBound))); - assertThat(response1.getExpectedMemoryUsageWithMaxPartitions(), allOf(greaterThan(lowerBound), lessThan(upperBound))); + assertThat(response1.getExpectedMemoryWithoutDisk(), allOf(greaterThan(lowerBound), lessThan(upperBound))); + assertThat(response1.getExpectedMemoryWithDisk(), allOf(greaterThan(lowerBound), lessThan(upperBound))); BulkRequest bulk2 = new BulkRequest() .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); @@ -1731,11 +1730,8 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { execute( estimateMemoryUsageRequest, machineLearningClient::estimateMemoryUsage, machineLearningClient::estimateMemoryUsageAsync); assertThat( - response2.getExpectedMemoryUsageWithOnePartition(), - allOf(greaterThan(response1.getExpectedMemoryUsageWithOnePartition()), lessThan(upperBound))); - assertThat( - response2.getExpectedMemoryUsageWithMaxPartitions(), - allOf(greaterThan(response1.getExpectedMemoryUsageWithMaxPartitions()), lessThan(upperBound))); + response2.getExpectedMemoryWithoutDisk(), allOf(greaterThan(response1.getExpectedMemoryWithoutDisk()), lessThan(upperBound))); + assertThat(response2.getExpectedMemoryWithDisk(), allOf(greaterThan(response1.getExpectedMemoryWithDisk()), lessThan(upperBound))); } public void testPutFilter() throws Exception { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index a1946baa3e0e..8ec5bd1d57c0 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -439,6 +439,13 @@ public class RequestConvertersTests extends ESTestCase { if (reindexRequest.getRemoteInfo() == null && randomBoolean()) { reindexRequest.setSourceQuery(new TermQueryBuilder("foo", "fooval")); } + if (randomBoolean()) { + int slices = randomInt(100); + reindexRequest.setSlices(slices); + expectedParams.put("slices", String.valueOf(slices)); + } else { + expectedParams.put("slices", "1"); + } setRandomTimeout(reindexRequest::setTimeout, ReplicationRequest.DEFAULT_TIMEOUT, expectedParams); setRandomWaitForActiveShards(reindexRequest::setWaitForActiveShards, ActiveShardCount.DEFAULT, expectedParams); expectedParams.put("scroll", reindexRequest.getScrollTime().getStringRep()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java index 8e4001442b0c..f9679cf5eb61 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java @@ -20,6 +20,8 @@ package org.elasticsearch.client; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; import 
org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; @@ -133,6 +135,17 @@ public class SnapshotIT extends ESRestHighLevelClientTestCase { assertThat(response.getNodes().size(), equalTo(1)); } + public void testCleanupRepository() throws IOException { + AcknowledgedResponse putRepositoryResponse = createTestRepository("test", FsRepository.TYPE, "{\"location\": \".\"}"); + assertTrue(putRepositoryResponse.isAcknowledged()); + + CleanupRepositoryRequest request = new CleanupRepositoryRequest("test"); + CleanupRepositoryResponse response = execute(request, highLevelClient().snapshot()::cleanupRepository, + highLevelClient().snapshot()::cleanupRepositoryAsync); + assertThat(response.result().bytes(), equalTo(0L)); + assertThat(response.result().blobs(), equalTo(0L)); + } + public void testCreateSnapshot() throws IOException { String repository = "test_repository"; assertTrue(createTestRepository(repository, FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged()); @@ -317,4 +330,4 @@ public class SnapshotIT extends ESRestHighLevelClientTestCase { } return metadata; } -} \ No newline at end of file +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index 1e9737a1c92f..809985a9bb30 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -48,6 +48,7 @@ import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; import org.elasticsearch.client.ml.DeleteJobResponse; import org.elasticsearch.client.ml.DeleteModelSnapshotRequest; +import org.elasticsearch.client.ml.EstimateMemoryUsageResponse; import org.elasticsearch.client.ml.EvaluateDataFrameRequest; import org.elasticsearch.client.ml.EvaluateDataFrameResponse; import org.elasticsearch.client.ml.FindFileStructureRequest; @@ -194,11 +195,13 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.core.Is.is; public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { @@ -3262,6 +3265,72 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { } } + public void testEstimateMemoryUsage() throws Exception { + createIndex("estimate-test-source-index"); + BulkRequest bulkRequest = + new BulkRequest("estimate-test-source-index") + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (int i = 0; i < 10; ++i) { + bulkRequest.add(new IndexRequest().source(XContentType.JSON, "timestamp", 123456789L, "total", 10L)); + } + RestHighLevelClient client = highLevelClient(); + client.bulk(bulkRequest, RequestOptions.DEFAULT); + { + // tag::estimate-memory-usage-request + DataFrameAnalyticsConfig 
config = DataFrameAnalyticsConfig.builder() + .setSource(DataFrameAnalyticsSource.builder().setIndex("estimate-test-source-index").build()) + .setAnalysis(OutlierDetection.createDefault()) + .build(); + PutDataFrameAnalyticsRequest request = new PutDataFrameAnalyticsRequest(config); // <1> + // end::estimate-memory-usage-request + + // tag::estimate-memory-usage-execute + EstimateMemoryUsageResponse response = client.machineLearning().estimateMemoryUsage(request, RequestOptions.DEFAULT); + // end::estimate-memory-usage-execute + + // tag::estimate-memory-usage-response + ByteSizeValue expectedMemoryWithoutDisk = response.getExpectedMemoryWithoutDisk(); // <1> + ByteSizeValue expectedMemoryWithDisk = response.getExpectedMemoryWithDisk(); // <2> + // end::estimate-memory-usage-response + + // We are pretty liberal here as this test does not aim at verifying concrete numbers but rather end-to-end user workflow. + ByteSizeValue lowerBound = new ByteSizeValue(1, ByteSizeUnit.KB); + ByteSizeValue upperBound = new ByteSizeValue(1, ByteSizeUnit.GB); + assertThat(expectedMemoryWithoutDisk, allOf(greaterThan(lowerBound), lessThan(upperBound))); + assertThat(expectedMemoryWithDisk, allOf(greaterThan(lowerBound), lessThan(upperBound))); + } + { + DataFrameAnalyticsConfig config = DataFrameAnalyticsConfig.builder() + .setSource(DataFrameAnalyticsSource.builder().setIndex("estimate-test-source-index").build()) + .setAnalysis(OutlierDetection.createDefault()) + .build(); + PutDataFrameAnalyticsRequest request = new PutDataFrameAnalyticsRequest(config); + // tag::estimate-memory-usage-execute-listener + ActionListener listener = new ActionListener<>() { + @Override + public void onResponse(EstimateMemoryUsageResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::estimate-memory-usage-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::estimate-memory-usage-execute-async + client.machineLearning().estimateMemoryUsageAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::estimate-memory-usage-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testCreateFilter() throws Exception { RestHighLevelClient client = highLevelClient(); { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/EstimateMemoryUsageRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/EstimateMemoryUsageRequestTests.java deleted file mode 100644 index 03a880d7c4bb..000000000000 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/EstimateMemoryUsageRequestTests.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.client.ml; - -import org.elasticsearch.client.ml.dataframe.DataFrameAnalyticsConfigTests; -import org.elasticsearch.client.ml.dataframe.MlDataFrameAnalysisNamedXContentProvider; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.SearchModule; -import org.elasticsearch.test.AbstractXContentTestCase; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.function.Predicate; - -public class EstimateMemoryUsageRequestTests extends AbstractXContentTestCase { - - public static EstimateMemoryUsageRequest randomRequest() { - return new EstimateMemoryUsageRequest(DataFrameAnalyticsConfigTests.randomDataFrameAnalyticsConfig()); - } - - @Override - protected EstimateMemoryUsageRequest createTestInstance() { - return randomRequest(); - } - - @Override - protected EstimateMemoryUsageRequest doParseInstance(XContentParser parser) throws IOException { - return EstimateMemoryUsageRequest.fromXContent(parser); - } - - @Override - protected boolean supportsUnknownFields() { - return true; - } - - @Override - protected Predicate getRandomFieldsExcludeFilter() { - return field -> field.contains("."); - } - - @Override - protected NamedXContentRegistry xContentRegistry() { - List namedXContent = new ArrayList<>(); - namedXContent.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents()); - namedXContent.addAll(new MlDataFrameAnalysisNamedXContentProvider().getNamedXContentParsers()); - return new NamedXContentRegistry(namedXContent); - } -} diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index 649dd7ff8de5..aa35598c9cdd 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -21,6 +21,7 @@ import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.BwcVersions +import org.elasticsearch.gradle.info.GlobalBuildInfoPlugin import java.nio.charset.StandardCharsets @@ -42,7 +43,7 @@ bwcVersions.forPreviousUnreleased { BwcVersions.UnreleasedVersionInfo unreleased File checkoutDir = file("${buildDir}/bwc/checkout-${bwcBranch}") - final String remote = System.getProperty("tests.bwc.remote", "elastic") + final String remote = System.getProperty("bwc.remote", "elastic") boolean gitFetchLatest final String gitFetchLatestProperty = System.getProperty("tests.bwc.git_fetch_latest", "true") @@ -103,8 +104,8 @@ bwcVersions.forPreviousUnreleased { BwcVersions.UnreleasedVersionInfo unreleased task checkoutBwcBranch() { dependsOn fetchLatest doLast { - String refspec = System.getProperty("tests.bwc.refspec.${bwcBranch}", "${remote}/${bwcBranch}") - if (System.getProperty("tests.bwc.checkout.align") != null) { + String refspec = System.getProperty("bwc.refspec.${bwcBranch}") ?: System.getProperty("tests.bwc.refspec.${bwcBranch}") ?: "${remote}/${bwcBranch}" + if (System.getProperty("bwc.checkout.align") != null || System.getProperty("tests.bwc.checkout.align") != null) { /* We use a time based approach to make the bwc versions built deterministic and compatible with the current hash. 
Most of the time we want to test against latest, but when running delayed exhaustive tests or wanting @@ -145,6 +146,7 @@ bwcVersions.forPreviousUnreleased { BwcVersions.UnreleasedVersionInfo unreleased spec.workingDir = checkoutDir spec.commandLine "git", "checkout", refspec } + file("${project.buildDir}/refspec").text = GlobalBuildInfoPlugin.gitRevision(checkoutDir) } } @@ -217,6 +219,9 @@ bwcVersions.forPreviousUnreleased { BwcVersions.UnreleasedVersionInfo unreleased Closure createBuildBwcTask = { projectName, projectDir, projectArtifact -> Task bwcTask = createRunBwcGradleTask(buildBwcTaskName(projectName)) { + inputs.file("${project.buildDir}/refspec") + outputs.files(projectArtifact) + outputs.cacheIf { true } args ":${projectDir.replace('/', ':')}:assemble" doLast { if (projectArtifact.exists() == false) { diff --git a/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java b/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java index be4fb90fc827..b5e6a31e1489 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java @@ -23,7 +23,6 @@ import java.io.ByteArrayInputStream; import java.io.CharArrayWriter; import java.io.InputStream; import java.nio.charset.StandardCharsets; -import java.util.Locale; import java.util.Map; import org.elasticsearch.cli.Command; @@ -176,14 +175,15 @@ public class AddStringKeyStoreCommandTests extends KeyStoreCommandTestCase { assertThat(e.getMessage(), containsString("The setting name can not be null")); } - public void testUpperCaseInName() throws Exception { + public void testSpecialCharacterInName() throws Exception { createKeystore(""); terminal.addSecretInput("value"); - final String key = randomAlphaOfLength(4) + randomAlphaOfLength(1).toUpperCase(Locale.ROOT) + randomAlphaOfLength(4); + final String key = randomAlphaOfLength(4) + '@' + randomAlphaOfLength(4); final UserException e = expectThrows(UserException.class, () -> execute(key)); + final String exceptionString= "Setting name [" + key + "] does not match the allowed setting name pattern [[A-Za-z0-9_\\-.]+]"; assertThat( e, - hasToString(containsString("Setting name [" + key + "] does not match the allowed setting name pattern [[a-z0-9_\\-.]+]"))); + hasToString(containsString(exceptionString))); } void setInput(String inputStr) { diff --git a/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java b/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java index 568ddfe97df1..afd90a1a9e93 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java @@ -317,12 +317,12 @@ public class KeyStoreWrapperTests extends ESTestCase { } public void testIllegalSettingName() throws Exception { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> KeyStoreWrapper.validateSettingName("UpperCase")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> KeyStoreWrapper.validateSettingName("*")); assertTrue(e.getMessage().contains("does not match the allowed setting name pattern")); 
KeyStoreWrapper keystore = KeyStoreWrapper.create(); - e = expectThrows(IllegalArgumentException.class, () -> keystore.setString("UpperCase", new char[0])); + e = expectThrows(IllegalArgumentException.class, () -> keystore.setString("*", new char[0])); assertTrue(e.getMessage().contains("does not match the allowed setting name pattern")); - e = expectThrows(IllegalArgumentException.class, () -> keystore.setFile("UpperCase", new byte[0])); + e = expectThrows(IllegalArgumentException.class, () -> keystore.setFile("*", new byte[0])); assertTrue(e.getMessage().contains("does not match the allowed setting name pattern")); } diff --git a/docs/build.gradle b/docs/build.gradle index dce7921fb76d..a2d13cd0d090 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -59,6 +59,9 @@ testClusters.integTest { extraConfigFile 'hunspell/en_US/en_US.dic', project(":server").file('src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.dic') // Whitelist reindexing from the local node so we can test it. setting 'reindex.remote.whitelist', '127.0.0.1:*' + + // TODO: remove this once cname is prepended to transport.publish_address by default in 8.0 + systemProperty 'es.transport.cname_in_publish_address', 'true' } // build the cluster with all plugins diff --git a/docs/java-rest/high-level/ml/estimate-memory-usage.asciidoc b/docs/java-rest/high-level/ml/estimate-memory-usage.asciidoc new file mode 100644 index 000000000000..659e7e11755e --- /dev/null +++ b/docs/java-rest/high-level/ml/estimate-memory-usage.asciidoc @@ -0,0 +1,35 @@ +-- +:api: estimate-memory-usage +:request: PutDataFrameAnalyticsRequest +:response: EstimateMemoryUsageResponse +-- +[id="{upid}-{api}"] +=== Estimate memory usage API + +The Estimate memory usage API is used to estimate memory usage of {dfanalytics}. +Estimation results can be used when deciding the appropriate value for `model_memory_limit` setting later on. + +The API accepts an +{request}+ object and returns an +{response}+. + +[id="{upid}-{api}-request"] +==== Estimate memory usage Request + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> Constructing a new request containing a {dataframe-analytics-config} for which memory usage estimation should be performed + +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ contains the memory usage estimates. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> Estimated memory usage under the assumption that the whole {dfanalytics} should happen in memory (i.e. without overflowing to disk). +<2> Estimated memory usage under the assumption that overflowing to disk is allowed during {dfanalytics}. 
\ No newline at end of file diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 06c1dca33faa..42e28dd4785a 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -295,6 +295,7 @@ The Java High Level REST Client supports the following Machine Learning APIs: * <<{upid}-start-data-frame-analytics>> * <<{upid}-stop-data-frame-analytics>> * <<{upid}-evaluate-data-frame>> +* <<{upid}-estimate-memory-usage>> * <<{upid}-put-filter>> * <<{upid}-get-filters>> * <<{upid}-update-filter>> @@ -346,6 +347,7 @@ include::ml/delete-data-frame-analytics.asciidoc[] include::ml/start-data-frame-analytics.asciidoc[] include::ml/stop-data-frame-analytics.asciidoc[] include::ml/evaluate-data-frame.asciidoc[] +include::ml/estimate-memory-usage.asciidoc[] include::ml/put-filter.asciidoc[] include::ml/get-filters.asciidoc[] include::ml/update-filter.asciidoc[] diff --git a/docs/painless/painless-contexts/painless-context-examples.asciidoc b/docs/painless/painless-contexts/painless-context-examples.asciidoc index 8a0691459960..e840f34f33ad 100644 --- a/docs/painless/painless-contexts/painless-context-examples.asciidoc +++ b/docs/painless/painless-contexts/painless-context-examples.asciidoc @@ -48,7 +48,7 @@ PUT /seats "mappings": { "properties": { "theatre": { "type": "keyword" }, - "play": { "type": "text" }, + "play": { "type": "keyword" }, "actors": { "type": "text" }, "row": { "type": "integer" }, "number": { "type": "integer" }, @@ -72,7 +72,7 @@ seat data is indexed. + [source,js] ---- -curl -XPOST localhost:9200/seats/seat/_bulk?pipeline=seats -H "Content-Type: application/x-ndjson" --data-binary "@//seats.json" +curl -XPOST "localhost:9200/seats/_bulk?pipeline=seats" -H "Content-Type: application/x-ndjson" --data-binary "@//seats.json" ---- // NOTCONSOLE diff --git a/docs/painless/painless-contexts/painless-filter-context.asciidoc b/docs/painless/painless-contexts/painless-filter-context.asciidoc index eea810f61629..57c865f049ef 100644 --- a/docs/painless/painless-contexts/painless-filter-context.asciidoc +++ b/docs/painless/painless-contexts/painless-filter-context.asciidoc @@ -43,7 +43,7 @@ all available theatre seats for evening performances that are under $18. [source,js] ---- -GET evening/_search +GET seats/_search { "query": { "bool" : { diff --git a/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc b/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc index 8e4924d426b0..ff03eeee5dab 100644 --- a/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc +++ b/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc @@ -18,7 +18,7 @@ The standard <> is available. *Example* -[source,Painless] +[source,js] ---- POST _watcher/watch/_execute { @@ -65,6 +65,8 @@ POST _watcher/watch/_execute } } ---- +// CONSOLE +// TEST[skip: requires setup from other pages] <1> The Java Stream API is used in the condition. This API allows manipulation of the elements of the list in a pipeline. @@ -76,7 +78,7 @@ on the value of the seats sold for the plays in the data set. The script aggrega the total sold seats for each play and returns true if there is at least one play that has sold over $50,000. 
-[source,Painless] +[source,js] ---- POST _watcher/watch/_execute { @@ -121,6 +123,8 @@ POST _watcher/watch/_execute } } ---- +// CONSOLE +// TEST[skip: requires setup from other pages] This example uses a nearly identical condition as the previous example. The differences below are subtle and are worth calling out. diff --git a/docs/painless/painless-contexts/painless-watcher-context-example.asciidoc b/docs/painless/painless-contexts/painless-watcher-context-example.asciidoc index fa78b4855f21..5996e2ddc985 100644 --- a/docs/painless/painless-contexts/painless-watcher-context-example.asciidoc +++ b/docs/painless/painless-contexts/painless-watcher-context-example.asciidoc @@ -1,4 +1,4 @@ -[source,Painless] +[source,js] ---- POST _watcher/watch/_execute { @@ -99,10 +99,12 @@ POST _watcher/watch/_execute } } ---- +// CONSOLE +// TEST[skip: requires setup from other pages] The following example shows the use of metadata and transforming dates into a readable format. -[source,Painless] +[source,js] ---- POST _watcher/watch/_execute { @@ -155,3 +157,5 @@ POST _watcher/watch/_execute } } ---- +// CONSOLE +// TEST[skip: requires setup from other pages] \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc b/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc index ec0ac6519a44..25c41d04546f 100644 --- a/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc +++ b/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc @@ -18,7 +18,7 @@ The standard <> is available. *Example* -[source,Painless] +[source,js] ---- POST _watcher/watch/_execute { @@ -75,6 +75,8 @@ POST _watcher/watch/_execute } } ---- +// CONSOLE +// TEST[skip: requires setup from other pages] <1> The Java Stream API is used in the transform. This API allows manipulation of the elements of the list in a pipeline. @@ -86,7 +88,7 @@ the elements of the list in a pipeline. The following action transform changes each value in the mod_log action into a `String`. This transform does not change the values in the unmod_log action. -[source,Painless] +[source,js] ---- POST _watcher/watch/_execute { @@ -140,6 +142,8 @@ POST _watcher/watch/_execute } } ---- +// CONSOLE +// TEST[skip: requires setup from other pages] This example uses the streaming API in a very similar manner. The differences below are subtle and worth calling out. 
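For context on the snippet annotations added across these Painless pages: marking a block as `// CONSOLE` renders it as a runnable console example and pulls it into the docs test suite, while `// TEST[skip: ...]` opts it out of execution when the required setup lives on another page. A minimal sketch of the resulting convention, reusing the `seats` example index from the shared context setup (illustrative only, not part of the patch):

[source,js]
----
GET seats/_search
{
  "query": {
    "match_all": {}
  }
}
----
// CONSOLE
// TEST[skip: requires setup from other pages]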
diff --git a/docs/plugins/analysis-kuromoji.asciidoc b/docs/plugins/analysis-kuromoji.asciidoc index 383df5afb485..8ccea28beda9 100644 --- a/docs/plugins/analysis-kuromoji.asciidoc +++ b/docs/plugins/analysis-kuromoji.asciidoc @@ -98,6 +98,39 @@ dictionary to `$ES_HOME/config/userdict_ja.txt`: 東京スカイツリー,東京 スカイツリー,トウキョウ スカイツリー,カスタム名詞 ----------------------- +-- + +You can also inline the rules directly in the tokenizer definition using +the `user_dictionary_rules` option: + +[source,js] +-------------------------------------------------- +PUT nori_sample +{ + "settings": { + "index": { + "analysis": { + "tokenizer": { + "kuromoji_user_dict": { + "type": "kuromoji_tokenizer", + "mode": "extended", + "user_dictionary_rules": ["東京スカイツリー,東京 スカイツリー,トウキョウ スカイツリー,カスタム名詞"] + } + }, + "analyzer": { + "my_analyzer": { + "type": "custom", + "tokenizer": "kuromoji_user_dict" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +-- + `nbest_cost`/`nbest_examples`:: + -- diff --git a/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc index 2576662b6d13..32a540130ef3 100644 --- a/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/histogram-aggregation.asciidoc @@ -143,7 +143,7 @@ the first bucket you'll get will be the one with `100` as its key. This is confu to get those buckets between `0 - 100`. With `extended_bounds` setting, you now can "force" the histogram aggregation to start building buckets on a specific -`min` values and also keep on building buckets up to a `max` value (even if there are no documents anymore). Using +`min` value and also keep on building buckets up to a `max` value (even if there are no documents anymore). Using `extended_bounds` only makes sense when `min_doc_count` is 0 (the empty buckets will never be returned if `min_doc_count` is greater than 0). @@ -185,8 +185,10 @@ the `order` setting. Supports the same `order` functionality as the < The node where shard 0 currently has a replica on -You can also have Elasticsearch explain the allocation of the first unassigned -shard that it finds by sending an empty body for the request: -[source,js] --------------------------------------------------- -GET /_cluster/allocation/explain --------------------------------------------------- -// CONSOLE - -[float] -==== Explain API Response - -This section includes examples of the cluster allocation explain API response output -under various scenarios. 
- -////////////////////////// +===== Examples of unassigned primary shard explanations [source,js] -------------------------------------------------- @@ -89,9 +116,8 @@ GET /_cluster/allocation/explain -------------------------------------------------- // CONSOLE -////////////////////////// -The API response for an unassigned shard: +The API returns the following response for an unassigned primary shard: [source,js] -------------------------------------------------- @@ -131,36 +157,13 @@ The API response for an unassigned shard: // TESTRESPONSE[s/"transport_address" : "[^"]*"/"transport_address" : $body.$_path/] // TESTRESPONSE[s/"node_attributes" : \{\}/"node_attributes" : $body.$_path/] -<1> The current state of the shard -<2> The reason for the shard originally becoming unassigned -<3> Whether to allocate the shard -<4> Whether to allocate the shard to the particular node -<5> The decider which led to the `no` decision for the node -<6> An explanation as to why the decider returned a `no` decision, with a helpful hint pointing to the setting that led to the decision +<1> The current state of the shard. +<2> The reason for the shard originally becoming unassigned. +<3> Whether to allocate the shard. +<4> Whether to allocate the shard to the particular node. +<5> The decider which led to the `no` decision for the node. +<6> An explanation as to why the decider returned a `no` decision, with a helpful hint pointing to the setting that led to the decision. -You can return information gathered by the cluster info service about disk usage -and shard sizes by setting the `include_disk_info` parameter to `true`: - -[source,js] --------------------------------------------------- -GET /_cluster/allocation/explain?include_disk_info=true --------------------------------------------------- -// CONSOLE - -Additionally, if you would like to include all decisions that were factored into the final -decision, the `include_yes_decisions` parameter will return all decisions for each node: - -[source,js] --------------------------------------------------- -GET /_cluster/allocation/explain?include_yes_decisions=true --------------------------------------------------- -// CONSOLE - -The default value for `include_yes_decisions` is `false`, which will only -include the `no` decisions in the response. This is generally what you would -want, as the `no` decisions indicate why a shard is unassigned or cannot be moved, -and including all decisions include the `yes` ones adds a lot of verbosity to the -API's response output. 
The API response output for an unassigned primary shard that had previously been allocated to a node in the cluster: @@ -184,7 +187,11 @@ allocated to a node in the cluster: -------------------------------------------------- // NOTCONSOLE -The API response output for a replica that is unassigned due to delayed allocation: + +===== Example of an unassigned replica shard explanation + +The API response output for a replica that is unassigned due to delayed +allocation: [source,js] -------------------------------------------------- @@ -233,12 +240,15 @@ The API response output for a replica that is unassigned due to delayed allocati } -------------------------------------------------- // NOTCONSOLE -<1> The configured delay before allocating a replica shard that does not exist due to the node holding it leaving the cluster -<2> The remaining delay before allocating the replica shard -<3> Information about the shard data found on a node +<1> The configured delay before allocating a replica shard that does not exist due to the node holding it leaving the cluster. +<2> The remaining delay before allocating the replica shard. +<3> Information about the shard data found on a node. -The API response output for an assigned shard that is not allowed to -remain on its current node and is required to move: + +===== Examples of allocated shard explanations + +The API response output for an assigned shard that is not allowed to remain on +its current node and is required to move: [source,js] -------------------------------------------------- @@ -281,9 +291,10 @@ remain on its current node and is required to move: } -------------------------------------------------- // NOTCONSOLE -<1> Whether the shard is allowed to remain on its current node -<2> The deciders that factored into the decision of why the shard is not allowed to remain on its current node -<3> Whether the shard is allowed to be allocated to another node +<1> Whether the shard is allowed to remain on its current node. +<2> The deciders that factored into the decision of why the shard is not allowed to remain on its current node. +<3> Whether the shard is allowed to be allocated to another node. + The API response output for an assigned shard that remains on its current node because moving the shard to another node does not form a better cluster balance: @@ -317,6 +328,6 @@ because moving the shard to another node does not form a better cluster balance: } -------------------------------------------------- // NOTCONSOLE -<1> Whether rebalancing is allowed on the cluster -<2> Whether the shard can be rebalanced to another node -<3> The reason the shard cannot be rebalanced to the node, in this case indicating that it offers no better balance than the current node +<1> Whether rebalancing is allowed on the cluster. +<2> Whether the shard can be rebalanced to another node. +<3> The reason the shard cannot be rebalanced to the node, in this case indicating that it offers no better balance than the current node. diff --git a/docs/reference/cluster/nodes-hot-threads.asciidoc b/docs/reference/cluster/nodes-hot-threads.asciidoc index 82994058e204..635e943926a1 100644 --- a/docs/reference/cluster/nodes-hot-threads.asciidoc +++ b/docs/reference/cluster/nodes-hot-threads.asciidoc @@ -1,29 +1,65 @@ [[cluster-nodes-hot-threads]] === Nodes hot_threads +Returns the hot threads on each selected node in the cluster. 
+ + +[[cluster-nodes-hot-threads-api-request]] +==== {api-request-title} + +`GET /_nodes/hot_threads` + + +`GET /_nodes/{node_id}/hot_threads` + + +[[cluster-nodes-hot-threads-api-desc]] +==== {api-description-title} + This API yields a breakdown of the hot threads on each selected node in the -cluster. Its endpoints are `/_nodes/hot_threads` and -`/_nodes/{nodes}/hot_threads`: +cluster. The output is plain text with a breakdown of each node's top hot +threads. + + +[[cluster-nodes-hot-threads-api-path-params]] +==== {api-path-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=node-id] + + +[[cluster-nodes-hot-threads-api-query-params]] +==== {api-query-parms-title} + + +`ignore_idle_threads`:: + (Optional, boolean) If true, known idle threads (e.g. waiting in a socket + select, or to get a task from an empty queue) are filtered out. Defaults to + true. + +`interval`:: + (Optional, <>) The interval to do the second + sampling of threads. Defaults to `500ms`. + +`snapshots`:: + (Optional, integer) Number of samples of thread stacktrace. Defaults to + `10`. + +`threads`:: + (Optional, integer) Specifies the number of hot threads to provide + information for. Defaults to `3`. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] + +`type`:: + (Optional, string) The type to sample. Available options are `block`, `cpu`, and + `wait`. Defaults to `cpu`. + + +[[cluster-nodes-hot-threads-api-example]] +==== {api-examples-title} [source,js] -------------------------------------------------- GET /_nodes/hot_threads GET /_nodes/nodeId1,nodeId2/hot_threads -------------------------------------------------- -// CONSOLE - -The first command gets the hot threads of all the nodes in the cluster. The -second command gets the hot threads of only `nodeId1` and `nodeId2`. Nodes can -be selected using <>. - -The output is plain text with a breakdown of each node's top hot threads. The -allowed parameters are: - -[horizontal] -`threads`:: number of hot threads to provide, defaults to 3. -`interval`:: the interval to do the second sampling of threads. - Defaults to 500ms. -`type`:: The type to sample, defaults to cpu, but supports wait and - block to see hot threads that are in wait or block state. -`ignore_idle_threads`:: If true, known idle threads (e.g. waiting in a socket select, or to - get a task from an empty queue) are filtered out. Defaults to true. +// CONSOLE \ No newline at end of file diff --git a/docs/reference/cluster/tasks.asciidoc b/docs/reference/cluster/tasks.asciidoc index 9f2e6ee7e1ef..c19d769672d2 100644 --- a/docs/reference/cluster/tasks.asciidoc +++ b/docs/reference/cluster/tasks.asciidoc @@ -3,12 +3,52 @@ beta[The Task Management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible] -[float] -==== Current Tasks Information +Returns information about the tasks currently executing in the cluster. + +[[tasks-api-request]] +==== {api-request-title} + +`GET /_tasks` + + +`GET /_tasks/{task_id}` + + +[[tasks-api-desc]] +==== {api-description-title} The task management API allows to retrieve information about the tasks currently executing on one or more nodes in the cluster. + +[[tasks-api-path-params]] +==== {api-path-parms-title} + +{task_id} + (Optional, string) The ID of the task to return (`node_id:task_number`). 
+ + +[[tasks-api-query-params]] +==== {api-query-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] + +`wait_for_completion`:: + (Optional, boolean) If `true`, it waits for the matching tasks to complete. + Defaults to `false`. + + + +[[tasks-api-response-codes]] +==== {api-response-codes-title} + +`404` (Missing resources):: + If `{task_id}` is specified but not found, this code indicates that there + are no resources that match the request. + + +[[tasks-api-examples]] +==== {api-examples-title} + [source,js] -------------------------------------------------- GET _tasks <1> @@ -22,7 +62,7 @@ GET _tasks?nodes=nodeId1,nodeId2&actions=cluster:* <3> <2> Retrieves all tasks running on nodes `nodeId1` and `nodeId2`. See <> for more info about how to select individual nodes. <3> Retrieves all cluster-related tasks running on nodes `nodeId1` and `nodeId2`. -The result will look similar to the following: +The API returns the following result: [source,js] -------------------------------------------------- @@ -60,6 +100,8 @@ The result will look similar to the following: -------------------------------------------------- // TESTRESPONSE +===== Retrieve information from a particular task + It is also possible to retrieve information for a particular task. The following example retrieves information about task `oTUltX4IQMOUUVeiohTt8A:124`: @@ -82,6 +124,9 @@ GET _tasks?parent_task_id=oTUltX4IQMOUUVeiohTt8A:123 If the parent isn't found, the API does not return a 404. + +===== Get more information about tasks + You can also use the `detailed` request parameter to get more information about the running tasks. This is useful for telling one task from another but is more costly to execute. For example, fetching all searches using the `detailed` @@ -94,7 +139,7 @@ GET _tasks?actions=*search&detailed // CONSOLE // TEST[skip:No tasks to retrieve] -The results might look like: +The API returns the following result: [source,js] -------------------------------------------------- @@ -145,6 +190,9 @@ releases. ============================== + +===== Wait for completion + The task API can also be used to wait for completion of a particular task. The following call will block for 10 seconds or until the task with id `oTUltX4IQMOUUVeiohTt8A:12345` is completed. @@ -156,8 +204,8 @@ GET _tasks/oTUltX4IQMOUUVeiohTt8A:12345?wait_for_completion=true&timeout=10s // CONSOLE // TEST[catch:missing] -You can also wait for all tasks for certain action types to finish. This -command will wait for all `reindex` tasks to finish: +You can also wait for all tasks for certain action types to finish. This command +will wait for all `reindex` tasks to finish: [source,js] -------------------------------------------------- @@ -165,6 +213,9 @@ GET _tasks?actions=*reindex&wait_for_completion=true&timeout=10s -------------------------------------------------- // CONSOLE + +===== Listing tasks by using _cat + Tasks can be also listed using _cat version of the list tasks command, which accepts the same arguments as the standard list tasks command. @@ -175,9 +226,8 @@ GET _cat/tasks?detailed -------------------------------------------------- // CONSOLE -[float] [[task-cancellation]] -==== Task Cancellation +===== Task Cancellation If a long-running task supports cancellation, it can be cancelled with the cancel tasks API. 
The following example cancels task `oTUltX4IQMOUUVeiohTt8A:12345`: @@ -188,8 +238,9 @@ POST _tasks/oTUltX4IQMOUUVeiohTt8A:12345/_cancel -------------------------------------------------- // CONSOLE -The task cancellation command supports the same task selection parameters as the list tasks command, so multiple tasks -can be cancelled at the same time. For example, the following command will cancel all reindex tasks running on the +The task cancellation command supports the same task selection parameters as the +list tasks command, so multiple tasks can be cancelled at the same time. For +example, the following command will cancel all reindex tasks running on the nodes `nodeId1` and `nodeId2`. [source,js] @@ -198,11 +249,11 @@ POST _tasks/_cancel?nodes=nodeId1,nodeId2&actions=*reindex -------------------------------------------------- // CONSOLE -[float] -==== Task Grouping +===== Task Grouping -The task lists returned by task API commands can be grouped either by nodes (default) or by parent tasks using the `group_by` parameter. -The following command will change the grouping to parent tasks: +The task lists returned by task API commands can be grouped either by nodes +(default) or by parent tasks using the `group_by` parameter. The following +command will change the grouping to parent tasks: [source,js] -------------------------------------------------- @@ -218,12 +269,13 @@ GET _tasks?group_by=none -------------------------------------------------- // CONSOLE -[float] -==== Identifying running tasks -The `X-Opaque-Id` header, when provided on the HTTP request header, is going to be returned as a header in the response as well as -in the `headers` field for in the task information. This allows to track certain calls, or associate certain tasks with -a the client that started them: +===== Identifying running tasks + +The `X-Opaque-Id` header, when provided on the HTTP request header, is going to +be returned as a header in the response as well as in the `headers` field for in +the task information. This allows to track certain calls, or associate certain +tasks with a the client that started them: [source,sh] -------------------------------------------------- @@ -231,7 +283,7 @@ curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents" -------------------------------------------------- //NOTCONSOLE -The result will look similar to the following: +The API returns the following result: [source,js] -------------------------------------------------- diff --git a/docs/reference/cluster/voting-exclusions.asciidoc b/docs/reference/cluster/voting-exclusions.asciidoc index 82ef64ee9d31..ab48a89a5fba 100644 --- a/docs/reference/cluster/voting-exclusions.asciidoc +++ b/docs/reference/cluster/voting-exclusions.asciidoc @@ -7,21 +7,17 @@ Adds or removes master-eligible nodes from the <>. -[float] -==== Request -`POST _cluster/voting_config_exclusions/` + +[[voting-config-exclusions-api-request]] +==== {api-request-title} + +`POST _cluster/voting_config_exclusions/{node_name}` + `DELETE _cluster/voting_config_exclusions` -[float] -==== Path parameters -`node_name`:: - A <> that identifies {es} nodes. - -[float] -==== Description +[[voting-config-exclusions-api-desc]] +==== {api-description-title} By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at @@ -44,23 +40,35 @@ master-eligible nodes from a cluster in a short time period. 
They are not required when removing master-ineligible nodes or fewer than half of the master-eligible nodes. -The <> limits the size of the voting configuration exclusion list. The -default value is `10`. Since voting configuration exclusions are persistent and -limited in number, you must clear the voting config exclusions list once the -exclusions are no longer required. - -There is also a -<>, -which is set to true by default. If it is set to false, you must use this API to -maintain the voting configuration. - For more information, see <>. -[float] -==== Examples + +[[voting-config-exclusions-api-path-params]] +==== {api-path-parms-title} + +`{node_name}`:: + A <> that identifies {es} nodes. + + +[[voting-config-exclusions-api-query-params]] +==== {api-query-parms-title} + +`cluster.auto_shrink_voting_configuration`:: + (Optional, boolean) If `false`, you must use this API to maintain the voting + configuration. Defaults to `true`. + +`cluster.max_voting_config_exclusions`:: + (Optional, integer) Limits the size of the voting configuration exclusion + list. The default value is `10`. Since voting configuration exclusions are + persistent and limited in number, you must clear the voting config + exclusions list once the exclusions are no longer required. + + +[[voting-config-exclusions-api-example]] +==== {api-examples-title} Add `nodeId1` to the voting configuration exclusions list: + [source,js] -------------------------------------------------- POST /_cluster/voting_config_exclusions/nodeId1 @@ -68,7 +76,9 @@ POST /_cluster/voting_config_exclusions/nodeId1 // CONSOLE // TEST[catch:bad_request] + Remove all exclusions from the list: + [source,js] -------------------------------------------------- DELETE /_cluster/voting_config_exclusions diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index 3d951e997fb3..3ca58d692236 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -22,7 +22,7 @@ how {es} works. If you're already familiar with {es} and want to see how it work with the rest of the stack, you might want to jump to the {stack-gs}/get-started-elastic-stack.html[Elastic Stack Tutorial] to see how to set up a system monitoring solution with {es}, {kib}, -{beats}, and {ls}. +{beats}, and {ls}. TIP: The fastest way to get started with {es} is to https://www.elastic.co/cloud/elasticsearch-service/signup[start a free 14-day @@ -87,7 +87,7 @@ tar -xvf elasticsearch-{version}-darwin-x86_64.tar.gz + Windows PowerShell: + -["source","sh",subs="attributes,callouts"] +["source","powershell",subs="attributes,callouts"] -------------------------------------------------- Expand-Archive elasticsearch-{version}-windows-x86_64.zip -------------------------------------------------- @@ -104,10 +104,10 @@ cd elasticsearch-{version}/bin + Windows: + -["source","sh",subs="attributes,callouts"] +["source","powershell",subs="attributes,callouts"] -------------------------------------------------- -cd %PROGRAMFILES%\Elastic\Elasticsearch\bin -.\elasticsearch.exe +cd elasticsearch-{version}\bin +.\elasticsearch.bat -------------------------------------------------- + You now have a single-node {es} cluster up and running! 
@@ -126,17 +126,17 @@ Linux and macOS: + Windows: + -["source","sh",subs="attributes,callouts"] +["source","powershell",subs="attributes,callouts"] -------------------------------------------------- -.\elasticsearch.exe -Epath.data=data2 -Epath.logs=log2 -.\elasticsearch.exe -Epath.data=data3 -Epath.logs=log3 +.\elasticsearch.bat -E path.data=data2 -E path.logs=log2 +.\elasticsearch.bat -E path.data=data3 -E path.logs=log3 -------------------------------------------------- + The additional nodes are assigned unique IDs. Because you're running all three nodes locally, they automatically join the cluster with the first node. -. Use the `cat health` API to verify that your three-node cluster is up running. -The `cat` APIs return information about your cluster and indices in a +. Use the cat health API to verify that your three-node cluster is up running. +The cat APIs return information about your cluster and indices in a format that's easier to read than raw JSON. + You can interact directly with your cluster by submitting HTTP requests to @@ -155,8 +155,8 @@ GET /_cat/health?v -------------------------------------------------- // CONSOLE + -The response should indicate that the status of the _elasticsearch_ cluster -is _green_ and it has three nodes: +The response should indicate that the status of the `elasticsearch` cluster +is `green` and it has three nodes: + [source,txt] -------------------------------------------------- @@ -185,14 +185,14 @@ packages on Linux, install using Homebrew on macOS, or install using the MSI package installer on Windows. See <> for more information. [[getting-started-index]] -=== Index some documents +== Index some documents Once you have a cluster up and running, you're ready to index some data. There are a variety of ingest options for {es}, but in the end they all do the same thing: put JSON documents into an {es} index. -You can do this directly with a simple POST request that identifies -the index you want to add the document to and specifies one or more +You can do this directly with a simple PUT request that specifies +the index you want to add the document, a unique document ID, and one or more `"field": "value"` pairs in the request body: [source,js] @@ -204,9 +204,9 @@ PUT /customer/_doc/1 -------------------------------------------------- // CONSOLE -This request automatically creates the _customer_ index if it doesn't already +This request automatically creates the `customer` index if it doesn't already exist, adds a new document that has an ID of `1`, and stores and -indexes the _name_ field. +indexes the `name` field. Since this is a new document, the response shows that the result of the operation was that version 1 of the document was created: @@ -264,46 +264,22 @@ and shows the original source fields that were indexed. // TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ ] // TESTRESPONSE[s/"_primary_term" : \d+/"_primary_term" : $body._primary_term/] - [float] [[getting-started-batch-processing]] -==== Batch processing +=== Indexing documents in bulk -In addition to being able to index, update, and delete individual documents, Elasticsearch also provides the ability to perform any of the above operations in batches using the {ref}/docs-bulk.html[`_bulk` API]. This functionality is important in that it provides a very efficient mechanism to do multiple operations as fast as possible with as few network roundtrips as possible. 
+If you have a lot of documents to index, you can submit them in batches with +the {ref}/docs-bulk.html[bulk API]. Using bulk to batch document +operations is significantly faster than submitting requests individually as it minimizes network roundtrips. -As a quick example, the following call indexes two documents (ID 1 - John Doe and ID 2 - Jane Doe) in one bulk operation: +The optimal batch size depends on a number of factors: the document size and complexity, the indexing and search load, and the resources available to your cluster. A good place to start is with batches of 1,000 to 5,000 documents +and a total payload between 5MB and 15MB. From there, you can experiment +to find the sweet spot. -[source,js] --------------------------------------------------- -POST /customer/_bulk?pretty -{"index":{"_id":"1"}} -{"name": "John Doe" } -{"index":{"_id":"2"}} -{"name": "Jane Doe" } --------------------------------------------------- -// CONSOLE - -This example updates the first document (ID of 1) and then deletes the second document (ID of 2) in one bulk operation: - -[source,sh] --------------------------------------------------- -POST /customer/_bulk -{"update":{"_id":"1"}} -{"doc": { "name": "John Doe becomes Jane Doe" } } -{"delete":{"_id":"2"}} --------------------------------------------------- -// CONSOLE -// TEST[continued] - -Note above that for the delete action, there is no corresponding source document after it since deletes only require the ID of the document to be deleted. - -The Bulk API does not fail due to failures in one of the actions. If a single action fails for whatever reason, it will continue to process the remainder of the actions after it. When the bulk API returns, it will provide a status for each action (in the same order it was sent in) so that you can check if a specific action failed or not. - -[float] -==== Sample dataset - -Now that we've gotten a glimpse of the basics, let's try to work on a more realistic dataset. I've prepared a sample of fictitious JSON documents of customer bank account information. Each document has the following schema: +To get some data into {es} that you can start searching and analyzing: +. Download the https://github.com/elastic/elasticsearch/blob/master/docs/src/test/resources/accounts.json?raw=true[`accounts.json`] sample data set. The documents in this randomly-generated data set represent user accounts with the following information: ++ [source,js] -------------------------------------------------- { @@ -322,21 +298,19 @@ Now that we've gotten a glimpse of the basics, let's try to work on a more reali -------------------------------------------------- // NOTCONSOLE -For the curious, this data was generated using http://www.json-generator.com/[`www.json-generator.com/`], so please ignore the actual values and semantics of the data as these are all randomly generated. - -You can download the sample dataset (accounts.json) from https://github.com/elastic/elasticsearch/blob/master/docs/src/test/resources/accounts.json?raw=true[here]. Extract it to our current directory and let's load it into our cluster as follows: - +. 
Index the account data into the `bank` index with the following `_bulk` request: ++ [source,sh] -------------------------------------------------- curl -H "Content-Type: application/json" -XPOST "localhost:9200/bank/_bulk?pretty&refresh" --data-binary "@accounts.json" curl "localhost:9200/_cat/indices?v" -------------------------------------------------- // NOTCONSOLE - ++ //// This replicates the above in a document-testing friendly way but isn't visible in the docs: - ++ [source,js] -------------------------------------------------- GET /_cat/indices?v @@ -344,9 +318,9 @@ GET /_cat/indices?v // CONSOLE // TEST[setup:bank] //// - -And the response: - ++ +The response indicates that 1,000 documents were indexed successfully. ++ [source,txt] -------------------------------------------------- health status index uuid pri rep docs.count docs.deleted store.size pri.store.size @@ -355,10 +329,8 @@ yellow open bank l7sSYV2cQXmu6_4rJWVIww 5 1 1000 0 12 // TESTRESPONSE[s/128.6kb/\\d+(\\.\\d+)?[mk]?b/] // TESTRESPONSE[s/l7sSYV2cQXmu6_4rJWVIww/.+/ non_json] -Which means that we just successfully bulk indexed 1000 documents into the bank index. - [[getting-started-search]] -=== Start searching +== Start searching Now let's start with some simple searches. There are two basic ways to run searches: one is by sending search parameters through the {ref}/search-uri-request.html[REST request URI] and the other by sending them through the {ref}/search-request-body.html[REST request body]. The request body method allows you to be more expressive and also to define your searches in a more readable JSON format. We'll try one example of the request URI method but for the remainder of this tutorial, we will exclusively be using the request body method. @@ -501,7 +473,7 @@ It is important to understand that once you get your search results back, Elasti [float] [[getting-started-query-lang]] -==== Introducing the Query Language +=== Introducing the Query Language Elasticsearch provides a JSON-style domain-specific language that you can use to execute queries. This is referred to as the {ref}/query-dsl.html[Query DSL]. The query language is quite comprehensive and can be intimidating at first glance but the best way to actually learn it is to start with a few basic examples. @@ -724,7 +696,7 @@ GET /bank/_search [float] [[getting-started-filters]] -==== Executing filters +=== Executing filters In the previous section, we skipped over a little detail called the document score (`_score` field in the search results). The score is a numeric value that is a relative measure of how well the document matches the search query that we specified. The higher the score, the more relevant the document is, the lower the score, the less relevant the document is. @@ -761,7 +733,7 @@ Dissecting the above, the bool query contains a `match_all` query (the query par In addition to the `match_all`, `match`, `bool`, and `range` queries, there are a lot of other query types that are available and we won't go into them here. Since we already have a basic understanding of how they work, it shouldn't be too difficult to apply this knowledge in learning and experimenting with the other query types. [[getting-started-aggregations]] -=== Analyze results with aggregations +== Analyze results with aggregations Aggregations provide the ability to group and extract statistics from your data. The easiest way to think about aggregations is by roughly equating it to the SQL GROUP BY and the SQL aggregate functions. 
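For example, assuming the `bank` index loaded in the previous steps is still available and that dynamic mapping created a `state.keyword` sub-field for the account `state` values, a `terms` aggregation can count how many accounts fall in each state without returning any hits; treat this as a sketch rather than part of the original walkthrough:

[source,js]
--------------------------------------------------
GET /bank/_search
{
  "size": 0,
  "aggs": {
    "group_by_state": {
      "terms": {
        "field": "state.keyword"
      }
    }
  }
}
--------------------------------------------------
// CONSOLE

Setting `size` to `0` suppresses the search hits so the response contains only the aggregation buckets.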
In Elasticsearch, you have the ability to execute searches returning hits and at the same time return aggregated results separate from the hits all in one response. This is very powerful and efficient in the sense that you can run queries and multiple aggregations and get the results back of both (or either) operations in one shot avoiding network roundtrips using a concise and simplified API. @@ -965,7 +937,7 @@ GET /bank/_search There are many other aggregations capabilities that we won't go into detail here. The {ref}/search-aggregations.html[aggregations reference guide] is a great starting point if you want to do further experimentation. [[getting-started-next-steps]] -=== Where to go from here +== Where to go from here Now that you've set up a cluster, indexed some documents, and run some searches and aggregations, you might want to: diff --git a/docs/reference/ilm/policy-definitions.asciidoc b/docs/reference/ilm/policy-definitions.asciidoc index 98cd4fd04132..8ca2b68ac365 100644 --- a/docs/reference/ilm/policy-definitions.asciidoc +++ b/docs/reference/ilm/policy-definitions.asciidoc @@ -66,8 +66,11 @@ index is rolled over, then `min_age` is the time elapsed from the time the index is rolled over. The intention here is to execute following phases and actions relative to when data was written last to a rolled over index. -The previous phase's actions must complete before {ilm} will check `min_age` -and transition into the next phase. +The previous phase's actions must complete before {ilm} will check `min_age` and +transition into the next phase. By default, {ilm} checks for indices that meet +policy criteria, like `min_age`, every 10 minutes. You can use the +`indices.lifecycle.poll_interval` cluster setting to control how often this +check occurs. === Phase Execution diff --git a/docs/reference/index-modules/translog.asciidoc b/docs/reference/index-modules/translog.asciidoc index c01671b4ae6b..414ac59f0ba2 100644 --- a/docs/reference/index-modules/translog.asciidoc +++ b/docs/reference/index-modules/translog.asciidoc @@ -76,12 +76,23 @@ commit point. Defaults to `512mb`. `index.translog.retention.size`:: -The total size of translog files to keep. Keeping more translog files increases -the chance of performing an operation based sync when recovering replicas. If -the translog files are not sufficient, replica recovery will fall back to a -file based sync. Defaults to `512mb` +When soft deletes is disabled (enabled by default in 7.0 or later), +`index.translog.retention.size` controls the total size of translog files to keep. +Keeping more translog files increases the chance of performing an operation based +sync when recovering replicas. If the translog files are not sufficient, +replica recovery will fall back to a file based sync. Defaults to `512mb` + +Both `index.translog.retention.size` and `index.translog.retention.age` should not +be specified unless soft deletes is disabled as they will be ignored. `index.translog.retention.age`:: -The maximum duration for which translog files will be kept. Defaults to `12h`. +When soft deletes is disabled (enabled by default in 7.0 or later), +`index.translog.retention.age` controls the maximum duration for which translog +files are kept. Keeping more translog files increases the chance of performing an +operation based sync when recovering replicas. If the translog files are not sufficient, +replica recovery will fall back to a file based sync.
Defaults to `12h` + +Both `index.translog.retention.size` and `index.translog.retention.age` should not +be specified unless soft deletes is disabled as they will be ignored. diff --git a/docs/reference/indices/put-mapping.asciidoc b/docs/reference/indices/put-mapping.asciidoc index d5d73f8fc3f2..d5edfe51be81 100644 --- a/docs/reference/indices/put-mapping.asciidoc +++ b/docs/reference/indices/put-mapping.asciidoc @@ -57,12 +57,23 @@ PUT /twitter-1,twitter-2/_mapping <1> [float] ==== Updating field mappings -In general, the mapping for existing fields cannot be updated. There are some -exceptions to this rule. For instance: +// tag::put-field-mapping-exceptions[] -* new <> can be added to <> fields. -* new <> can be added to existing fields. -* the <> parameter can be updated. +You can't change the mapping of an existing field, with the following +exceptions: + +* You can add new <> to an <> field. +* You can use the <> mapping parameter to enable +multi-fields. +* You can change the value of the <> mapping +parameter. + +Changing the mapping of an existing field could invalidate data that's already +indexed. If you need to change the mapping of a field, create a new index with +the correct mappings and <> your data into that index. If +you only want to rename a field, consider adding an <> field. + +// end::put-field-mapping-exceptions[] For example: diff --git a/docs/reference/mapping.asciidoc b/docs/reference/mapping.asciidoc index 8b6b1af3e589..b5d2e6ae37a7 100644 --- a/docs/reference/mapping.asciidoc +++ b/docs/reference/mapping.asciidoc @@ -118,49 +118,151 @@ You know more about your data than Elasticsearch can guess, so while dynamic mapping can be useful to get started, at some point you will want to specify your own explicit mappings. -You can create field mappings when you -<>, and you can add -fields to an existing index with the <>. +You can create field mappings when you <> and +<>. [float] -== Updating existing field mappings +[[create-mapping]] +== Create an index with an explicit mapping -Other than where documented, *existing field mappings cannot be -updated*. Changing the mapping would mean invalidating already indexed -documents. Instead, you should create a new index with the correct mappings -and <> your data into that index. If you only wish -to rename a field and not change its mappings, it may make sense to introduce -an <> field. - -[float] -== Example mapping - -A mapping can be specified when creating an index, as follows: +You can use the <> API to create a new index +with an explicit mapping. [source,js] ---------------------------------------- -PUT my_index <1> +---- +PUT /my-index { "mappings": { - "properties": { <2> - "title": { "type": "text" }, <3> - "name": { "type": "text" }, <4> - "age": { "type": "integer" }, <5> - "created": { - "type": "date", <6> - "format": "strict_date_optional_time||epoch_millis" + "properties": { + "age": { "type": "integer" }, <1> + "email": { "type": "keyword" }, <2> + "name": { "type": "text" } <3> + } + } +} +---- +// CONSOLE + +<1> Creates `age`, an <> field +<2> Creates `email`, a <> field +<3> Creates `name`, a <> field + +[float] +[[add-field-mapping]] +== Add a field to an existing mapping + +You can use the <> API to add one or more new +fields to an existing index. + +The following example adds `employee-id`, a `keyword` field with an +<> mapping parameter value of `false`. This means values +for the `employee-id` field are stored but not indexed or available for search. 
+ +[source,js] +---- +PUT /my-index/_mapping +{ + "properties": { + "employee-id": { + "type": "keyword", + "index": false + } + } +} +---- +// CONSOLE +// TEST[continued] + +[float] +[[update-mapping]] +=== Update the mapping of a field + +include::{docdir}/indices/put-mapping.asciidoc[tag=put-field-mapping-exceptions] + +[float] +[[view-mapping]] +== View the mapping of an index + +You can use the <> API to view the mapping of +an existing index. + +[source,js] +---- +GET /my-index/_mapping +---- +// CONSOLE +// TEST[continued] + +The API returns the following response: + +[source,js] +---- +{ + "my-index" : { + "mappings" : { + "properties" : { + "age" : { + "type" : "integer" + }, + "email" : { + "type" : "keyword" + }, + "employee-id" : { + "type" : "keyword", + "index" : false + }, + "name" : { + "type" : "text" + } } } } } ---------------------------------------- +---- +// TESTRESPONSE + + +[float] +[[view-field-mapping]] +== View the mapping of specific fields + +If you only want to view the mapping of one or more specific fields, you can use +the <> API. + +This is useful if you don't need the complete mapping of an index or your index +contains a large number of fields. + +The following request retrieves the mapping for the `employee-id` field. + +[source,js] +---- +GET /my-index/_mapping/field/employee-id +---- // CONSOLE -<1> Create an index called `my_index`. -<2> Specify the fields or _properties_ in the mapping. -<3> Specify that the `title` field contains `text` values. -<4> Specify that the `name` field contains `text` values. -<5> Specify that the `age` field contains `integer` values. -<6> Specify that the `created` field contains `date` values in two possible formats. +// TEST[continued] + +The API returns the following response: + +[source,js] +---- +{ + "my-index" : { + "mappings" : { + "employee-id" : { + "full_name" : "employee-id", + "mapping" : { + "employee-id" : { + "type" : "keyword", + "index" : false + } + } + } + } + } +} + +---- +// TESTRESPONSE -- diff --git a/docs/reference/ml/df-analytics/apis/estimate-memory-usage-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/estimate-memory-usage-dfanalytics.asciidoc index 9f1f77052d64..4393a3365fe7 100644 --- a/docs/reference/ml/df-analytics/apis/estimate-memory-usage-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/estimate-memory-usage-dfanalytics.asciidoc @@ -42,14 +42,14 @@ Serves as an advice on how to set `model_memory_limit` when creating {dfanalytic [[ml-estimate-memory-usage-dfanalytics-results]] ==== {api-response-body-title} -`expected_memory_usage_with_one_partition`:: +`expected_memory_without_disk`:: (string) Estimated memory usage under the assumption that the whole {dfanalytics} should happen in memory (i.e. without overflowing to disk). -`expected_memory_usage_with_max_partitions`:: +`expected_memory_with_disk`:: (string) Estimated memory usage under the assumption that overflowing to disk is allowed during {dfanalytics}. - `expected_memory_usage_with_max_partitions` is usually smaller than `expected_memory_usage_with_one_partition` - as using disk allows to limit the main memory needed to perform {dfanalytics}. + `expected_memory_with_disk` is usually smaller than `expected_memory_without_disk` as using disk allows to + limit the main memory needed to perform {dfanalytics}. 
[[ml-estimate-memory-usage-dfanalytics-example]] ==== {api-examples-title} @@ -76,8 +76,8 @@ The API returns the following results: [source,js] ---- { - "expected_memory_usage_with_one_partition": "128MB", - "expected_memory_usage_with_max_partitions": "32MB" + "expected_memory_without_disk": "128MB", + "expected_memory_with_disk": "32MB" } ---- // TESTRESPONSE \ No newline at end of file diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index 50d037b5ffb2..7383dd5d1929 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -332,6 +332,42 @@ POST /_snapshot/my_unverified_backup/_verify It returns a list of nodes where repository was successfully verified or an error message if verification process failed. +[float] +===== Repository Cleanup +Repositories can over time accumulate data that is not referenced by any existing snapshot. This is a result of the data safety guarantees +the snapshot functionality provides in failure scenarios during snapshot creation and the decentralized nature of the snapshot creation +process. This unreferenced data in no way negatively impacts the performance or safety of a snapshot repository, but it leads to higher +than necessary storage use. In order to clean up this unreferenced data, users can call the cleanup endpoint for a repository, which will +trigger a complete accounting of the repository's contents and the subsequent deletion of all unreferenced data that was found. + +[source,js] +----------------------------------- +POST /_snapshot/my_repository/_cleanup +----------------------------------- +// CONSOLE +// TEST[continued] + +The response to a cleanup request looks as follows: + +[source,js] +-------------------------------------------------- +{ + "results": { + "deleted_bytes": 20, + "deleted_blobs": 5 + } +} +-------------------------------------------------- +// TESTRESPONSE + +Depending on the concrete repository implementation, the numbers shown for bytes freed as well as the number of blobs removed will either +be an approximation or an exact result. Any non-zero value for the number of blobs removed implies that unreferenced blobs were found and +subsequently cleaned up. + +Please note that most of the cleanup operations executed by this endpoint are automatically performed when deleting any snapshot from a +repository. If you regularly delete snapshots, you will in most cases see little or no space savings from using this functionality +and should lower your frequency of invoking it accordingly. + [float] [[snapshots-take-snapshot]] === Snapshot diff --git a/docs/reference/query-dsl/pinned-query.asciidoc b/docs/reference/query-dsl/pinned-query.asciidoc new file mode 100644 index 000000000000..9768a1aa8179 --- /dev/null +++ b/docs/reference/query-dsl/pinned-query.asciidoc @@ -0,0 +1,37 @@ +[role="xpack"] +[testenv="basic"] +[[query-dsl-pinned-query]] +=== Pinned Query +Promotes selected documents to rank higher than those matching a given query. +This feature is typically used to guide searchers to curated documents that are +promoted over and above any "organic" matches for a search. +The promoted or "pinned" documents are identified using the document IDs stored in +the <> field.
+ +==== Example request + +[source,js] +-------------------------------------------------- +GET /_search +{ + "query": { + "pinned" : { + "ids" : ["1", "4", "100"], + "organic" : { + "match":{ + "description": "iphone" + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +[[pinned-query-top-level-parameters]] +==== Top-level parameters for `pinned` + +`ids`:: +An array of <> listed in the order they are to appear in results. +`organic`:: +Any choice of query used to rank the documents that will appear below the "pinned" document IDs. \ No newline at end of file diff --git a/docs/reference/query-dsl/special-queries.asciidoc b/docs/reference/query-dsl/special-queries.asciidoc index c2e95a4d818c..06f7cc98a734 100644 --- a/docs/reference/query-dsl/special-queries.asciidoc +++ b/docs/reference/query-dsl/special-queries.asciidoc @@ -31,6 +31,8 @@ A query that allows to modify the score of a sub-query with a script. <>:: A query that accepts other queries as json or yaml string. +<>:: +A query that promotes selected documents over others matching a given query. include::distance-feature-query.asciidoc[] @@ -44,4 +46,6 @@ include::script-query.asciidoc[] include::script-score-query.asciidoc[] -include::wrapper-query.asciidoc[] \ No newline at end of file +include::wrapper-query.asciidoc[] + +include::pinned-query.asciidoc[] \ No newline at end of file diff --git a/docs/reference/rollup/apis/put-job.asciidoc b/docs/reference/rollup/apis/put-job.asciidoc index bba6a1714dd8..9f473d7f2023 100644 --- a/docs/reference/rollup/apis/put-job.asciidoc +++ b/docs/reference/rollup/apis/put-job.asciidoc @@ -11,31 +11,31 @@ Creates a {rollup-job}. experimental[] -[[sample-api-request]] +[[rollup-put-job-api-request]] ==== {api-request-title} `PUT _rollup/job/` -[[sample-api-prereqs]] +[[rollup-put-job-api-prereqs]] ==== {api-prereq-title} * If the {es} {security-features} are enabled, you must have `manage` or `manage_rollup` cluster privileges to use this API. For more information, see {stack-ov}/security-privileges.html[Security privileges]. -[[sample-api-desc]] +[[rollup-put-job-api-desc]] ==== {api-description-title} Jobs are created in a `STOPPED` state. You can start them with the <>. -[[sample-api-path-params]] +[[rollup-put-job-api-path-params]] ==== {api-path-parms-title} `job_id`:: (Required, string) Identifier for the {rollup-job}. -[[sample-api-request-body]] +[[rollup-put-job-api-request-body]] ==== {api-request-body-title} `cron`:: @@ -64,7 +64,7 @@ Jobs are created in a `STOPPED` state. You can start them with the For more details about the job configuration, see <>. -[[sample-api-example]] +[[rollup-put-job-api-example]] ==== {api-example-title} The following example creates a {rollup-job} named "sensor", targeting the diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index 907ef3a511b8..58448f246f61 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -175,7 +175,7 @@ snippets: `simple` or `span`. Only valid for the `plain` highlighter. Defaults to `span`. `simple`::: Breaks up text into same-sized fragments. -`span`::: Breaks up text into same-sized fragments, but tried to avoid +`span`::: Breaks up text into same-sized fragments, but tries to avoid breaking up text between highlighted terms. This is helpful when you're querying for phrases. Default.
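As an illustration, a request along the following lines asks the `plain` highlighter to use the `span` fragmenter; the `my-index` index and `message` field here are placeholders rather than part of the original example set:

[source,js]
--------------------------------------------------
GET /my-index/_search
{
  "query": {
    "match_phrase": { "message": "number one" }
  },
  "highlight": {
    "fields": {
      "message": {
        "type": "plain",
        "fragmenter": "span"
      }
    }
  }
}
--------------------------------------------------
// CONSOLE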
diff --git a/docs/reference/settings/ilm-settings.asciidoc b/docs/reference/settings/ilm-settings.asciidoc index 7de2036273b9..0f0d94cedc2b 100644 --- a/docs/reference/settings/ilm-settings.asciidoc +++ b/docs/reference/settings/ilm-settings.asciidoc @@ -13,3 +13,7 @@ The index alias to update when the index rolls over. Specify when using a policy that contains a rollover action. When the index rolls over, the alias is updated to reflect that the index is no longer the write index. For more information about rollover, see <>. + +`indices.lifecycle.poll_interval`:: +(<>) How often {ilm} checks for indices that meet policy +criteria. Defaults to `10m`. diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index b29a1c18ed1b..30e222cec12d 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -28,6 +28,8 @@ esplugin { testClusters.integTest { module file(project(':modules:mapper-extras').tasks.bundlePlugin.archiveFile) systemProperty 'es.scripting.update.ctx_in_params', 'false' + // TODO: remove this once cname is prepended to transport.publish_address by default in 8.0 + systemProperty 'es.transport.cname_in_publish_address', 'true' } dependencies { diff --git a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java index 604dc7c083ef..7a74078894c9 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import java.io.BufferedInputStream; @@ -97,7 +98,7 @@ public class URLBlobContainer extends AbstractBlobContainer { } @Override - public void delete() { + public DeleteResult delete() { throw new UnsupportedOperationException("URL repository is read only"); } diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiTokenizerFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiTokenizerFactory.java index 22000cf7979e..c7e71fab6348 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiTokenizerFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiTokenizerFactory.java @@ -23,17 +23,22 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.ja.JapaneseTokenizer; import org.apache.lucene.analysis.ja.JapaneseTokenizer.Mode; import org.apache.lucene.analysis.ja.dict.UserDictionary; +import org.apache.lucene.analysis.ja.util.CSVUtil; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import java.io.IOException; -import java.io.Reader; +import java.io.StringReader; +import java.util.HashSet; +import java.util.List; +import java.util.Set; public class KuromojiTokenizerFactory extends AbstractTokenizerFactory { - private static final String USER_DICT_OPTION = 
"user_dictionary"; + private static final String USER_DICT_PATH_OPTION = "user_dictionary"; + private static final String USER_DICT_RULES_OPTION = "user_dictionary_rules"; private static final String NBEST_COST = "nbest_cost"; private static final String NBEST_EXAMPLES = "nbest_examples"; @@ -54,17 +59,33 @@ public class KuromojiTokenizerFactory extends AbstractTokenizerFactory { } public static UserDictionary getUserDictionary(Environment env, Settings settings) { + if (settings.get(USER_DICT_PATH_OPTION) != null && settings.get(USER_DICT_RULES_OPTION) != null) { + throw new IllegalArgumentException("It is not allowed to use [" + USER_DICT_PATH_OPTION + "] in conjunction" + + " with [" + USER_DICT_RULES_OPTION + "]"); + } try { - final Reader reader = Analysis.getReaderFromFile(env, settings, USER_DICT_OPTION); - if (reader == null) { + List ruleList = Analysis.getWordList(env, settings, USER_DICT_PATH_OPTION, USER_DICT_RULES_OPTION, false); + if (ruleList == null || ruleList.isEmpty()) { return null; - } else { - try { - return UserDictionary.open(reader); - } finally { - reader.close(); - } } + Set dup = new HashSet<>(); + int lineNum = 0; + for (String line : ruleList) { + // ignore comments + if (line.startsWith("#") == false) { + String[] values = CSVUtil.parse(line); + if (dup.add(values[0]) == false) { + throw new IllegalArgumentException("Found duplicate term [" + values[0] + "] in user dictionary " + + "at line [" + lineNum + "]"); + } + } + ++ lineNum; + } + StringBuilder sb = new StringBuilder(); + for (String line : ruleList) { + sb.append(line).append(System.lineSeparator()); + } + return UserDictionary.open(new StringReader(sb.toString())); } catch (IOException e) { throw new ElasticsearchException("failed to load kuromoji user dictionary", e); } diff --git a/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java b/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java index 29e73d5a9fa2..9add830c26c6 100644 --- a/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java +++ b/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.analysis; +import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.ja.JapaneseAnalyzer; @@ -39,6 +40,8 @@ import java.io.StringReader; import java.nio.file.Files; import java.nio.file.Path; +import static org.apache.lucene.analysis.BaseTokenStreamTestCase.assertTokenStreamContents; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; @@ -307,4 +310,55 @@ public class KuromojiAnalysisTests extends ESTestCase { tokenizer.setReader(new StringReader(source)); assertSimpleTSOutput(tokenFilter.create(tokenizer), expected); } + + public void testKuromojiAnalyzerUserDict() throws Exception { + Settings settings = Settings.builder() + .put("index.analysis.analyzer.my_analyzer.type", "kuromoji") + .putList("index.analysis.analyzer.my_analyzer.user_dictionary_rules", "c++,c++,w,w", "制限スピード,制限スピード,セイゲンスピード,テスト名詞") + .build(); + TestAnalysis analysis = createTestAnalysis(settings); + Analyzer analyzer = analysis.indexAnalyzers.get("my_analyzer"); + try (TokenStream stream = 
analyzer.tokenStream("", "制限スピード")) { + assertTokenStreamContents(stream, new String[]{"制限スピード"}); + } + + try (TokenStream stream = analyzer.tokenStream("", "c++world")) { + assertTokenStreamContents(stream, new String[]{"c++", "world"}); + } + } + + public void testKuromojiAnalyzerInvalidUserDictOption() throws Exception { + Settings settings = Settings.builder() + .put("index.analysis.analyzer.my_analyzer.type", "kuromoji") + .put("index.analysis.analyzer.my_analyzer.user_dictionary", "user_dict.txt") + .putList("index.analysis.analyzer.my_analyzer.user_dictionary_rules", "c++,c++,w,w") + .build(); + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> createTestAnalysis(settings)); + assertThat(exc.getMessage(), containsString("It is not allowed to use [user_dictionary] in conjunction " + + "with [user_dictionary_rules]")); + } + + public void testKuromojiAnalyzerDuplicateUserDictRule() throws Exception { + Settings settings = Settings.builder() + .put("index.analysis.analyzer.my_analyzer.type", "kuromoji") + .putList("index.analysis.analyzer.my_analyzer.user_dictionary_rules", + "c++,c++,w,w", "#comment", "制限スピード,制限スピード,セイゲンスピード,テスト名詞", "制限スピード,制限スピード,セイゲンスピード,テスト名詞") + .build(); + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> createTestAnalysis(settings)); + assertThat(exc.getMessage(), containsString("[制限スピード] in user dictionary at line [3]")); + } + + private TestAnalysis createTestAnalysis(Settings analysisSettings) throws IOException { + InputStream dict = KuromojiAnalysisTests.class.getResourceAsStream("user_dict.txt"); + Path home = createTempDir(); + Path config = home.resolve("config"); + Files.createDirectory(config); + Files.copy(dict, config.resolve("user_dict.txt")); + Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(Environment.PATH_HOME_SETTING.getKey(), home) + .put(analysisSettings) + .build(); + return AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new AnalysisKuromojiPlugin()); + } } diff --git a/plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_nori/10_basic.yml b/plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_kuromoji/10_basic.yml similarity index 100% rename from plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_nori/10_basic.yml rename to plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_kuromoji/10_basic.yml diff --git a/plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_nori/20_search.yml b/plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_kuromoji/20_search.yml similarity index 100% rename from plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_nori/20_search.yml rename to plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_kuromoji/20_search.yml diff --git a/plugins/analysis-nori/src/main/java/org/elasticsearch/index/analysis/NoriTokenizerFactory.java b/plugins/analysis-nori/src/main/java/org/elasticsearch/index/analysis/NoriTokenizerFactory.java index 8830cf7c9772..bac5dd2a7706 100644 --- a/plugins/analysis-nori/src/main/java/org/elasticsearch/index/analysis/NoriTokenizerFactory.java +++ b/plugins/analysis-nori/src/main/java/org/elasticsearch/index/analysis/NoriTokenizerFactory.java @@ -51,7 +51,7 @@ public class NoriTokenizerFactory extends AbstractTokenizerFactory { throw new IllegalArgumentException("It is not allowed to use [" + 
USER_DICT_PATH_OPTION + "] in conjunction" + " with [" + USER_DICT_RULES_OPTION + "]"); } - List ruleList = Analysis.getWordList(env, settings, USER_DICT_PATH_OPTION, USER_DICT_RULES_OPTION); + List ruleList = Analysis.getWordList(env, settings, USER_DICT_PATH_OPTION, USER_DICT_RULES_OPTION, true); StringBuilder sb = new StringBuilder(); if (ruleList == null || ruleList.isEmpty()) { return null; diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 835147c255c8..00a36629be49 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -23,10 +23,10 @@ esplugin { } versions << [ - 'tika': '1.19.1', - 'pdfbox': '2.0.12', - 'poi': '4.0.0', - 'mime4j': '0.8.2' + 'tika': '1.22', + 'pdfbox': '2.0.16', + 'poi': '4.0.1', + 'mime4j': '0.8.3' ] dependencies { @@ -66,6 +66,8 @@ dependencies { // Outlook documents compile "org.apache.james:apache-mime4j-core:${versions.mime4j}" compile "org.apache.james:apache-mime4j-dom:${versions.mime4j}" + // EPUB books + compile 'org.apache.commons:commons-lang3:3.9' } dependencyLicenses { diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.2.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.2.jar.sha1 deleted file mode 100644 index da79885de912..000000000000 --- a/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -94919d81969c67c5894646338bf10fbc35f5a946 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.3.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.3.jar.sha1 new file mode 100644 index 000000000000..464a34dd9764 --- /dev/null +++ b/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.3.jar.sha1 @@ -0,0 +1 @@ +1179b56c9919c1a8e20d3a528ee4c6cee19bcbe0 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.2.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.2.jar.sha1 deleted file mode 100644 index f88f3fa3f370..000000000000 --- a/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -32c9a9afe84eca86a3b0b3c66a956ced249ceade \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.3.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.3.jar.sha1 new file mode 100644 index 000000000000..4f98753aa0af --- /dev/null +++ b/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.3.jar.sha1 @@ -0,0 +1 @@ +e80733714eb6a70895bfc74a9528c658504c2c83 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-lang3-3.9.jar.sha1 b/plugins/ingest-attachment/licenses/commons-lang3-3.9.jar.sha1 new file mode 100644 index 000000000000..2adcfd377f87 --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-lang3-3.9.jar.sha1 @@ -0,0 +1 @@ +0122c7cee69b53ed4a7681c03d4ee4c0e2765da5 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-lang3-LICENSE.txt b/plugins/ingest-attachment/licenses/commons-lang3-LICENSE.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-lang3-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/ingest-attachment/licenses/commons-lang3-NOTICE.txt b/plugins/ingest-attachment/licenses/commons-lang3-NOTICE.txt new file mode 100644 index 000000000000..13a314089747 --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-lang3-NOTICE.txt @@ -0,0 +1,5 @@ +Apache Commons Lang +Copyright 2001-2019 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/plugins/ingest-attachment/licenses/fontbox-2.0.12.jar.sha1 b/plugins/ingest-attachment/licenses/fontbox-2.0.12.jar.sha1 deleted file mode 100644 index d342b59edfbf..000000000000 --- a/plugins/ingest-attachment/licenses/fontbox-2.0.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -566fd1d6b25012bb82078da08b82e6d0ba8c884a \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/fontbox-2.0.16.jar.sha1 b/plugins/ingest-attachment/licenses/fontbox-2.0.16.jar.sha1 new file mode 100644 index 000000000000..9d3ceaff6834 --- /dev/null +++ b/plugins/ingest-attachment/licenses/fontbox-2.0.16.jar.sha1 @@ -0,0 +1 @@ +3f7819279a0b90a01b07a870d1d27dffd8de24db \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/pdfbox-2.0.12.jar.sha1 b/plugins/ingest-attachment/licenses/pdfbox-2.0.12.jar.sha1 deleted file mode 100644 index e297ab7f91cd..000000000000 --- a/plugins/ingest-attachment/licenses/pdfbox-2.0.12.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a7311cd267c19e1ba8154b076a63d29537154784 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/pdfbox-2.0.16.jar.sha1 b/plugins/ingest-attachment/licenses/pdfbox-2.0.16.jar.sha1 new file mode 100644 index 000000000000..fd4245823e59 --- /dev/null +++ b/plugins/ingest-attachment/licenses/pdfbox-2.0.16.jar.sha1 @@ -0,0 +1 @@ +5dce5e41fc472d02800df5ef060a1f3a58c36902 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-4.0.0.jar.sha1 b/plugins/ingest-attachment/licenses/poi-4.0.0.jar.sha1 deleted file mode 100644 index baab27284819..000000000000 --- a/plugins/ingest-attachment/licenses/poi-4.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7ddb9b983ed682c93a986e8bb596d5935b13086c \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-4.0.1.jar.sha1 b/plugins/ingest-attachment/licenses/poi-4.0.1.jar.sha1 new file mode 100644 index 000000000000..bce6d5f9d9ab --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-4.0.1.jar.sha1 @@ -0,0 +1 @@ +d8eaa341687a7dc48048d964d0d53238959ca9b5 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-4.0.0.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-4.0.0.jar.sha1 deleted file mode 100644 index 1baa4d062de3..000000000000 --- a/plugins/ingest-attachment/licenses/poi-ooxml-4.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f3fa9c2bd64eb3ec15378de960a07d077ae5b26d \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-4.0.1.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-4.0.1.jar.sha1 new file mode 100644 index 000000000000..1edaed3b1fa3 --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-ooxml-4.0.1.jar.sha1 @@ -0,0 +1 @@ +9ec84728bf4236b8f9ec7fef3fe1b705eef2b408 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-schemas-4.0.0.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-schemas-4.0.0.jar.sha1 deleted file mode 100644 index 5d2e3c79308b..000000000000 --- a/plugins/ingest-attachment/licenses/poi-ooxml-schemas-4.0.0.jar.sha1 +++ /dev/null @@ -1 
+0,0 @@ -125f9ccd2cf652fa4169b1c30e9023362e23324f \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-schemas-4.0.1.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-schemas-4.0.1.jar.sha1 new file mode 100644 index 000000000000..d3022b4052d5 --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-ooxml-schemas-4.0.1.jar.sha1 @@ -0,0 +1 @@ +d2a066340008d36cb289b71f0f7b6ad562940644 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-scratchpad-4.0.0.jar.sha1 b/plugins/ingest-attachment/licenses/poi-scratchpad-4.0.0.jar.sha1 deleted file mode 100644 index 7cd201c3c8f7..000000000000 --- a/plugins/ingest-attachment/licenses/poi-scratchpad-4.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1038d3bb1ec34e93c184b4c5b690e2f51c6f7a60 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-scratchpad-4.0.1.jar.sha1 b/plugins/ingest-attachment/licenses/poi-scratchpad-4.0.1.jar.sha1 new file mode 100644 index 000000000000..5bf9aedc34cd --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-scratchpad-4.0.1.jar.sha1 @@ -0,0 +1 @@ +89b1ce1b932338204ffa3fab225b65b5d33dab5d \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-core-1.19.1.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-1.19.1.jar.sha1 deleted file mode 100644 index 0145026a76e9..000000000000 --- a/plugins/ingest-attachment/licenses/tika-core-1.19.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c1f075aa01586c2c28a249ad60bcfb733b69b866 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-core-1.22.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-1.22.jar.sha1 new file mode 100644 index 000000000000..d988ea2ebd19 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-core-1.22.jar.sha1 @@ -0,0 +1 @@ +b193f1f977e64ff77025a4cecd7997cff344c4bc \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parsers-1.19.1.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-1.19.1.jar.sha1 deleted file mode 100644 index a3a804cb87c2..000000000000 --- a/plugins/ingest-attachment/licenses/tika-parsers-1.19.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -06d45a8683a7479f0e0d9d252f834d0ae44abd6b \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parsers-1.22.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-1.22.jar.sha1 new file mode 100644 index 000000000000..893a7115e765 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parsers-1.22.jar.sha1 @@ -0,0 +1 @@ +b8a823128f6165882ae41de3ded8655609d62d88 \ No newline at end of file diff --git a/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java b/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java index 654bc361f53a..5658eb567177 100644 --- a/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java +++ b/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java @@ -216,8 +216,8 @@ public class AttachmentProcessorTests extends ESTestCase { // See (https://issues.apache.org/jira/browse/COMPRESS-432) for information // about the issue that causes a zip file to hang in Tika versions prior to 1.18. 
- public void testZipFileDoesNotHang() { - expectThrows(Exception.class, () -> parseDocument("bad_tika.zip", processor)); + public void testZipFileDoesNotHang() throws Exception { + parseDocument("bad_tika.zip", processor); } public void testParseAsBytesArray() throws Exception { diff --git a/plugins/ingest-attachment/src/test/resources/org/elasticsearch/ingest/attachment/test/sample-files/testEPUB.epub b/plugins/ingest-attachment/src/test/resources/org/elasticsearch/ingest/attachment/test/sample-files/testEPUB.epub index a6fc2e634d5c..a88df805858b 100644 Binary files a/plugins/ingest-attachment/src/test/resources/org/elasticsearch/ingest/attachment/test/sample-files/testEPUB.epub and b/plugins/ingest-attachment/src/test/resources/org/elasticsearch/ingest/attachment/test/sample-files/testEPUB.epub differ diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java index 15d1b37ecf81..042f08df4826 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import org.elasticsearch.threadpool.ThreadPool; @@ -126,9 +127,9 @@ public class AzureBlobContainer extends AbstractBlobContainer { } @Override - public void delete() throws IOException { + public DeleteResult delete() throws IOException { try { - blobStore.deleteBlobDirectory(keyPath, threadPool.executor(AzureRepositoryPlugin.REPOSITORY_THREAD_POOL_NAME)); + return blobStore.deleteBlobDirectory(keyPath, threadPool.executor(AzureRepositoryPlugin.REPOSITORY_THREAD_POOL_NAME)); } catch (URISyntaxException | StorageException e) { throw new IOException(e); } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index a7d9bb93a512..e4a7e3acb652 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -21,12 +21,12 @@ package org.elasticsearch.repositories.azure; import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; - import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.repositories.azure.AzureRepository.Repository; import org.elasticsearch.threadpool.ThreadPool; @@ -92,8 +92,9 @@ public class AzureBlobStore implements BlobStore { service.deleteBlob(clientName, container, blob); } - public void deleteBlobDirectory(String path, Executor executor) throws URISyntaxException, StorageException, IOException { - 
service.deleteBlobDirectory(clientName, container, path, executor);
+    public DeleteResult deleteBlobDirectory(String path, Executor executor)
+        throws URISyntaxException, StorageException, IOException {
+        return service.deleteBlobDirectory(clientName, container, path, executor);
     }
 
     public InputStream getInputStream(String blob) throws URISyntaxException, StorageException, IOException {
diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java
index f4ee7b9dbcad..cc4335956b76 100644
--- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java
+++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java
@@ -42,6 +42,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.DeleteResult;
 import org.elasticsearch.common.blobstore.support.PlainBlobMetaData;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.settings.Settings;
@@ -72,7 +73,7 @@ import java.util.function.Supplier;
 import static java.util.Collections.emptyMap;
 
 public class AzureStorageService {
-
+
     private static final Logger logger = LogManager.getLogger(AzureStorageService.class);
 
     public static final ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES);
@@ -192,13 +193,15 @@
         });
     }
 
-    void deleteBlobDirectory(String account, String container, String path, Executor executor)
+    DeleteResult deleteBlobDirectory(String account, String container, String path, Executor executor)
         throws URISyntaxException, StorageException, IOException {
         final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
         final CloudBlobContainer blobContainer = client.v1().getContainerReference(container);
         final Collection<Exception> exceptions = Collections.synchronizedList(new ArrayList<>());
         final AtomicLong outstanding = new AtomicLong(1L);
         final PlainActionFuture<Void> result = PlainActionFuture.newFuture();
+        final AtomicLong blobsDeleted = new AtomicLong();
+        final AtomicLong bytesDeleted = new AtomicLong();
         SocketAccess.doPrivilegedVoidException(() -> {
             for (final ListBlobItem blobItem : blobContainer.listBlobs(path, true)) {
                 // uri.getPath is of the form /container/keyPath.* and we want to strip off the /container/
@@ -208,7 +211,17 @@
                 executor.execute(new AbstractRunnable() {
                     @Override
                     protected void doRun() throws Exception {
+                        final long len;
+                        if (blobItem instanceof CloudBlob) {
+                            len = ((CloudBlob) blobItem).getProperties().getLength();
+                        } else {
+                            len = -1L;
+                        }
                         deleteBlob(account, container, blobPath);
+                        blobsDeleted.incrementAndGet();
+                        if (len >= 0) {
+                            bytesDeleted.addAndGet(len);
+                        }
                     }
 
                     @Override
@@ -234,6 +247,7 @@
             exceptions.forEach(ex::addSuppressed);
             throw ex;
         }
+        return new DeleteResult(blobsDeleted.get(), bytesDeleted.get());
     }
 
     public InputStream getInputStream(String account, String container, String blob)
diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java
index 4657ece3c8a2..da2275024272 100644
--- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java
+++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java
@@ -22,6 +22,7 @@ package org.elasticsearch.repositories.gcs;
 import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.DeleteResult;
 import org.elasticsearch.common.blobstore.support.AbstractBlobContainer;
 
 import java.io.IOException;
@@ -77,8 +78,8 @@
     }
 
     @Override
-    public void delete() throws IOException {
-        blobStore.deleteDirectory(path().buildAsString());
+    public DeleteResult delete() throws IOException {
+        return blobStore.deleteDirectory(path().buildAsString());
     }
 
     @Override
diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java
index c3fd4848a0c3..c42fe232b6e4 100644
--- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java
+++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java
@@ -37,6 +37,7 @@ import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.blobstore.BlobStore;
 import org.elasticsearch.common.blobstore.BlobStoreException;
+import org.elasticsearch.common.blobstore.DeleteResult;
 import org.elasticsearch.common.blobstore.support.PlainBlobMetaData;
 import org.elasticsearch.common.collect.MapBuilder;
 import org.elasticsearch.core.internal.io.Streams;
@@ -55,6 +56,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.stream.Collectors;
@@ -300,15 +302,24 @@
      *
      * @param pathStr Name of path to delete
      */
-    void deleteDirectory(String pathStr) throws IOException {
-        SocketAccess.doPrivilegedVoidIOException(() -> {
+    DeleteResult deleteDirectory(String pathStr) throws IOException {
+        return SocketAccess.doPrivilegedIOException(() -> {
+            DeleteResult deleteResult = DeleteResult.ZERO;
             Page<Blob> page = client().get(bucketName).list(BlobListOption.prefix(pathStr));
             do {
                 final Collection<String> blobsToDelete = new ArrayList<>();
-                page.getValues().forEach(b -> blobsToDelete.add(b.getName()));
+                final AtomicLong blobsDeleted = new AtomicLong(0L);
+                final AtomicLong bytesDeleted = new AtomicLong(0L);
+                page.getValues().forEach(b -> {
+                    blobsToDelete.add(b.getName());
+                    blobsDeleted.incrementAndGet();
+                    bytesDeleted.addAndGet(b.getSize());
+                });
                 deleteBlobsIgnoringIfNotExists(blobsToDelete);
+                deleteResult = deleteResult.add(blobsDeleted.get(), bytesDeleted.get());
                 page = page.getNextPage();
             } while (page != null);
+            return deleteResult;
         });
     }
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java
index e4c9af4d6c70..304906464dca 100644
---
a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.common.blobstore.fs.FsBlobContainer; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; @@ -69,9 +70,13 @@ final class HdfsBlobContainer extends AbstractBlobContainer { } } + // TODO: See if we can get precise result reporting. + private static final DeleteResult DELETE_RESULT = new DeleteResult(1L, 0L); + @Override - public void delete() throws IOException { + public DeleteResult delete() throws IOException { store.execute(fileContext -> fileContext.delete(path, true)); + return DELETE_RESULT; } @Override diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java index e34f290a8e29..d65db92f0670 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.repositories.hdfs; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.settings.MockSecureSettings; @@ -30,6 +31,7 @@ import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; import java.util.Collection; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; @ThreadLeakFilters(filters = HdfsClientThreadLeakFilter.class) public class HdfsRepositoryTests extends AbstractThirdPartyRepositoryTestCase { @@ -58,4 +60,14 @@ public class HdfsRepositoryTests extends AbstractThirdPartyRepositoryTestCase { ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); } + + // HDFS repository doesn't have precise cleanup stats so we only check whether or not any blobs were removed + @Override + protected void assertCleanupResponse(CleanupRepositoryResponse response, long bytes, long blobs) { + if (blobs > 0) { + assertThat(response.result().blobs(), greaterThan(0L)); + } else { + assertThat(response.result().blobs(), equalTo(0L)); + } + } } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index 9e9cef9cd0e6..46910d840cd0 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -32,7 +32,6 @@ import com.amazonaws.services.s3.model.ObjectMetadata; import com.amazonaws.services.s3.model.PartETag; import com.amazonaws.services.s3.model.PutObjectRequest; import 
com.amazonaws.services.s3.model.S3Object;
-import com.amazonaws.services.s3.model.S3ObjectSummary;
 import com.amazonaws.services.s3.model.UploadPartRequest;
 import com.amazonaws.services.s3.model.UploadPartResult;
 import org.apache.lucene.util.SetOnce;
@@ -42,6 +41,7 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.DeleteResult;
 import org.elasticsearch.common.blobstore.support.AbstractBlobContainer;
 import org.elasticsearch.common.blobstore.support.PlainBlobMetaData;
 import org.elasticsearch.common.collect.Tuple;
@@ -54,6 +54,7 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.Function;
 import java.util.stream.Collectors;
@@ -121,7 +122,9 @@
     }
 
     @Override
-    public void delete() throws IOException {
+    public DeleteResult delete() throws IOException {
+        final AtomicLong deletedBlobs = new AtomicLong();
+        final AtomicLong deletedBytes = new AtomicLong();
         try (AmazonS3Reference clientReference = blobStore.clientReference()) {
             ObjectListing prevListing = null;
             while (true) {
@@ -135,8 +138,12 @@
                     listObjectsRequest.setPrefix(keyPath);
                     list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(listObjectsRequest));
                 }
-                final List<String> blobsToDelete =
-                    list.getObjectSummaries().stream().map(S3ObjectSummary::getKey).collect(Collectors.toList());
+                final List<String> blobsToDelete = new ArrayList<>();
+                list.getObjectSummaries().forEach(s3ObjectSummary -> {
+                    deletedBlobs.incrementAndGet();
+                    deletedBytes.addAndGet(s3ObjectSummary.getSize());
+                    blobsToDelete.add(s3ObjectSummary.getKey());
+                });
                 if (list.isTruncated()) {
                     doDeleteBlobs(blobsToDelete, false);
                     prevListing = list;
@@ -150,6 +157,7 @@
         } catch (final AmazonClientException e) {
             throw new IOException("Exception when deleting blob container [" + keyPath + "]", e);
         }
+        return new DeleteResult(deletedBlobs.get(), deletedBytes.get());
     }
 
     @Override
diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle
index 15c44f38f7cf..1dc5ddd1acee 100644
--- a/qa/full-cluster-restart/build.gradle
+++ b/qa/full-cluster-restart/build.gradle
@@ -19,9 +19,10 @@
 
 import org.elasticsearch.gradle.Version
-import org.elasticsearch.gradle.test.RestIntegTestTask
+import org.elasticsearch.gradle.testclusters.RestTestRunnerTask
 
 apply plugin: 'elasticsearch.standalone-test'
+apply plugin: 'elasticsearch.testclusters'
 
 // This is a top level task which we will add dependencies to below.
 // It is a single task that can be used to backcompat tests against all versions.
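// Aside: a hedged, minimal sketch of the count-and-return pattern that the blob container
// delete() implementations above converge on. The deleteAll helper and its single-threaded
// loop are hypothetical simplifications (the Azure change counts inside async tasks, which is
// why it uses AtomicLong); DeleteResult refers to org.elasticsearch.common.blobstore.DeleteResult,
// and BlobContainer/BlobMetaData are the existing org.elasticsearch.common.blobstore types.
static DeleteResult deleteAll(BlobContainer container) throws IOException {
    long blobsDeleted = 0L;
    long bytesDeleted = 0L;
    // listBlobs() maps blob name -> metadata for everything under the container's path
    for (BlobMetaData blob : container.listBlobs().values()) {
        container.deleteBlobIgnoringIfNotExists(blob.name()); // tolerate blobs that are already gone
        blobsDeleted++;                                       // one more blob removed
        bytesDeleted += blob.length();                        // account for its size
    }
    // the totals returned from delete() are what repository cleanup reports back to the caller
    return new DeleteResult(blobsDeleted, bytesDeleted);
}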
@@ -30,65 +31,53 @@ task bwcTest { group = 'verification' } -for (Version version : bwcVersions.indexCompatible) { - String baseName = "v${version}" +for (Version bwcVersion : bwcVersions.indexCompatible) { + String baseName = "v${bwcVersion}" - Task oldClusterTest = tasks.create(name: "${baseName}#oldClusterTest", type: RestIntegTestTask) { - mustRunAfter(precommit) - } - tasks.getByName("${baseName}#oldClusterTestRunner").configure { - systemProperty 'tests.is_old_cluster', 'true' - systemProperty 'tests.old_cluster_version', version.toString().minus("-SNAPSHOT") - systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo/" + baseName) - } - - Object extension = extensions.findByName("${baseName}#oldClusterTestCluster") - configure(extensions.findByName("${baseName}#oldClusterTestCluster")) { - bwcVersion = version - numBwcNodes = 2 - numNodes = 2 - clusterName = 'full-cluster-restart' - - // some tests rely on the translog not being flushed - setting 'indices.memory.shard_inactive_time', '20m' - - if (version.onOrAfter('5.3.0')) { + testClusters { + "${baseName}" { + versions = [ bwcVersion.toString(), project.version ] + numberOfNodes = 2 + // some tests rely on the translog not being flushed + setting 'indices.memory.shard_inactive_time', '20m' setting 'http.content_type.required', 'true' + setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" + javaHome = project.file(project.ext.runtimeJavaHome) } } + tasks.register("${baseName}#oldClusterTest", RestTestRunnerTask) { + useCluster testClusters."${baseName}" + mustRunAfter(precommit) + doFirst { + project.delete("${buildDir}/cluster/shared/repo/${baseName}") + } - Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask) { - dependsOn(oldClusterTest, "${baseName}#oldClusterTestCluster#node0.stop") + systemProperty 'tests.is_old_cluster', 'true' } - configure(extensions.findByName("${baseName}#upgradedClusterTestCluster")) { - dependsOn oldClusterTest, - "${baseName}#oldClusterTestCluster#node0.stop", - "${baseName}#oldClusterTestCluster#node1.stop" - clusterName = 'full-cluster-restart' - - // some tests rely on the translog not being flushed - setting 'indices.memory.shard_inactive_time', '20m' - - numNodes = 2 - dataDir = { nodeNum -> oldClusterTest.nodes[nodeNum].dataDir } - cleanShared = false // We want to keep snapshots made by the old cluster! 
-  }
-
-  tasks.getByName("${baseName}#upgradedClusterTestRunner").configure {
+  tasks.register("${baseName}#upgradedClusterTest", RestTestRunnerTask) {
+    useCluster testClusters."${baseName}"
+    dependsOn "${baseName}#oldClusterTest"
+    doFirst {
+      testClusters."${baseName}".goToNextVersion()
+    }
     systemProperty 'tests.is_old_cluster', 'false'
-    systemProperty 'tests.old_cluster_version', version.toString().minus("-SNAPSHOT")
-    systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo")
-  }
-  Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") {
-    dependsOn = [upgradedClusterTest]
+  tasks.matching { it.name.startsWith(baseName) && it.name.endsWith("ClusterTest") }.configureEach {
+    it.systemProperty 'tests.old_cluster_version', bwcVersion.toString().minus("-SNAPSHOT")
+    it.systemProperty 'tests.path.repo', "${buildDir}/cluster/shared/repo/${baseName}"
+    it.nonInputProperties.systemProperty('tests.rest.cluster', "${-> testClusters."${baseName}".allHttpSocketURI.join(",") }")
+    it.nonInputProperties.systemProperty('tests.clustername', "${-> testClusters."${baseName}".getName() }")
   }
 
   if (project.bwc_tests_enabled) {
-    bwcTest.dependsOn(versionBwcTest)
+    bwcTest.dependsOn(
+      tasks.register("${baseName}#bwcTest") {
+        dependsOn tasks.named("${baseName}#upgradedClusterTest")
+      }
+    )
   }
 }
@@ -116,4 +105,4 @@ task testJar(type: Jar) {
 artifacts {
   testArtifacts testJar
-}
\ No newline at end of file
+}
diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java
index 13be323aee88..bc0c3368ee33 100644
--- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java
+++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java
@@ -235,8 +235,6 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
         Map<String, Object> clusterState = entityAsMap(client().performRequest(new Request("GET", "/_cluster/state")));
 
         // Check some global properties:
-        String clusterName = (String) clusterState.get("cluster_name");
-        assertEquals("full-cluster-restart", clusterName);
         String numberOfShards = (String) XContentMapValues.extractValue(
             "metadata.templates.template_1.settings.index.number_of_shards", clusterState);
         assertEquals("1", numberOfShards);
diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/VMTestRunner.java b/qa/os/src/test/java/org/elasticsearch/packaging/VMTestRunner.java
deleted file mode 100644
index a8fd2c277078..000000000000
--- a/qa/os/src/test/java/org/elasticsearch/packaging/VMTestRunner.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */ - -package org.elasticsearch.packaging; - -import org.junit.runner.JUnitCore; - -import java.nio.file.Files; -import java.nio.file.Paths; - -/** - * Ensures that the current JVM is running on a virtual machine before delegating to {@link JUnitCore}. We just check for the existence - * of a special file that we create during VM provisioning. - */ -public class VMTestRunner { - public static void main(String[] args) { - if (Files.exists(Paths.get("/is_vagrant_vm"))) { - JUnitCore.main(args); - } else { - throw new RuntimeException("This filesystem does not have an expected marker file indicating it's a virtual machine. These " + - "tests should only run in a virtual machine because they're destructive."); - } - } -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/ArchiveTestCase.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/ArchiveTests.java similarity index 94% rename from qa/os/src/test/java/org/elasticsearch/packaging/test/ArchiveTestCase.java rename to qa/os/src/test/java/org/elasticsearch/packaging/test/ArchiveTests.java index 46cdfc704558..8aad5185fd32 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/ArchiveTestCase.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/ArchiveTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.packaging.test; -import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; import org.apache.http.client.fluent.Request; import org.elasticsearch.packaging.util.Archives; import org.elasticsearch.packaging.util.Distribution; @@ -29,6 +28,7 @@ import org.elasticsearch.packaging.util.Platforms; import org.elasticsearch.packaging.util.ServerUtils; import org.elasticsearch.packaging.util.Shell; import org.elasticsearch.packaging.util.Shell.Result; +import org.junit.BeforeClass; import java.nio.file.Files; import java.nio.file.Path; @@ -55,13 +55,15 @@ import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.Matchers.isEmptyString; import static org.junit.Assume.assumeThat; +import static org.junit.Assume.assumeTrue; -/** - * Tests that apply to the archive distributions (tar, zip). To add a case for a distribution, subclass and - * override {@link ArchiveTestCase#distribution()}. 
These tests should be the same across all archive distributions - */ -@TestCaseOrdering(TestCaseOrdering.AlphabeticOrder.class) -public abstract class ArchiveTestCase extends PackagingTestCase { +public class ArchiveTests extends PackagingTestCase { + + @BeforeClass + public static void assumptions() { + assumeTrue("only archive distributions", + distribution().packaging == Distribution.Packaging.TAR || distribution().packaging == Distribution.Packaging.ZIP); + } public void test10Install() throws Exception { installation = installArchive(distribution()); @@ -362,7 +364,7 @@ public abstract class ArchiveTestCase extends PackagingTestCase { final Installation.Executables bin = installation.executables(); final Shell sh = newShell(); - if (distribution().equals(Distribution.DEFAULT_LINUX) || distribution().equals(Distribution.DEFAULT_WINDOWS)) { + if (distribution().isDefault()) { assertTrue(Files.exists(installation.lib.resolve("tools").resolve("security-cli"))); final Platforms.PlatformAction action = () -> { Result result = sh.run(bin.elasticsearchCertutil + " --help"); @@ -375,7 +377,7 @@ public abstract class ArchiveTestCase extends PackagingTestCase { }; Platforms.onLinux(action); Platforms.onWindows(action); - } else if (distribution().equals(Distribution.OSS_LINUX) || distribution().equals(Distribution.OSS_WINDOWS)) { + } else { assertFalse(Files.exists(installation.lib.resolve("tools").resolve("security-cli"))); } } @@ -391,7 +393,8 @@ public abstract class ArchiveTestCase extends PackagingTestCase { assertThat(result.stdout, containsString("A CLI tool to remove corrupted parts of unrecoverable shards")); }; - if (distribution().equals(Distribution.DEFAULT_LINUX) || distribution().equals(Distribution.DEFAULT_WINDOWS)) { + // TODO: this should be checked on all distributions + if (distribution().isDefault()) { Platforms.onLinux(action); Platforms.onWindows(action); } @@ -409,7 +412,8 @@ public abstract class ArchiveTestCase extends PackagingTestCase { containsString("A CLI tool to do unsafe cluster and index manipulations on current node")); }; - if (distribution().equals(Distribution.DEFAULT_LINUX) || distribution().equals(Distribution.DEFAULT_WINDOWS)) { + // TODO: this should be checked on all distributions + if (distribution().isDefault()) { Platforms.onLinux(action); Platforms.onWindows(action); } @@ -454,7 +458,8 @@ public abstract class ArchiveTestCase extends PackagingTestCase { containsString("Manages elasticsearch file users")); }; - if (distribution().equals(Distribution.DEFAULT_LINUX) || distribution().equals(Distribution.DEFAULT_WINDOWS)) { + // TODO: this should be checked on all distributions + if (distribution().isDefault()) { Platforms.onLinux(action); Platforms.onWindows(action); } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DebPreservationTestCase.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DebPreservationTests.java similarity index 90% rename from qa/os/src/test/java/org/elasticsearch/packaging/test/DebPreservationTestCase.java rename to qa/os/src/test/java/org/elasticsearch/packaging/test/DebPreservationTests.java index 12597ae8b4de..dc87d685d3f0 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/DebPreservationTestCase.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/DebPreservationTests.java @@ -21,15 +21,12 @@ package org.elasticsearch.packaging.test; import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; import org.elasticsearch.packaging.util.Distribution; -import 
org.elasticsearch.packaging.util.Installation; import org.elasticsearch.packaging.util.Shell; import org.junit.Before; -import org.junit.BeforeClass; import java.nio.file.Files; import java.nio.file.Paths; -import static org.elasticsearch.packaging.util.Cleanup.cleanEverything; import static org.elasticsearch.packaging.util.FileUtils.assertPathsDontExist; import static org.elasticsearch.packaging.util.FileUtils.assertPathsExist; import static org.elasticsearch.packaging.util.Packages.SYSVINIT_SCRIPT; @@ -46,21 +43,13 @@ import static org.junit.Assume.assumeThat; import static org.junit.Assume.assumeTrue; @TestCaseOrdering(TestCaseOrdering.AlphabeticOrder.class) -public abstract class DebPreservationTestCase extends PackagingTestCase { - - private static Installation installation; - - protected abstract Distribution distribution(); - - @BeforeClass - public static void cleanup() throws Exception { - installation = null; - cleanEverything(); - } +public class DebPreservationTests extends PackagingTestCase { @Before public void onlyCompatibleDistributions() { assumeTrue("only dpkg platforms", isDPKG()); + assumeTrue("deb distributions", distribution().packaging == Distribution.Packaging.DEB); + assumeTrue("only bundled jdk", distribution().hasJdk); assumeTrue("only compatible distributions", distribution().packaging.compatible); } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultDebBasicTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultDebBasicTests.java deleted file mode 100644 index cd40c0e9e814..000000000000 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultDebBasicTests.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.packaging.test; - -import org.elasticsearch.packaging.util.Distribution; - -public class DefaultDebBasicTests extends PackageTestCase { - - @Override - protected Distribution distribution() { - return Distribution.DEFAULT_DEB; - } - -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultDebPreservationTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultDebPreservationTests.java deleted file mode 100644 index d8b8c7f562bd..000000000000 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultDebPreservationTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.packaging.test; - -import org.elasticsearch.packaging.util.Distribution; - -public class DefaultDebPreservationTests extends DebPreservationTestCase { - - @Override - protected Distribution distribution() { - return Distribution.DEFAULT_DEB; - } -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultLinuxTarTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultLinuxTarTests.java deleted file mode 100644 index bcca1a7e9e04..000000000000 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultLinuxTarTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.packaging.test; - -import org.elasticsearch.packaging.util.Distribution; - -public class DefaultLinuxTarTests extends ArchiveTestCase { - - @Override - protected Distribution distribution() { - return Distribution.DEFAULT_LINUX; - } -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultNoJdkLinuxTarTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultNoJdkLinuxTarTests.java deleted file mode 100644 index fce7c5567181..000000000000 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultNoJdkLinuxTarTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.packaging.test; - -import org.elasticsearch.packaging.util.Distribution; - -public class DefaultNoJdkLinuxTarTests extends ArchiveTestCase { - - @Override - protected Distribution distribution() { - return Distribution.DEFAULT_NO_JDK_LINUX; - } -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultNoJdkRpmBasicTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultNoJdkRpmBasicTests.java deleted file mode 100644 index 3bb5aa8eae89..000000000000 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultNoJdkRpmBasicTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.packaging.test; - -import org.elasticsearch.packaging.util.Distribution; - -public class DefaultNoJdkRpmBasicTests extends PackageTestCase { - - @Override - protected Distribution distribution() { - return Distribution.DEFAULT_NO_JDK_RPM; - } -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultNoJdkWindowsZipTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultNoJdkWindowsZipTests.java deleted file mode 100644 index d797bdaa9f31..000000000000 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultNoJdkWindowsZipTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.packaging.test; - -import org.elasticsearch.packaging.util.Distribution; - -public class DefaultNoJdkWindowsZipTests extends ArchiveTestCase { - - @Override - protected Distribution distribution() { - return Distribution.DEFAULT_NO_JDK_WINDOWS; - } -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultRpmBasicTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultRpmBasicTests.java deleted file mode 100644 index a8ce7b48685b..000000000000 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultRpmBasicTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.packaging.test; - -import org.elasticsearch.packaging.util.Distribution; - -public class DefaultRpmBasicTests extends PackageTestCase { - - @Override - protected Distribution distribution() { - return Distribution.DEFAULT_RPM; - } -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultRpmPreservationTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultRpmPreservationTests.java deleted file mode 100644 index 633492cce6c2..000000000000 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultRpmPreservationTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.packaging.test; - -import org.elasticsearch.packaging.util.Distribution; - -public class DefaultRpmPreservationTests extends RpmPreservationTestCase { - - @Override - protected Distribution distribution() { - return Distribution.DEFAULT_RPM; - } -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultWindowsServiceTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultWindowsServiceTests.java deleted file mode 100644 index 6fedd3b89db7..000000000000 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultWindowsServiceTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.packaging.test; - -import org.elasticsearch.packaging.util.Distribution; - -public class DefaultWindowsServiceTests extends WindowsServiceTestCase { - - @Override - protected Distribution distribution() { - return Distribution.DEFAULT_WINDOWS; - } -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultWindowsZipTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultWindowsZipTests.java deleted file mode 100644 index a7491d5b0ac5..000000000000 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultWindowsZipTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.packaging.test; - -import org.elasticsearch.packaging.util.Distribution; - -public class DefaultWindowsZipTests extends ArchiveTestCase { - - @Override - protected Distribution distribution() { - return Distribution.DEFAULT_WINDOWS; - } -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/OssDebBasicTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/OssDebBasicTests.java deleted file mode 100644 index 38ef9c36a29e..000000000000 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/OssDebBasicTests.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.packaging.test; - -import junit.framework.TestCase; -import org.elasticsearch.packaging.util.Distribution; -import org.elasticsearch.packaging.util.Platforms; -import org.elasticsearch.packaging.util.Shell; - -import java.util.regex.Pattern; - -import static org.elasticsearch.packaging.util.Distribution.DEFAULT_DEB; -import static org.elasticsearch.packaging.util.Distribution.OSS_DEB; -import static org.elasticsearch.packaging.util.FileUtils.getDistributionFile; -import static org.junit.Assume.assumeTrue; - -public class OssDebBasicTests extends PackageTestCase { - - @Override - protected Distribution distribution() { - return Distribution.OSS_DEB; - } - - public void test11DebDependencies() { - assumeTrue(Platforms.isDPKG()); - - final Shell sh = new Shell(); - - final Shell.Result defaultResult = sh.run("dpkg -I " + getDistributionFile(DEFAULT_DEB)); - final Shell.Result ossResult = sh.run("dpkg -I " + getDistributionFile(OSS_DEB)); - - TestCase.assertTrue(Pattern.compile("(?m)^ Depends:.*bash.*").matcher(defaultResult.stdout).find()); - TestCase.assertTrue(Pattern.compile("(?m)^ Depends:.*bash.*").matcher(ossResult.stdout).find()); - - TestCase.assertTrue(Pattern.compile("(?m)^ Conflicts: elasticsearch-oss$").matcher(defaultResult.stdout).find()); - TestCase.assertTrue(Pattern.compile("(?m)^ Conflicts: elasticsearch$").matcher(ossResult.stdout).find()); - } -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/OssDebPreservationTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/OssDebPreservationTests.java deleted file mode 100644 index cfce73bb1600..000000000000 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/OssDebPreservationTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.packaging.test; - -import org.elasticsearch.packaging.util.Distribution; - -public class OssDebPreservationTests extends DebPreservationTestCase { - - @Override - protected Distribution distribution() { - return Distribution.OSS_DEB; - } -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/OssLinuxTarTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/OssLinuxTarTests.java deleted file mode 100644 index bf4305aab53e..000000000000 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/OssLinuxTarTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.packaging.test; - -import org.elasticsearch.packaging.util.Distribution; - -public class OssLinuxTarTests extends ArchiveTestCase { - - @Override - protected Distribution distribution() { - return Distribution.OSS_LINUX; - } -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/OssNoJdkDebBasicTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/OssNoJdkDebBasicTests.java deleted file mode 100644 index 47d2f662f4d3..000000000000 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/OssNoJdkDebBasicTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.packaging.test; - -import org.elasticsearch.packaging.util.Distribution; - -public class OssNoJdkDebBasicTests extends PackageTestCase { - - @Override - protected Distribution distribution() { - return Distribution.OSS_NO_JDK_DEB; - } -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/OssNoJdkLinuxTarTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/OssNoJdkLinuxTarTests.java deleted file mode 100644 index dae5068f3623..000000000000 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/OssNoJdkLinuxTarTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.packaging.test; - -import org.elasticsearch.packaging.util.Distribution; - -public class OssNoJdkLinuxTarTests extends ArchiveTestCase { - - @Override - protected Distribution distribution() { - return Distribution.OSS_NO_JDK_LINUX; - } -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/OssNoJdkRpmBasicTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/OssNoJdkRpmBasicTests.java deleted file mode 100644 index 1ebf70430398..000000000000 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/OssNoJdkRpmBasicTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.packaging.test; - -import org.elasticsearch.packaging.util.Distribution; - -public class OssNoJdkRpmBasicTests extends PackageTestCase { - - @Override - protected Distribution distribution() { - return Distribution.OSS_NO_JDK_RPM; - } -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/OssNoJdkWindowsZipTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/OssNoJdkWindowsZipTests.java deleted file mode 100644 index 639137e88798..000000000000 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/OssNoJdkWindowsZipTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.packaging.test; - -import org.elasticsearch.packaging.util.Distribution; - -public class OssNoJdkWindowsZipTests extends ArchiveTestCase { - - @Override - protected Distribution distribution() { - return Distribution.OSS_NO_JDK_WINDOWS; - } -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/OssRpmBasicTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/OssRpmBasicTests.java deleted file mode 100644 index 91abb5e0c5c5..000000000000 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/OssRpmBasicTests.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.packaging.test; - -import junit.framework.TestCase; -import org.elasticsearch.packaging.util.Distribution; -import org.elasticsearch.packaging.util.Platforms; -import org.elasticsearch.packaging.util.Shell; - -import java.util.regex.Pattern; - -import static org.elasticsearch.packaging.util.Distribution.DEFAULT_RPM; -import static org.elasticsearch.packaging.util.Distribution.OSS_RPM; -import static org.elasticsearch.packaging.util.FileUtils.getDistributionFile; -import static org.junit.Assume.assumeTrue; - -public class OssRpmBasicTests extends PackageTestCase { - - @Override - protected Distribution distribution() { - return Distribution.OSS_RPM; - } - - public void test11RpmDependencies() { - assumeTrue(Platforms.isRPM()); - - final Shell sh = new Shell(); - - final Shell.Result defaultDeps = sh.run("rpm -qpR " + getDistributionFile(DEFAULT_RPM)); - final Shell.Result ossDeps = sh.run("rpm -qpR " + getDistributionFile(OSS_RPM)); - - TestCase.assertTrue(Pattern.compile("(?m)^/bin/bash\\s*$").matcher(defaultDeps.stdout).find()); - TestCase.assertTrue(Pattern.compile("(?m)^/bin/bash\\s*$").matcher(ossDeps.stdout).find()); - - final Shell.Result defaultConflicts = sh.run("rpm -qp --conflicts " + getDistributionFile(DEFAULT_RPM)); - final Shell.Result ossConflicts = sh.run("rpm -qp --conflicts " + getDistributionFile(OSS_RPM)); - - TestCase.assertTrue(Pattern.compile("(?m)^elasticsearch-oss\\s*$").matcher(defaultConflicts.stdout).find()); - TestCase.assertTrue(Pattern.compile("(?m)^elasticsearch\\s*$").matcher(ossConflicts.stdout).find()); - } -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/OssRpmPreservationTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/OssRpmPreservationTests.java deleted file mode 100644 index 87071d687d03..000000000000 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/OssRpmPreservationTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.packaging.test; - -import org.elasticsearch.packaging.util.Distribution; - -public class OssRpmPreservationTests extends RpmPreservationTestCase { - - @Override - protected Distribution distribution() { - return Distribution.OSS_RPM; - } -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/OssWindowsServiceTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/OssWindowsServiceTests.java deleted file mode 100644 index bfa220c6aaf2..000000000000 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/OssWindowsServiceTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.packaging.test; - -import org.elasticsearch.packaging.util.Distribution; - -public class OssWindowsServiceTests extends WindowsServiceTestCase { - - @Override - protected Distribution distribution() { - return Distribution.OSS_WINDOWS; - } -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/OssWindowsZipTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/OssWindowsZipTests.java deleted file mode 100644 index 2a0df6cab96c..000000000000 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/OssWindowsZipTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.packaging.test; - -import org.elasticsearch.packaging.util.Distribution; - -public class OssWindowsZipTests extends ArchiveTestCase { - - @Override - protected Distribution distribution() { - return Distribution.OSS_WINDOWS; - } -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageConflictTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageConflictTests.java new file mode 100644 index 000000000000..93b1146d8393 --- /dev/null +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageConflictTests.java @@ -0,0 +1,82 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.packaging.test; + +import junit.framework.TestCase; +import org.elasticsearch.packaging.util.Distribution; +import org.elasticsearch.packaging.util.Platforms; +import org.elasticsearch.packaging.util.Shell; +import org.junit.Before; + +import java.util.regex.Pattern; + +import static org.elasticsearch.packaging.util.FileUtils.getDistributionFile; +import static org.junit.Assume.assumeTrue; + +public class PackageConflictTests extends PackagingTestCase { + + private Shell sh; + + @Before + public void onlyCompatibleDistributions() throws Exception { + assumeTrue("only compatible distributions", distribution().packaging.compatible); + assumeTrue("rpm or deb", + distribution().packaging == Distribution.Packaging.DEB || distribution().packaging == Distribution.Packaging.RPM); + sh = newShell(); + } + + public void test11DebDependencies() { + // TODO: rewrite this test to not use a real second distro to try and install + assumeTrue(Platforms.isDPKG()); + + final Shell sh = new Shell(); + + final Shell.Result result = sh.run("dpkg -I " + getDistributionFile(distribution())); + + TestCase.assertTrue(Pattern.compile("(?m)^ Depends:.*bash.*").matcher(result.stdout).find()); + + String oppositePackageName = "elasticsearch"; + if (distribution().isDefault()) { + oppositePackageName += "-oss"; + } + + TestCase.assertTrue(Pattern.compile("(?m)^ Conflicts: " + oppositePackageName + "$").matcher(result.stdout).find()); + } + + public void test11RpmDependencies() { + // TODO: rewrite this test to not use a real second distro to try and install + assumeTrue(Platforms.isRPM()); + + final Shell sh = new Shell(); + + final Shell.Result deps = sh.run("rpm -qpR " + getDistributionFile(distribution())); + + TestCase.assertTrue(Pattern.compile("(?m)^/bin/bash\\s*$").matcher(deps.stdout).find()); + + final Shell.Result conflicts = sh.run("rpm -qp --conflicts " + getDistributionFile(distribution())); + + String oppositePackageName = "elasticsearch"; + if (distribution().isDefault()) { + oppositePackageName += "-oss"; + } + + TestCase.assertTrue(Pattern.compile("(?m)^" + oppositePackageName + "\\s*$").matcher(conflicts.stdout).find()); + } +} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageTestCase.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageTests.java similarity index 98% rename from qa/os/src/test/java/org/elasticsearch/packaging/test/PackageTestCase.java rename to qa/os/src/test/java/org/elasticsearch/packaging/test/PackageTests.java index e8ab647a41bd..f9257f8959c0 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageTestCase.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.packaging.test; import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import org.apache.http.client.fluent.Request; +import org.elasticsearch.packaging.util.Distribution; import org.elasticsearch.packaging.util.FileUtils; import org.elasticsearch.packaging.util.Shell; import org.elasticsearch.packaging.util.Shell.Result; @@ -70,12 +71,14 @@ import static org.junit.Assume.assumeThat; import static org.junit.Assume.assumeTrue; @TestCaseOrdering(TestCaseOrdering.AlphabeticOrder.class) -public abstract class PackageTestCase extends PackagingTestCase { +public class PackageTests extends PackagingTestCase { private Shell sh; @Before public void onlyCompatibleDistributions() throws 
Exception { assumeTrue("only compatible distributions", distribution().packaging.compatible); + assumeTrue("rpm or deb", + distribution().packaging == Distribution.Packaging.DEB || distribution().packaging == Distribution.Packaging.RPM); sh = newShell(); } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java index bd7738aeac4a..d9ecb62f9bcc 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java @@ -36,6 +36,8 @@ import org.junit.Rule; import org.junit.rules.TestName; import org.junit.runner.RunWith; +import java.nio.file.Paths; + import static org.elasticsearch.packaging.util.Cleanup.cleanEverything; import static org.junit.Assume.assumeTrue; @@ -52,6 +54,11 @@ public abstract class PackagingTestCase extends Assert { protected final Log logger = LogFactory.getLog(getClass()); + private static Distribution distribution; + static { + distribution = new Distribution(Paths.get(System.getProperty("tests.distribution"))); + } + @Rule public final TestName testNameRule = new TestName(); @@ -70,7 +77,9 @@ public abstract class PackagingTestCase extends Assert { } /** The {@link Distribution} that should be tested in this case */ - protected abstract Distribution distribution(); + protected static Distribution distribution() { + return distribution; + } protected Shell newShell() throws Exception { Shell sh = new Shell(); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/RpmPreservationTestCase.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/RpmPreservationTests.java similarity index 92% rename from qa/os/src/test/java/org/elasticsearch/packaging/test/RpmPreservationTestCase.java rename to qa/os/src/test/java/org/elasticsearch/packaging/test/RpmPreservationTests.java index 7b6ac039fc55..79a1f1fe4939 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/RpmPreservationTestCase.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/RpmPreservationTests.java @@ -21,16 +21,13 @@ package org.elasticsearch.packaging.test; import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; import org.elasticsearch.packaging.util.Distribution; -import org.elasticsearch.packaging.util.Installation; import org.elasticsearch.packaging.util.Shell; import org.junit.Before; -import org.junit.BeforeClass; import java.nio.file.Files; import java.nio.file.Path; import java.util.stream.Stream; -import static org.elasticsearch.packaging.util.Cleanup.cleanEverything; import static org.elasticsearch.packaging.util.FileUtils.append; import static org.elasticsearch.packaging.util.FileUtils.assertPathsDontExist; import static org.elasticsearch.packaging.util.Packages.SYSTEMD_SERVICE; @@ -48,21 +45,13 @@ import static org.junit.Assume.assumeThat; import static org.junit.Assume.assumeTrue; @TestCaseOrdering(TestCaseOrdering.AlphabeticOrder.class) -public abstract class RpmPreservationTestCase extends PackagingTestCase { - - private static Installation installation; - - protected abstract Distribution distribution(); - - @BeforeClass - public static void cleanup() throws Exception { - installation = null; - cleanEverything(); - } +public class RpmPreservationTests extends PackagingTestCase { @Before public void onlyCompatibleDistributions() { assumeTrue("only rpm platforms", isRPM()); + assumeTrue("rpm distributions", distribution().packaging == 
Distribution.Packaging.RPM); + assumeTrue("only bundled jdk", distribution().hasJdk); assumeTrue("only compatible distributions", distribution().packaging.compatible); } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/WindowsServiceTestCase.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/WindowsServiceTests.java similarity index 99% rename from qa/os/src/test/java/org/elasticsearch/packaging/test/WindowsServiceTestCase.java rename to qa/os/src/test/java/org/elasticsearch/packaging/test/WindowsServiceTests.java index b0827513c923..faf1d13fec60 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/WindowsServiceTestCase.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/WindowsServiceTests.java @@ -41,7 +41,7 @@ import static org.elasticsearch.packaging.util.FileUtils.mv; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; -public abstract class WindowsServiceTestCase extends PackagingTestCase { +public class WindowsServiceTests extends PackagingTestCase { private static final String DEFAULT_ID = "elasticsearch-service-x64"; private static final String DEFAULT_DISPLAY_NAME = "Elasticsearch " + FileUtils.getCurrentVersion() + " (elasticsearch-service-x64)"; @@ -57,6 +57,7 @@ public abstract class WindowsServiceTestCase extends PackagingTestCase { @BeforeClass public static void ensureWindows() { assumeTrue(Platforms.WINDOWS); + assumeTrue(distribution().hasJdk); } @After diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Archives.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Archives.java index 2eb3a288fbcc..6a50b266ddc2 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Archives.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Archives.java @@ -19,6 +19,9 @@ package org.elasticsearch.packaging.util; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; @@ -52,6 +55,8 @@ import static org.junit.Assert.assertTrue; */ public class Archives { + private static final Log logger = LogFactory.getLog(Archives.class); + // in the future we'll run as a role user on Windows public static final String ARCHIVE_OWNER = Platforms.WINDOWS ? 
"vagrant" @@ -71,27 +76,27 @@ public class Archives { assertThat("distribution file must exist: " + distributionFile.toString(), Files.exists(distributionFile), is(true)); assertThat("elasticsearch must not already be installed", lsGlob(baseInstallPath, "elasticsearch*"), empty()); + logger.info("Installing file: " + distributionFile); + final String installCommand; if (distribution.packaging == Distribution.Packaging.TAR) { - - Platforms.onLinux(() -> sh.run("tar -C " + baseInstallPath + " -xzpf " + distributionFile)); - if (Platforms.WINDOWS) { - throw new RuntimeException("Distribution " + distribution + " is not supported on windows"); + throw new IllegalStateException("Distribution " + distribution + " is not supported on windows"); } + installCommand = "tar -C " + baseInstallPath + " -xzpf " + distributionFile; } else if (distribution.packaging == Distribution.Packaging.ZIP) { - - Platforms.onLinux(() -> sh.run("unzip " + distributionFile + " -d " + baseInstallPath)); - - Platforms.onWindows(() -> sh.run( + if (Platforms.WINDOWS == false) { + throw new IllegalStateException("Distribution " + distribution + " is not supported on linux"); + } + installCommand = "Add-Type -AssemblyName 'System.IO.Compression.Filesystem'; " + - "[IO.Compression.ZipFile]::ExtractToDirectory('" + distributionFile + "', '" + baseInstallPath + "')" - )); + "[IO.Compression.ZipFile]::ExtractToDirectory('" + distributionFile + "', '" + baseInstallPath + "')"; } else { throw new RuntimeException("Distribution " + distribution + " is not a known archive type"); } + sh.run(installCommand); assertThat("archive was extracted", Files.exists(extractedPath), is(true)); mv(extractedPath, fullInstallPath); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Distribution.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Distribution.java index b73438bc4c95..9d78a9983659 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Distribution.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Distribution.java @@ -19,63 +19,26 @@ package org.elasticsearch.packaging.util; +import java.nio.file.Path; import java.util.Locale; -public enum Distribution { - - OSS_LINUX(Packaging.TAR, Platform.LINUX, Flavor.OSS, true), - OSS_WINDOWS(Packaging.ZIP, Platform.WINDOWS, Flavor.OSS, true), - OSS_DARWIN(Packaging.TAR, Platform.DARWIN, Flavor.OSS, true), - OSS_DEB(Packaging.DEB, Platform.LINUX, Flavor.OSS, true), - OSS_RPM(Packaging.RPM, Platform.LINUX, Flavor.OSS, true), - - DEFAULT_LINUX(Packaging.TAR, Platform.LINUX, Flavor.DEFAULT, true), - DEFAULT_WINDOWS(Packaging.ZIP, Platform.WINDOWS, Flavor.DEFAULT, true), - DEFAULT_DARWIN(Packaging.TAR, Platform.DARWIN, Flavor.DEFAULT, true), - DEFAULT_DEB(Packaging.DEB, Platform.LINUX, Flavor.DEFAULT, true), - DEFAULT_RPM(Packaging.RPM, Platform.LINUX, Flavor.DEFAULT, true), - - OSS_NO_JDK_LINUX(Packaging.TAR, Platform.LINUX, Flavor.OSS, false), - OSS_NO_JDK_WINDOWS(Packaging.ZIP, Platform.WINDOWS, Flavor.OSS, false), - OSS_NO_JDK_DARWIN(Packaging.TAR, Platform.DARWIN, Flavor.OSS, false), - OSS_NO_JDK_DEB(Packaging.DEB, Platform.LINUX, Flavor.OSS, false), - OSS_NO_JDK_RPM(Packaging.RPM, Platform.LINUX, Flavor.OSS, false), - - DEFAULT_NO_JDK_LINUX(Packaging.TAR, Platform.LINUX, Flavor.DEFAULT, false), - DEFAULT_NO_JDK_WINDOWS(Packaging.ZIP, Platform.WINDOWS, Flavor.DEFAULT, false), - DEFAULT_NO_JDK_DARWIN(Packaging.TAR, Platform.DARWIN, Flavor.DEFAULT, false), - DEFAULT_NO_JDK_DEB(Packaging.DEB, Platform.LINUX, Flavor.DEFAULT, false), - 
DEFAULT_NO_JDK_RPM(Packaging.RPM, Platform.LINUX, Flavor.DEFAULT, false); +public class Distribution { + public final Path path; public final Packaging packaging; public final Platform platform; public final Flavor flavor; public final boolean hasJdk; - Distribution(Packaging packaging, Platform platform, Flavor flavor, boolean hasJdk) { - this.packaging = packaging; - this.platform = platform; - this.flavor = flavor; - this.hasJdk = hasJdk; - } - - public String filename(String version) { - String classifier = ""; - if (version.startsWith("6.") == false) { - - if (hasJdk == false) { - classifier += "-no-jdk"; - } - if (packaging == Packaging.DEB) { - classifier += "-amd64"; - } else { - if (packaging != Packaging.RPM) { - classifier += "-" + platform.toString(); - } - classifier += "-x86_64"; - } - } - return flavor.name + "-" + version + classifier + packaging.extension; + public Distribution(Path path) { + this.path = path; + String filename = path.getFileName().toString(); + int lastDot = filename.lastIndexOf('.'); + String extension = filename.substring(lastDot + 1); + this.packaging = Packaging.valueOf(extension.equals("gz") ? "TAR" : extension.toUpperCase(Locale.ROOT)); + this.platform = filename.contains("windows") ? Platform.WINDOWS : Platform.LINUX; + this.flavor = filename.contains("oss") ? Flavor.OSS : Flavor.DEFAULT; + this.hasJdk = filename.contains("no-jdk") == false; } public boolean isDefault() { diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/FileUtils.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/FileUtils.java index 857fad55eea9..94e156b1debf 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/FileUtils.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/FileUtils.java @@ -42,6 +42,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.StringJoiner; +import java.util.regex.Pattern; import java.util.zip.GZIPInputStream; import java.util.zip.ZipException; @@ -221,20 +222,19 @@ public class FileUtils { return getTempDir().resolve("elasticsearch"); } + private static final Pattern VERSION_REGEX = Pattern.compile("(\\d+\\.\\d+\\.\\d+(-SNAPSHOT)?)"); public static String getCurrentVersion() { - return slurp(getPackagingArchivesDir().resolve("version")); - } - - public static Path getPackagingArchivesDir() { - return Paths.get(""); // tests are started in the packaging archives dir, ie the empty relative path + // TODO: just load this once + String distroFile = System.getProperty("tests.distribution"); + java.util.regex.Matcher matcher = VERSION_REGEX.matcher(distroFile); + if (matcher.find()) { + return matcher.group(1); + } + throw new IllegalStateException("Could not find version in filename: " + distroFile); } public static Path getDistributionFile(Distribution distribution) { - return getDistributionFile(distribution, getCurrentVersion()); - } - - public static Path getDistributionFile(Distribution distribution, String version) { - return getPackagingArchivesDir().resolve(distribution.filename(version)); + return distribution.path; } public static void assertPathsExist(Path... 
paths) { diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java index 4d528b96c32e..3655a390870f 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java @@ -19,6 +19,8 @@ package org.elasticsearch.packaging.util; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.elasticsearch.packaging.util.Shell.Result; import java.io.IOException; @@ -38,7 +40,6 @@ import static org.elasticsearch.packaging.util.FileMatcher.p660; import static org.elasticsearch.packaging.util.FileMatcher.p750; import static org.elasticsearch.packaging.util.FileMatcher.p755; import static org.elasticsearch.packaging.util.FileUtils.getCurrentVersion; -import static org.elasticsearch.packaging.util.FileUtils.getDistributionFile; import static org.elasticsearch.packaging.util.Platforms.isSysVInit; import static org.elasticsearch.packaging.util.Platforms.isSystemd; import static org.elasticsearch.packaging.util.ServerUtils.waitForElasticsearch; @@ -51,6 +52,8 @@ import static org.junit.Assert.assertTrue; public class Packages { + private static final Log logger = LogFactory.getLog(Packages.class); + public static final Path SYSVINIT_SCRIPT = Paths.get("/etc/init.d/elasticsearch"); public static final Path SYSTEMD_SERVICE = Paths.get("/usr/lib/systemd/system/elasticsearch.service"); @@ -81,6 +84,7 @@ public class Packages { final Shell sh = new Shell(); final Result result; + logger.info("Package type: " + distribution.packaging); if (distribution.packaging == Distribution.Packaging.RPM) { result = sh.runIgnoreExitCode("rpm -qe " + distribution.flavor.name); } else { @@ -91,18 +95,14 @@ public class Packages { } public static Installation install(Distribution distribution) throws IOException { - return install(distribution, getCurrentVersion()); - } - - public static Installation install(Distribution distribution, String version) throws IOException { Shell sh = new Shell(); String systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); if (distribution.hasJdk == false) { sh.getEnv().put("JAVA_HOME", systemJavaHome); } - final Result result = runInstallCommand(distribution, version, sh); + final Result result = runInstallCommand(distribution, sh); if (result.exitCode != 0) { - throw new RuntimeException("Installing distribution " + distribution + " version " + version + " failed: " + result); + throw new RuntimeException("Installing distribution " + distribution + " failed: " + result); } Installation installation = Installation.ofPackage(distribution.packaging); @@ -114,8 +114,8 @@ public class Packages { return installation; } - public static Result runInstallCommand(Distribution distribution, String version, Shell sh) { - final Path distributionFile = getDistributionFile(distribution, version); + public static Result runInstallCommand(Distribution distribution, Shell sh) { + final Path distributionFile = distribution.path; if (Platforms.isRPM()) { return sh.runIgnoreExitCode("rpm -i " + distributionFile); diff --git a/qa/os/windows-2012r2/build.gradle b/qa/os/windows-2012r2/build.gradle index f49de70eae74..1a35b3ba9156 100644 --- a/qa/os/windows-2012r2/build.gradle +++ b/qa/os/windows-2012r2/build.gradle @@ -1,3 +1,4 @@ +import org.elasticsearch.gradle.test.GradleDistroTestTask String boxId = project.properties.get('vagrant.windows-2012r2.id') if (boxId != null) { @@ -5,7 +6,8 @@ if 
(boxId != null) { hostEnv 'VAGRANT_WINDOWS_2012R2_BOX', boxId } } else { - tasks.named('distroTest').configure { + // box id was not supplied, so disable the distro tests + tasks.withType(GradleDistroTestTask).configureEach { onlyIf { false } } } diff --git a/qa/os/windows-2016/build.gradle b/qa/os/windows-2016/build.gradle index e0cfa1c6875f..6f87222324b4 100644 --- a/qa/os/windows-2016/build.gradle +++ b/qa/os/windows-2016/build.gradle @@ -1,3 +1,4 @@ +import org.elasticsearch.gradle.test.GradleDistroTestTask String boxId = project.properties.get('vagrant.windows-2016.id') if (boxId != null) { @@ -5,7 +6,8 @@ if (boxId != null) { hostEnv 'VAGRANT_WINDOWS_2016_BOX', boxId } } else { - tasks.named('distroTest').configure { - onlyIf { true } + // box id was not supplied, so disable the distro tests + tasks.withType(GradleDistroTestTask).configureEach { + onlyIf { false } } } diff --git a/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java b/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java index 16e0227f270d..9ac33105ed43 100644 --- a/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java +++ b/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java @@ -56,6 +56,7 @@ public class WildflyIT extends LuceneTestCase { private Logger logger = LogManager.getLogger(WildflyIT.class); + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/45625") public void testRestClient() throws URISyntaxException, IOException { try (CloseableHttpClient client = HttpClientBuilder.create().build()) { final String str = String.format( diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.cleanup_repository.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.cleanup_repository.json new file mode 100644 index 000000000000..43c1687b8b5d --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.cleanup_repository.json @@ -0,0 +1,34 @@ +{ + "snapshot.cleanup_repository": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "stability": "stable", + "url": { + "paths": [ + { + "path": "/_snapshot/{repository}/_cleanup", + "methods": [ + "POST" + ], + "parts": { + "repository": { + "type": "string", + "required" : true, + "description": "A repository name" + } + } + } + ] + }, + "params": { + "master_timeout": { + "type" : "time", + "description" : "Explicit operation timeout for connection to master node" + }, + "timeout": { + "type" : "time", + "description" : "Explicit operation timeout" + } + }, + "body": {} + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml index 016bd5912d3a..df602f0c8da3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml @@ -1,14 +1,14 @@ --- -setup: +"Translog retention without soft_deletes": - do: indices.create: - index: test + index: test + body: + settings: + soft_deletes.enabled: false - do: cluster.health: wait_for_no_initializing_shards: true - ---- -"Translog retention": - do: indices.stats: metric: [ translog ] @@ -64,6 +64,53 @@ setup: - lte: { indices.test.primaries.translog.uncommitted_size_in_bytes: $creation_size } - match: { indices.test.primaries.translog.uncommitted_operations: 0 } +--- +"Translog retention with soft_deletes": + - skip: + 
version: " - 7.9.99" + reason: "start ignoring translog retention policy with soft-deletes enabled in 8.0" + - do: + indices.create: + index: test + body: + settings: + soft_deletes.enabled: true + - do: + cluster.health: + wait_for_no_initializing_shards: true + - do: + indices.stats: + metric: [ translog ] + - set: { indices.test.primaries.translog.size_in_bytes: creation_size } + + - do: + index: + index: test + id: 1 + body: { "foo": "bar" } + + - do: + indices.stats: + metric: [ translog ] + - gt: { indices.test.primaries.translog.size_in_bytes: $creation_size } + - match: { indices.test.primaries.translog.operations: 1 } + - match: { indices.test.primaries.translog.uncommitted_operations: 1 } + # call flush twice to sync the global checkpoint after the last operation so that we can have the safe commit + - do: + indices.flush: + index: test + - do: + indices.flush: + index: test + - do: + indices.stats: + metric: [ translog ] + # after flushing we have one empty translog file while an empty index before flushing has two empty translog files. + - lt: { indices.test.primaries.translog.size_in_bytes: $creation_size } + - match: { indices.test.primaries.translog.operations: 0 } + - lt: { indices.test.primaries.translog.uncommitted_size_in_bytes: $creation_size } + - match: { indices.test.primaries.translog.uncommitted_operations: 0 } + --- "Translog last modified age stats": @@ -79,11 +126,20 @@ setup: - gte: { indices.test.primaries.translog.earliest_last_modified_age: 0 } --- -"Translog stats on closed indices": +"Translog stats on closed indices without soft-deletes": - skip: version: " - 7.2.99" reason: "closed indices have translog stats starting version 7.3.0" + - do: + indices.create: + index: test + body: + settings: + soft_deletes.enabled: false + - do: + cluster.health: + wait_for_no_initializing_shards: true - do: index: index: test @@ -121,3 +177,40 @@ setup: forbid_closed_indices: false - match: { indices.test.primaries.translog.operations: 3 } - match: { indices.test.primaries.translog.uncommitted_operations: 0 } + +--- +"Translog stats on closed indices with soft-deletes": + - skip: + version: " - 7.9.99" + reason: "start ignoring translog retention policy with soft-deletes enabled in 8.0" + - do: + indices.create: + index: test + body: + settings: + soft_deletes.enabled: true + - do: + cluster.health: + wait_for_no_initializing_shards: true + - do: + index: + index: test + id: 1 + body: { "foo": "bar" } + - do: + indices.stats: + metric: [ translog ] + - match: { indices.test.primaries.translog.operations: 1 } + - match: { indices.test.primaries.translog.uncommitted_operations: 1 } + - do: + indices.close: + index: test + wait_for_active_shards: 1 + - is_true: acknowledged + - do: + indices.stats: + metric: [ translog ] + expand_wildcards: all + forbid_closed_indices: false + - match: { indices.test.primaries.translog.operations: 0 } + - match: { indices.test.primaries.translog.uncommitted_operations: 0 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.create/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.create/10_basic.yml index 0a5a7260a27a..2a33cfbda63d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.create/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.create/10_basic.yml @@ -38,6 +38,51 @@ setup: - match: { acknowledged: true } +--- +"Create a snapshot and clean up repository": + - skip: + version: " - 7.99.99" + reason: cleanup introduced in 8.0 + + - 
do: + snapshot.cleanup_repository: + repository: test_repo_create_1 + + - match: { results.deleted_bytes: 0 } + - match: { results.deleted_blobs: 0 } + + - do: + snapshot.create: + repository: test_repo_create_1 + snapshot: test_snapshot + wait_for_completion: true + + - match: { snapshot.snapshot: test_snapshot } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.shards.successful: 1 } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.cleanup_repository: + repository: test_repo_create_1 + + - match: { results.deleted_bytes: 0 } + - match: { results.deleted_blobs: 0 } + + - do: + snapshot.delete: + repository: test_repo_create_1 + snapshot: test_snapshot + + - match: { acknowledged: true } + + - do: + snapshot.cleanup_repository: + repository: test_repo_create_1 + + - match: { results.deleted_bytes: 0 } + - match: { results.deleted_blobs: 0 } + --- "Create a snapshot for missing index": diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 643e73963c9b..be5be216596c 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -47,6 +47,8 @@ import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageAction; import org.elasticsearch.action.admin.cluster.node.usage.TransportNodesUsageAction; import org.elasticsearch.action.admin.cluster.remote.RemoteInfoAction; import org.elasticsearch.action.admin.cluster.remote.TransportRemoteInfoAction; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryAction; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.TransportCleanupRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.delete.TransportDeleteRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction; @@ -229,6 +231,7 @@ import org.elasticsearch.rest.action.RestFieldCapabilitiesAction; import org.elasticsearch.rest.action.RestMainAction; import org.elasticsearch.rest.action.admin.cluster.RestAddVotingConfigExclusionAction; import org.elasticsearch.rest.action.admin.cluster.RestCancelTasksAction; +import org.elasticsearch.rest.action.admin.cluster.RestCleanupRepositoryAction; import org.elasticsearch.rest.action.admin.cluster.RestClearVotingConfigExclusionsAction; import org.elasticsearch.rest.action.admin.cluster.RestClusterAllocationExplainAction; import org.elasticsearch.rest.action.admin.cluster.RestClusterGetSettingsAction; @@ -451,6 +454,7 @@ public class ActionModule extends AbstractModule { actions.register(GetRepositoriesAction.INSTANCE, TransportGetRepositoriesAction.class); actions.register(DeleteRepositoryAction.INSTANCE, TransportDeleteRepositoryAction.class); actions.register(VerifyRepositoryAction.INSTANCE, TransportVerifyRepositoryAction.class); + actions.register(CleanupRepositoryAction.INSTANCE, TransportCleanupRepositoryAction.class); actions.register(GetSnapshotsAction.INSTANCE, TransportGetSnapshotsAction.class); actions.register(DeleteSnapshotAction.INSTANCE, TransportDeleteSnapshotAction.class); actions.register(CreateSnapshotAction.INSTANCE, TransportCreateSnapshotAction.class); @@ -582,6 +586,7 @@ public class ActionModule extends AbstractModule { registerHandler.accept(new RestGetRepositoriesAction(restController, settingsFilter)); 
registerHandler.accept(new RestDeleteRepositoryAction(restController)); registerHandler.accept(new RestVerifyRepositoryAction(restController)); + registerHandler.accept(new RestCleanupRepositoryAction(restController)); registerHandler.accept(new RestGetSnapshotsAction(restController)); registerHandler.accept(new RestCreateSnapshotAction(restController)); registerHandler.accept(new RestRestoreSnapshotAction(restController)); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultNoJdkDebBasicTests.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryAction.java similarity index 63% rename from qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultNoJdkDebBasicTests.java rename to server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryAction.java index 23c87b6e936c..af57e6d4f00f 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/DefaultNoJdkDebBasicTests.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryAction.java @@ -16,16 +16,16 @@ * specific language governing permissions and limitations * under the License. */ +package org.elasticsearch.action.admin.cluster.repositories.cleanup; -package org.elasticsearch.packaging.test; +import org.elasticsearch.action.ActionType; -import org.elasticsearch.packaging.util.Distribution; +public final class CleanupRepositoryAction extends ActionType { -public class DefaultNoJdkDebBasicTests extends PackageTestCase { + public static final CleanupRepositoryAction INSTANCE = new CleanupRepositoryAction(); + public static final String NAME = "cluster:admin/repository/_cleanup"; - @Override - protected Distribution distribution() { - return Distribution.DEFAULT_NO_JDK_DEB; + private CleanupRepositoryAction() { + super(NAME, CleanupRepositoryResponse::new); } - } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java new file mode 100644 index 000000000000..168cdbb49670 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.action.admin.cluster.repositories.cleanup; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public class CleanupRepositoryRequest extends AcknowledgedRequest { + + private String repository; + + public CleanupRepositoryRequest(String repository) { + this.repository = repository; + } + + public CleanupRepositoryRequest(StreamInput in) throws IOException { + repository = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(repository); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (repository == null) { + validationException = addValidationError("repository is null", null); + } + return validationException; + } + + public String name() { + return repository; + } + + public void name(String repository) { + this.repository = repository; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java new file mode 100644 index 000000000000..2f7e6aefdcc9 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.action.admin.cluster.repositories.cleanup; + +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +public class CleanupRepositoryRequestBuilder extends MasterNodeOperationRequestBuilder { + + public CleanupRepositoryRequestBuilder(ElasticsearchClient client, ActionType action, + String repository) { + super(client, action, new CleanupRepositoryRequest(repository)); + } + + public CleanupRepositoryRequestBuilder setName(String repository) { + request.name(repository); + return this; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java new file mode 100644 index 000000000000..8516ece92579 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.action.admin.cluster.repositories.cleanup; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.repositories.RepositoryCleanupResult; + +import java.io.IOException; + +public final class CleanupRepositoryResponse extends ActionResponse implements ToXContentObject { + + private static final ObjectParser PARSER = + new ObjectParser<>(CleanupRepositoryResponse.class.getName(), true, CleanupRepositoryResponse::new); + + static { + PARSER.declareObject((response, cleanupResult) -> response.result = cleanupResult, + RepositoryCleanupResult.PARSER, new ParseField("results")); + } + + private RepositoryCleanupResult result; + + public CleanupRepositoryResponse() { + } + + public CleanupRepositoryResponse(RepositoryCleanupResult result) { + this.result = result; + } + + public CleanupRepositoryResponse(StreamInput in) throws IOException { + result = new RepositoryCleanupResult(in); + } + + public RepositoryCleanupResult result() { + return result; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + result.writeTo(out); + } + + public static CleanupRepositoryResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject().field("results"); + result.toXContent(builder, params); + builder.endObject(); + return builder; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java new file mode 100644 index 000000000000..f234a9e064ae --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -0,0 +1,249 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.action.admin.cluster.repositories.cleanup; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.RepositoryCleanupInProgress; +import org.elasticsearch.cluster.SnapshotDeletionsInProgress; +import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.Repository; +import org.elasticsearch.repositories.RepositoryCleanupResult; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; + +/** + * Repository cleanup action for repository implementations based on {@link BlobStoreRepository}. + * + * The steps taken by the repository cleanup operation are as follows: + *
+ * <ol>
+ *     <li>Check that there are no running repository cleanup, snapshot create, or snapshot delete actions
+ *     and add an entry for the repository that is to be cleaned up to {@link RepositoryCleanupInProgress}</li>
+ *     <li>Run cleanup actions on the repository. Note, these are executed exclusively on the master node.
+ *     For the precise operations executed, see {@link BlobStoreRepository#cleanup}</li>
+ *     <li>Remove the entry in {@link RepositoryCleanupInProgress} that was added in the first step.</li>
+ * </ol>
+ * + * On master failover during the cleanup operation it is simply removed from the cluster state. This is safe because the logic in + * {@link BlobStoreRepository#cleanup} ensures that the repository state id has not changed between creation of the cluster state entry + * and any delete/write operations. TODO: This will not work if we also want to clean up at the shard level as those will involve writes + * as well as deletes. + */ +public final class TransportCleanupRepositoryAction extends TransportMasterNodeAction { + + private static final Logger logger = LogManager.getLogger(TransportCleanupRepositoryAction.class); + + private static final Version MIN_VERSION = Version.V_8_0_0; + + private final RepositoriesService repositoriesService; + + @Override + protected String executor() { + return ThreadPool.Names.GENERIC; + } + + @Inject + public TransportCleanupRepositoryAction(TransportService transportService, ClusterService clusterService, + RepositoriesService repositoriesService, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(CleanupRepositoryAction.NAME, transportService, clusterService, threadPool, actionFilters, + CleanupRepositoryRequest::new, indexNameExpressionResolver); + this.repositoriesService = repositoriesService; + // We add a state applier that will remove any dangling repository cleanup actions on master failover. + // This is safe to do since cleanups will increment the repository state id before executing any operations to prevent concurrent + // operations from corrupting the repository. This is the same safety mechanism used by snapshot deletes. + clusterService.addStateApplier(event -> { + if (event.localNodeMaster() && event.previousState().nodes().isLocalNodeElectedMaster() == false) { + final RepositoryCleanupInProgress repositoryCleanupInProgress = event.state().custom(RepositoryCleanupInProgress.TYPE); + if (repositoryCleanupInProgress == null || repositoryCleanupInProgress.cleanupInProgress() == false) { + return; + } + clusterService.submitStateUpdateTask("clean up repository cleanup task after master failover", + new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return removeInProgressCleanup(currentState); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + logger.debug("Removed repository cleanup task [{}] from cluster state", repositoryCleanupInProgress); + } + + @Override + public void onFailure(String source, Exception e) { + logger.warn( + "Failed to remove repository cleanup task [{}] from cluster state", repositoryCleanupInProgress); + } + }); + } + }); + } + + private static ClusterState removeInProgressCleanup(final ClusterState currentState) { + RepositoryCleanupInProgress cleanupInProgress = currentState.custom(RepositoryCleanupInProgress.TYPE); + if (cleanupInProgress != null) { + boolean changed = false; + if (cleanupInProgress.cleanupInProgress() == false) { + cleanupInProgress = new RepositoryCleanupInProgress(); + changed = true; + } + if (changed) { + return ClusterState.builder(currentState).putCustom( + RepositoryCleanupInProgress.TYPE, cleanupInProgress).build(); + } + } + return currentState; + } + + @Override + protected CleanupRepositoryResponse read(StreamInput in) throws IOException { + return new CleanupRepositoryResponse(in); + } + + @Override + protected void masterOperation(Task task, CleanupRepositoryRequest request, ClusterState 
state, + ActionListener listener) { + if (state.nodes().getMinNodeVersion().onOrAfter(MIN_VERSION)) { + cleanupRepo(request.name(), ActionListener.map(listener, CleanupRepositoryResponse::new)); + } else { + throw new IllegalArgumentException("Repository cleanup is only supported from version [" + MIN_VERSION + + "] but the oldest node version in the cluster is [" + state.nodes().getMinNodeVersion() + ']'); + } + } + + @Override + protected ClusterBlockException checkBlock(CleanupRepositoryRequest request, ClusterState state) { + // Cluster is not affected but we look up repositories in metadata + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } + + /** + * Runs cleanup operations on the given repository. + * @param repositoryName Repository to clean up + * @param listener Listener for cleanup result + */ + private void cleanupRepo(String repositoryName, ActionListener listener) { + final Repository repository = repositoriesService.repository(repositoryName); + if (repository instanceof BlobStoreRepository == false) { + listener.onFailure(new IllegalArgumentException("Repository [" + repositoryName + "] does not support repository cleanup")); + return; + } + final BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; + final long repositoryStateId = repository.getRepositoryData().getGenId(); + logger.info("Running cleanup operations on repository [{}][{}]", repositoryName, repositoryStateId); + clusterService.submitStateUpdateTask("cleanup repository [" + repositoryName + "][" + repositoryStateId + ']', + new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + final RepositoryCleanupInProgress repositoryCleanupInProgress = currentState.custom(RepositoryCleanupInProgress.TYPE); + if (repositoryCleanupInProgress != null && repositoryCleanupInProgress.cleanupInProgress() == false) { + throw new IllegalStateException( + "Cannot cleanup [" + repositoryName + "] - a repository cleanup is already in-progress"); + } + SnapshotDeletionsInProgress deletionsInProgress = currentState.custom(SnapshotDeletionsInProgress.TYPE); + if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) { + throw new IllegalStateException("Cannot cleanup [" + repositoryName + "] - a snapshot is currently being deleted"); + } + SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); + if (snapshots != null && !snapshots.entries().isEmpty()) { + throw new IllegalStateException("Cannot cleanup [" + repositoryName + "] - a snapshot is currently running"); + } + return ClusterState.builder(currentState).putCustom(RepositoryCleanupInProgress.TYPE, + new RepositoryCleanupInProgress( + RepositoryCleanupInProgress.startedEntry(repositoryName, repositoryStateId))).build(); + } + + @Override + public void onFailure(String source, Exception e) { + after(e, null); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + logger.debug("Initialized repository cleanup in cluster state for [{}][{}]", repositoryName, repositoryStateId); + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.wrap(listener, + l -> blobStoreRepository.cleanup( + repositoryStateId, ActionListener.wrap(result -> after(null, result), e -> after(e, null))))); + } + + private void after(@Nullable Exception failure, @Nullable RepositoryCleanupResult result) { + if (failure == null) { + logger.debug("Finished repository cleanup operations on 
[{}][{}]", repositoryName, repositoryStateId); + } else { + logger.debug(() -> new ParameterizedMessage( + "Failed to finish repository cleanup operations on [{}][{}]", repositoryName, repositoryStateId), failure); + } + assert failure != null || result != null; + clusterService.submitStateUpdateTask( + "remove repository cleanup task [" + repositoryName + "][" + repositoryStateId + ']', + new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return removeInProgressCleanup(currentState); + } + + @Override + public void onFailure(String source, Exception e) { + if (failure != null) { + e.addSuppressed(failure); + } + logger.warn(() -> + new ParameterizedMessage("[{}] failed to remove repository cleanup task", repositoryName), e); + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + if (failure == null) { + logger.info("Done with repository cleanup on [{}][{}] with result [{}]", + repositoryName, repositoryStateId, result); + listener.onResponse(result); + } else { + logger.warn(() -> new ParameterizedMessage("Failed to run repository cleanup operations on [{}][{}]", + repositoryName, repositoryStateId), failure); + listener.onFailure(failure); + } + } + }); + } + }); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index c44c8ed23bcc..24a0ecef3e86 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -386,6 +386,9 @@ public class CreateIndexRequest extends AcknowledgedRequest for (Map.Entry entry : source.entrySet()) { String name = entry.getKey(); if (SETTINGS.match(name, deprecationHandler)) { + if (entry.getValue() instanceof Map == false) { + throw new ElasticsearchParseException("key [settings] must be an object"); + } settings((Map) entry.getValue()); } else if (MAPPINGS.match(name, deprecationHandler)) { Map mappings = (Map) entry.getValue(); diff --git a/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java b/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java index cd874b62a40b..fdee39fdb1f9 100644 --- a/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java +++ b/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java @@ -49,6 +49,9 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequest; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequestBuilder; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageResponse; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequestBuilder; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequestBuilder; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; @@ -453,6 +456,21 @@ public interface ClusterAdminClient extends 
ElasticsearchClient { */ GetRepositoriesRequestBuilder prepareGetRepositories(String... name); + /** + * Cleans up repository. + */ + CleanupRepositoryRequestBuilder prepareCleanupRepository(String repository); + + /** + * Cleans up repository. + */ + ActionFuture cleanupRepository(CleanupRepositoryRequest repository); + + /** + * Cleans up repository. + */ + void cleanupRepository(CleanupRepositoryRequest repository, ActionListener listener); + /** * Verifies a repository. */ diff --git a/server/src/main/java/org/elasticsearch/client/Requests.java b/server/src/main/java/org/elasticsearch/client/Requests.java index a3eb23eebfe2..fa7bc73c8b9f 100644 --- a/server/src/main/java/org/elasticsearch/client/Requests.java +++ b/server/src/main/java/org/elasticsearch/client/Requests.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksReque import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequest; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; @@ -460,6 +461,16 @@ public class Requests { return new DeleteRepositoryRequest(name); } + /** + * Cleanup repository + * + * @param name repository name + * @return cleanup repository request + */ + public static CleanupRepositoryRequest cleanupRepositoryRequest(String name) { + return new CleanupRepositoryRequest(name); + } + /** * Verifies snapshot repository * diff --git a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java index c3119256fc7c..283b8dc0a284 100644 --- a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -64,6 +64,10 @@ import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageAction; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequest; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequestBuilder; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageResponse; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryAction; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequestBuilder; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequestBuilder; @@ -1004,6 +1008,21 @@ public abstract class AbstractClient implements Client { return new GetRepositoriesRequestBuilder(this, GetRepositoriesAction.INSTANCE, name); } + @Override + public CleanupRepositoryRequestBuilder prepareCleanupRepository(String repository) { + return new CleanupRepositoryRequestBuilder(this, CleanupRepositoryAction.INSTANCE, 
repository); + } + + @Override + public ActionFuture cleanupRepository(CleanupRepositoryRequest request) { + return execute(CleanupRepositoryAction.INSTANCE, request); + } + + @Override + public void cleanupRepository(CleanupRepositoryRequest request, ActionListener listener) { + execute(CleanupRepositoryAction.INSTANCE, request, listener); + } + @Override public ActionFuture restoreSnapshot(RestoreSnapshotRequest request) { return execute(RestoreSnapshotAction.INSTANCE, request); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index d0448e2be22c..e445615e0fc7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -118,6 +118,8 @@ public class ClusterModule extends AbstractModule { registerClusterCustom(entries, RestoreInProgress.TYPE, RestoreInProgress::new, RestoreInProgress::readDiffFrom); registerClusterCustom(entries, SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress::new, SnapshotDeletionsInProgress::readDiffFrom); + registerClusterCustom(entries, RepositoryCleanupInProgress.TYPE, RepositoryCleanupInProgress::new, + RepositoryCleanupInProgress::readDiffFrom); // Metadata registerMetaDataCustom(entries, RepositoriesMetaData.TYPE, RepositoriesMetaData::new, RepositoriesMetaData::readDiffFrom); registerMetaDataCustom(entries, IngestMetadata.TYPE, IngestMetadata::new, IngestMetadata::readDiffFrom); diff --git a/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java b/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java new file mode 100644 index 000000000000..9dfd5284fd8c --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java @@ -0,0 +1,120 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster; + +import org.elasticsearch.Version; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +public final class RepositoryCleanupInProgress extends AbstractNamedDiffable implements ClusterState.Custom { + + public static final String TYPE = "repository_cleanup"; + + private final List entries; + + public RepositoryCleanupInProgress(Entry... 
entries) { + this.entries = Arrays.asList(entries); + } + + RepositoryCleanupInProgress(StreamInput in) throws IOException { + this.entries = in.readList(Entry::new); + } + + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(ClusterState.Custom.class, TYPE, in); + } + + public static Entry startedEntry(String repository, long repositoryStateId) { + return new Entry(repository, repositoryStateId); + } + + public boolean cleanupInProgress() { + // TODO: Should we allow parallelism across repositories here maybe? + return entries.isEmpty(); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeList(entries); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray(TYPE); + for (Entry entry : entries) { + builder.startObject(); + { + builder.field("repository", entry.repository); + } + builder.endObject(); + } + builder.endArray(); + return builder; + } + + @Override + public String toString() { + return Strings.toString(this); + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.V_8_0_0; + } + + public static final class Entry implements Writeable { + + private final String repository; + + private final long repositoryStateId; + + private Entry(StreamInput in) throws IOException { + repository = in.readString(); + repositoryStateId = in.readLong(); + } + + private Entry(String repository, long repositoryStateId) { + this.repository = repository; + this.repositoryStateId = repositoryStateId; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(repository); + out.writeLong(repositoryStateId); + } + + @Override + public String toString() { + return "{" + repository + '}' + '{' + repositoryStateId + '}'; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java index 94c6ea43d384..83de4aba8e62 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java @@ -102,9 +102,11 @@ public interface BlobContainer { /** * Deletes this container and all its contents from the repository. + * + * @return delete result * @throws IOException on failure */ - void delete() throws IOException; + DeleteResult delete() throws IOException; /** * Deletes the blobs with given names. Unlike {@link #deleteBlob(String)} this method will not throw an exception diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/DeleteResult.java b/server/src/main/java/org/elasticsearch/common/blobstore/DeleteResult.java new file mode 100644 index 000000000000..9f74e31ad7d5 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/blobstore/DeleteResult.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
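A brief sketch of how an in-flight cleanup would show up in the cluster state, based on the toXContent implementation above (repository name and state id are illustrative):

    RepositoryCleanupInProgress cleanup = new RepositoryCleanupInProgress(
        RepositoryCleanupInProgress.startedEntry("my_repository", 42L));
    // rendered into the cluster state as roughly:
    //   "repository_cleanup" : [ { "repository" : "my_repository" } ]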
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.blobstore; + +/** + * The result of deleting multiple blobs from a {@link BlobStore}. + */ +public final class DeleteResult { + + public static final DeleteResult ZERO = new DeleteResult(0, 0); + + private final long blobsDeleted; + private final long bytesDeleted; + + public DeleteResult(long blobsDeleted, long bytesDeleted) { + this.blobsDeleted = blobsDeleted; + this.bytesDeleted = bytesDeleted; + } + + public long blobsDeleted() { + return blobsDeleted; + } + + public long bytesDeleted() { + return bytesDeleted; + } + + public DeleteResult add(DeleteResult other) { + return new DeleteResult(blobsDeleted + other.blobsDeleted(), bytesDeleted + other.bytesDeleted()); + } + + public DeleteResult add(long blobs, long bytes) { + return new DeleteResult(blobsDeleted + blobs, bytesDeleted + bytes); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java index 6723a70a9abb..d333691a9bc2 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.core.internal.io.IOUtils; @@ -45,6 +46,7 @@ import java.nio.file.StandardOpenOption; import java.nio.file.attribute.BasicFileAttributes; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; import static java.util.Collections.unmodifiableMap; @@ -110,7 +112,7 @@ public class FsBlobContainer extends AbstractBlobContainer { if (Files.isDirectory(blobPath)) { // delete directory recursively as long as it is empty (only contains empty directories), // which is the reason we aren't deleting any files, only the directories on the post-visit - Files.walkFileTree(blobPath, new SimpleFileVisitor() { + Files.walkFileTree(blobPath, new SimpleFileVisitor<>() { @Override public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException { Files.delete(dir); @@ -123,8 +125,26 @@ public class FsBlobContainer extends AbstractBlobContainer { } @Override - public void delete() throws IOException { - IOUtils.rm(path); + public DeleteResult delete() throws IOException { + final AtomicLong filesDeleted = new AtomicLong(0L); + final AtomicLong bytesDeleted = new AtomicLong(0L); + Files.walkFileTree(path, new SimpleFileVisitor<>() { + @Override + public FileVisitResult postVisitDirectory(Path dir, IOException impossible) throws IOException { + assert impossible == null; + Files.delete(dir); + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult 
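The new DeleteResult value type accumulates counts across deletes; a minimal sketch of how the two add variants combine (the numbers are made up):

    DeleteResult total = DeleteResult.ZERO
        .add(new DeleteResult(2, 512))   // e.g. two blobs totalling 512 bytes from one stale index
        .add(1, 128);                    // plus one stale root-level blob of 128 bytes
    assert total.blobsDeleted() == 3;
    assert total.bytesDeleted() == 640;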
visitFile(Path file, BasicFileAttributes attrs) throws IOException { + Files.delete(file); + filesDeleted.incrementAndGet(); + bytesDeleted.addAndGet(attrs.size()); + return FileVisitResult.CONTINUE; + } + }); + return new DeleteResult(filesDeleted.get(), bytesDeleted.get()); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java index 7ad69c1eebe0..39d45e6e970d 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java +++ b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java @@ -100,7 +100,7 @@ public class KeyStoreWrapper implements SecureSettings { /** * A regex for the valid characters that a setting name in the keystore may use. */ - private static final Pattern ALLOWED_SETTING_NAME = Pattern.compile("[a-z0-9_\\-.]+"); + private static final Pattern ALLOWED_SETTING_NAME = Pattern.compile("[A-Za-z0-9_\\-.]+"); public static final Setting SEED_SETTING = SecureSetting.secureString("keystore.seed", null); diff --git a/server/src/main/java/org/elasticsearch/common/transport/BoundTransportAddress.java b/server/src/main/java/org/elasticsearch/common/transport/BoundTransportAddress.java index 13e0f2b3a7a2..f5488f68fe9c 100644 --- a/server/src/main/java/org/elasticsearch/common/transport/BoundTransportAddress.java +++ b/server/src/main/java/org/elasticsearch/common/transport/BoundTransportAddress.java @@ -22,6 +22,7 @@ package org.elasticsearch.common.transport; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.network.InetAddresses; import java.io.IOException; @@ -75,7 +76,12 @@ public class BoundTransportAddress implements Writeable { @Override public String toString() { StringBuilder builder = new StringBuilder("publish_address {"); - builder.append(publishAddress); + String hostString = publishAddress.address().getHostString(); + String publishAddressString = publishAddress.toString(); + if (InetAddresses.isInetAddress(hostString) == false) { + publishAddressString = hostString + '/' + publishAddress.toString(); + } + builder.append(publishAddressString); builder.append("}, bound_addresses "); boolean firstAdded = false; for (TransportAddress address : boundAddresses) { diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/AbstractAsyncTask.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/AbstractAsyncTask.java index 3c1716cda152..1ef9a484a277 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/AbstractAsyncTask.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/AbstractAsyncTask.java @@ -134,6 +134,9 @@ public abstract class AbstractAsyncTask implements Runnable, Closeable { @Override public final void run() { synchronized (this) { + if (isClosed()) { + return; + } cancellable = null; isScheduledOrRunning = autoReschedule; } diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 4350166acff0..e610691b8512 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -195,24 +195,6 @@ public final class IndexSettings { new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES), Property.Dynamic, 
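To illustrate the BoundTransportAddress change above: when the publish host is a hostname rather than a literal IP, the log line now shows both forms. A sketch with assumed addresses:

    String hostString = "elastic.example.org";   // publishAddress.address().getHostString()
    String address = "10.0.0.5:9300";            // publishAddress.toString()
    String rendered = InetAddresses.isInetAddress(hostString)
        ? address                                // unchanged for plain IP publish hosts
        : hostString + '/' + address;            // "elastic.example.org/10.0.0.5:9300"
    // yielding e.g.: publish_address {elastic.example.org/10.0.0.5:9300}, bound_addresses {...}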
Property.IndexScope); - /** - * Controls how long translog files that are no longer needed for persistence reasons - * will be kept around before being deleted. A longer retention policy is useful to increase - * the chance of ops based recoveries. - **/ - public static final Setting INDEX_TRANSLOG_RETENTION_AGE_SETTING = - Setting.timeSetting("index.translog.retention.age", TimeValue.timeValueHours(12), TimeValue.timeValueMillis(-1), - Property.Dynamic, Property.IndexScope); - - /** - * Controls how many translog files that are no longer needed for persistence reasons - * will be kept around before being deleted. Keeping more files is useful to increase - * the chance of ops based recoveries. - **/ - public static final Setting INDEX_TRANSLOG_RETENTION_SIZE_SETTING = - Setting.byteSizeSetting("index.translog.retention.size", new ByteSizeValue(512, ByteSizeUnit.MB), Property.Dynamic, - Property.IndexScope); - /** * The maximum size of a translog generation. This is independent of the maximum size of * translog operations that have not been flushed. @@ -258,6 +240,27 @@ public final class IndexSettings { Setting.longSetting("index.soft_deletes.retention.operations", 0, 0, Property.IndexScope, Property.Dynamic); + /** + * Controls how long translog files that are no longer needed for persistence reasons + * will be kept around before being deleted. Keeping more files is useful to increase + * the chance of ops based recoveries for indices with soft-deletes disabled. + * This setting will be ignored if soft-deletes is enabled. + **/ + public static final Setting INDEX_TRANSLOG_RETENTION_AGE_SETTING = + Setting.timeSetting("index.translog.retention.age", + settings -> INDEX_SOFT_DELETES_SETTING.get(settings) ? TimeValue.MINUS_ONE : TimeValue.timeValueHours(12), TimeValue.MINUS_ONE, + Property.Dynamic, Property.IndexScope); + + /** + * Controls how many translog files that are no longer needed for persistence reasons + * will be kept around before being deleted. Keeping more files is useful to increase + * the chance of ops based recoveries for indices with soft-deletes disabled. + * This setting will be ignored if soft-deletes is enabled. + **/ + public static final Setting INDEX_TRANSLOG_RETENTION_SIZE_SETTING = + Setting.byteSizeSetting("index.translog.retention.size", settings -> INDEX_SOFT_DELETES_SETTING.get(settings) ? "-1" : "512MB", + Property.Dynamic, Property.IndexScope); + /** * Controls the maximum length of time since a retention lease is created or renewed before it is considered expired. 
*/ @@ -466,8 +469,6 @@ public final class IndexSettings { syncInterval = INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.get(settings); refreshInterval = scopedSettings.get(INDEX_REFRESH_INTERVAL_SETTING); flushThresholdSize = scopedSettings.get(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING); - translogRetentionAge = scopedSettings.get(INDEX_TRANSLOG_RETENTION_AGE_SETTING); - translogRetentionSize = scopedSettings.get(INDEX_TRANSLOG_RETENTION_SIZE_SETTING); generationThresholdSize = scopedSettings.get(INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING); mergeSchedulerConfig = new MergeSchedulerConfig(this); gcDeletesInMillis = scopedSettings.get(INDEX_GC_DELETES_SETTING).getMillis(); @@ -493,6 +494,8 @@ public final class IndexSettings { this.indexSortConfig = new IndexSortConfig(this); searchIdleAfter = scopedSettings.get(INDEX_SEARCH_IDLE_AFTER); defaultPipeline = scopedSettings.get(DEFAULT_PIPELINE); + setTranslogRetentionAge(scopedSettings.get(INDEX_TRANSLOG_RETENTION_AGE_SETTING)); + setTranslogRetentionSize(scopedSettings.get(INDEX_TRANSLOG_RETENTION_SIZE_SETTING)); scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio); scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, @@ -553,11 +556,21 @@ public final class IndexSettings { } private void setTranslogRetentionSize(ByteSizeValue byteSizeValue) { - this.translogRetentionSize = byteSizeValue; + if (softDeleteEnabled && byteSizeValue.getBytes() >= 0) { + // ignore the translog retention settings if soft-deletes enabled + this.translogRetentionSize = new ByteSizeValue(-1); + } else { + this.translogRetentionSize = byteSizeValue; + } } private void setTranslogRetentionAge(TimeValue age) { - this.translogRetentionAge = age; + if (softDeleteEnabled && age.millis() >= 0) { + // ignore the translog retention settings if soft-deletes enabled + this.translogRetentionAge = TimeValue.MINUS_ONE; + } else { + this.translogRetentionAge = age; + } } private void setGenerationThresholdSize(final ByteSizeValue generationThresholdSize) { @@ -734,13 +747,19 @@ public final class IndexSettings { /** * Returns the transaction log retention size which controls how much of the translog is kept around to allow for ops based recoveries */ - public ByteSizeValue getTranslogRetentionSize() { return translogRetentionSize; } + public ByteSizeValue getTranslogRetentionSize() { + assert softDeleteEnabled == false || translogRetentionSize.getBytes() == -1L : translogRetentionSize; + return translogRetentionSize; + } /** * Returns the transaction log retention age which controls the maximum age (time from creation) that translog files will be kept * around */ - public TimeValue getTranslogRetentionAge() { return translogRetentionAge; } + public TimeValue getTranslogRetentionAge() { + assert softDeleteEnabled == false || translogRetentionAge.millis() == -1L : translogRetentionSize; + return translogRetentionAge; + } /** * Returns the generation threshold size. As sequence numbers can cause multiple generations to diff --git a/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java b/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java index 303e7049306a..01b3e97af598 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java @@ -215,7 +215,7 @@ public class Analysis { * If the word list cannot be found at either key. 
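In other words, once soft deletes are enabled (the default for newly created indices) the translog retention settings are forced to -1 regardless of what was configured. A hedged sketch of index settings affected by this, with keys taken from the settings above and illustrative values:

    Settings indexSettings = Settings.builder()
        .put("index.soft_deletes.enabled", true)
        .put("index.translog.retention.size", "512mb")   // ignored, resolves to -1
        .put("index.translog.retention.age", "12h")      // ignored, resolves to -1
        .build();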
*/ public static List getWordList(Environment env, Settings settings, String settingPrefix) { - return getWordList(env, settings, settingPrefix + "_path", settingPrefix); + return getWordList(env, settings, settingPrefix + "_path", settingPrefix, true); } /** @@ -225,7 +225,8 @@ public class Analysis { * @throws IllegalArgumentException * If the word list cannot be found at either key. */ - public static List getWordList(Environment env, Settings settings, String settingPath, String settingList) { + public static List getWordList(Environment env, Settings settings, + String settingPath, String settingList, boolean removeComments) { String wordListPath = settings.get(settingPath, null); if (wordListPath == null) { @@ -240,7 +241,7 @@ public class Analysis { final Path path = env.configFile().resolve(wordListPath); try { - return loadWordList(path, "#"); + return loadWordList(path, removeComments); } catch (CharacterCodingException ex) { String message = String.format(Locale.ROOT, "Unsupported character encoding detected while reading %s: %s - files must be UTF-8 encoded", @@ -252,15 +253,15 @@ public class Analysis { } } - private static List loadWordList(Path path, String comment) throws IOException { + private static List loadWordList(Path path, boolean removeComments) throws IOException { final List result = new ArrayList<>(); try (BufferedReader br = Files.newBufferedReader(path, StandardCharsets.UTF_8)) { String word; while ((word = br.readLine()) != null) { - if (!Strings.hasText(word)) { + if (Strings.hasText(word) == false) { continue; } - if (!word.startsWith(comment)) { + if (removeComments == false || word.startsWith("#") == false) { result.add(word.trim()); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 997a03d731c5..34b02825bf4e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -770,10 +770,6 @@ public class MapperService extends AbstractIndexComponent implements Closeable { * Given a type (eg. long, string, ...), return an anonymous field mapper that can be used for search operations. 
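A small sketch of the new removeComments flag on Analysis.getWordList (setting names and file contents are hypothetical): a word list whose entries legitimately start with '#' can now be loaded verbatim, while the shorter overload above keeps the old comment-stripping behaviour.

    // config/analysis/tags.txt:
    //   #hashtag
    //   plain
    List<String> words = Analysis.getWordList(env, settings, "tags_path", "tags", false);
    // -> ["#hashtag", "plain"]; passing true would drop the '#'-prefixed line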
*/ public MappedFieldType unmappedFieldType(String type) { - if (type.equals("string")) { - deprecationLogger.deprecated("[unmapped_type:string] should be replaced with [unmapped_type:keyword]"); - type = "keyword"; - } MappedFieldType fieldType = unmappedFieldTypes.get(type); if (fieldType == null) { final Mapper.TypeParser.ParserContext parserContext = documentMapperParser().parserContext(); diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java index c1fde6d8b4e3..309a3c3a3869 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java @@ -44,6 +44,8 @@ import static java.util.Collections.unmodifiableMap; */ public class BlobStoreIndexShardSnapshots implements Iterable, ToXContentFragment { + public static final BlobStoreIndexShardSnapshots EMPTY = new BlobStoreIndexShardSnapshots(Collections.emptyList()); + private final List shardSnapshots; private final Map files; private final Map> physicalFiles; diff --git a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java index 7cf165a5b112..55a24d30991c 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java @@ -177,11 +177,19 @@ public class TruncateTranslogAction { final TranslogConfig translogConfig = new TranslogConfig(shardPath.getShardId(), translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); long primaryTerm = indexSettings.getIndexMetaData().primaryTerm(shardPath.getShardId().id()); - final TranslogDeletionPolicy translogDeletionPolicy = - new TranslogDeletionPolicy(indexSettings.getTranslogRetentionSize().getBytes(), - indexSettings.getTranslogRetentionAge().getMillis()); + // We open translog to check for corruption, do not clean anything. 
+ final TranslogDeletionPolicy retainAllTranslogPolicy = new TranslogDeletionPolicy(Long.MAX_VALUE, Long.MAX_VALUE) { + @Override + long minTranslogGenRequired(List readers, TranslogWriter writer) { + long minGen = writer.generation; + for (TranslogReader reader : readers) { + minGen = Math.min(reader.generation, minGen); + } + return minGen; + } + }; try (Translog translog = new Translog(translogConfig, translogUUID, - translogDeletionPolicy, () -> translogGlobalCheckpoint, () -> primaryTerm, seqNo -> {}); + retainAllTranslogPolicy, () -> translogGlobalCheckpoint, () -> primaryTerm, seqNo -> {}); Translog.Snapshot snapshot = translog.newSnapshot()) { //noinspection StatementWithEmptyBody we are just checking that we can iterate through the whole snapshot while (snapshot.next() != null) { diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 5d58325d4cff..a1b19f14b3a8 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -253,7 +253,7 @@ public class PeerRecoveryTargetService implements IndexEventListener { // the issues that a missing call to this could cause are sneaky and hard to debug. If we don't need it on this // call we can potentially remove it altogether which we should do it in a major release only with enough // time to test. This shoudl be done for 7.0 if possible - transportService.submitRequest(request.sourceNode(), PeerRecoverySourceService.Actions.START_RECOVERY, request, + transportService.sendRequest(request.sourceNode(), PeerRecoverySourceService.Actions.START_RECOVERY, request, new TransportResponseHandler() { @Override public void handleResponse(RecoveryResponse recoveryResponse) { diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java index 3140de8169d8..4408fd020504 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java @@ -33,6 +33,7 @@ import org.elasticsearch.index.store.StoreFileMetaData; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.EmptyTransportResponseHandler; +import org.elasticsearch.transport.PlainTransportFuture; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; @@ -78,7 +79,7 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { @Override public void prepareForTranslogOperations(int totalTranslogOps, ActionListener listener) { - transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.PREPARE_TRANSLOG, + transportService.sendRequest(targetNode, PeerRecoveryTargetService.Actions.PREPARE_TRANSLOG, new RecoveryPrepareForTranslogOperationsRequest(recoveryId, shardId, totalTranslogOps), TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), new ActionListenerResponseHandler<>(ActionListener.map(listener, r -> null), @@ -87,7 +88,7 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { @Override 
public void finalizeRecovery(final long globalCheckpoint, final long trimAboveSeqNo, final ActionListener listener) { - transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.FINALIZE, + transportService.sendRequest(targetNode, PeerRecoveryTargetService.Actions.FINALIZE, new RecoveryFinalizeRecoveryRequest(recoveryId, shardId, globalCheckpoint, trimAboveSeqNo), TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionLongTimeout()).build(), new ActionListenerResponseHandler<>(ActionListener.map(listener, r -> null), @@ -96,12 +97,12 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { @Override public void handoffPrimaryContext(final ReplicationTracker.PrimaryContext primaryContext) { - transportService.submitRequest( - targetNode, - PeerRecoveryTargetService.Actions.HANDOFF_PRIMARY_CONTEXT, - new RecoveryHandoffPrimaryContextRequest(recoveryId, shardId, primaryContext), - TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), - EmptyTransportResponseHandler.INSTANCE_SAME).txGet(); + PlainTransportFuture handler = new PlainTransportFuture<>(EmptyTransportResponseHandler.INSTANCE_SAME); + transportService.sendRequest( + targetNode, PeerRecoveryTargetService.Actions.HANDOFF_PRIMARY_CONTEXT, + new RecoveryHandoffPrimaryContextRequest(recoveryId, shardId, primaryContext), + TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), handler); + handler.txGet(); } @Override @@ -122,7 +123,7 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { maxSeqNoOfDeletesOrUpdatesOnPrimary, retentionLeases, mappingVersionOnPrimary); - transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.TRANSLOG_OPS, request, translogOpsRequestOptions, + transportService.sendRequest(targetNode, PeerRecoveryTargetService.Actions.TRANSLOG_OPS, request, translogOpsRequestOptions, new ActionListenerResponseHandler<>(ActionListener.map(listener, r -> r.localCheckpoint), RecoveryTranslogOperationsResponse::new, ThreadPool.Names.GENERIC)); } @@ -132,7 +133,7 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { List phase1ExistingFileSizes, int totalTranslogOps, ActionListener listener) { RecoveryFilesInfoRequest recoveryInfoFilesRequest = new RecoveryFilesInfoRequest(recoveryId, shardId, phase1FileNames, phase1FileSizes, phase1ExistingFileNames, phase1ExistingFileSizes, totalTranslogOps); - transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.FILES_INFO, recoveryInfoFilesRequest, + transportService.sendRequest(targetNode, PeerRecoveryTargetService.Actions.FILES_INFO, recoveryInfoFilesRequest, TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), new ActionListenerResponseHandler<>(ActionListener.map(listener, r -> null), in -> TransportResponse.Empty.INSTANCE, ThreadPool.Names.GENERIC)); @@ -141,7 +142,7 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { @Override public void cleanFiles(int totalTranslogOps, long globalCheckpoint, Store.MetadataSnapshot sourceMetaData, ActionListener listener) { - transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.CLEAN_FILES, + transportService.sendRequest(targetNode, PeerRecoveryTargetService.Actions.CLEAN_FILES, new RecoveryCleanFilesRequest(recoveryId, shardId, sourceMetaData, totalTranslogOps, globalCheckpoint), 
TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(), new ActionListenerResponseHandler<>(ActionListener.map(listener, r -> null), @@ -173,7 +174,7 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { throttleTimeInNanos = 0; } - transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.FILE_CHUNK, + transportService.sendRequest(targetNode, PeerRecoveryTargetService.Actions.FILE_CHUNK, new RecoveryFileChunkRequest(recoveryId, shardId, fileMetaData, position, content, lastChunk, totalTranslogOps, /* we send estimateTotalOperations with every request since we collect stats on the target and that way we can diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java b/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java index 320bc15fda1f..b33ab21e1dc5 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java @@ -511,17 +511,26 @@ public class OsProbe { assert !controllerMap.isEmpty(); final String cpuAcctControlGroup = controllerMap.get("cpuacct"); - assert cpuAcctControlGroup != null; + if (cpuAcctControlGroup == null) { + logger.debug("no [cpuacct] data found in cgroup stats"); + return null; + } final long cgroupCpuAcctUsageNanos = getCgroupCpuAcctUsageNanos(cpuAcctControlGroup); final String cpuControlGroup = controllerMap.get("cpu"); - assert cpuControlGroup != null; + if (cpuControlGroup == null) { + logger.debug("no [cpu] data found in cgroup stats"); + return null; + } final long cgroupCpuAcctCpuCfsPeriodMicros = getCgroupCpuAcctCpuCfsPeriodMicros(cpuControlGroup); final long cgroupCpuAcctCpuCfsQuotaMicros = getCgroupCpuAcctCpuCfsQuotaMicros(cpuControlGroup); final OsStats.Cgroup.CpuStat cpuStat = getCgroupCpuAcctCpuStat(cpuControlGroup); final String memoryControlGroup = controllerMap.get("memory"); - assert memoryControlGroup != null; + if (memoryControlGroup == null) { + logger.debug("no [memory] data found in cgroup stats"); + return null; + } final String cgroupMemoryLimitInBytes = getCgroupMemoryLimitInBytes(memoryControlGroup); final String cgroupMemoryUsageInBytes = getCgroupMemoryUsageInBytes(memoryControlGroup); diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoryCleanupResult.java b/server/src/main/java/org/elasticsearch/repositories/RepositoryCleanupResult.java new file mode 100644 index 000000000000..bec61e02ee8f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoryCleanupResult.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.repositories; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.blobstore.DeleteResult; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; + +public final class RepositoryCleanupResult implements Writeable, ToXContentObject { + + public static final ObjectParser PARSER = + new ObjectParser<>(RepositoryCleanupResult.class.getName(), true, RepositoryCleanupResult::new); + + private static final String DELETED_BLOBS = "deleted_blobs"; + + private static final String DELETED_BYTES = "deleted_bytes"; + + static { + PARSER.declareLong((result, bytes) -> result.bytes = bytes, new ParseField(DELETED_BYTES)); + PARSER.declareLong((result, blobs) -> result.blobs = blobs, new ParseField(DELETED_BLOBS)); + } + + private long bytes; + + private long blobs; + + private RepositoryCleanupResult() { + this(DeleteResult.ZERO); + } + + public RepositoryCleanupResult(DeleteResult result) { + this.blobs = result.blobsDeleted(); + this.bytes = result.bytesDeleted(); + } + + public RepositoryCleanupResult(StreamInput in) throws IOException { + bytes = in.readLong(); + blobs = in.readLong(); + } + + public long bytes() { + return bytes; + } + + public long blobs() { + return blobs; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(bytes); + out.writeLong(blobs); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject().field(DELETED_BYTES, bytes).field(DELETED_BLOBS, blobs).endObject(); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 947fdd4dfa95..681f5734334a 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -44,6 +44,7 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.common.blobstore.fs.FsBlobContainer; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -81,6 +82,7 @@ import org.elasticsearch.index.store.StoreFileMetaData; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.Repository; +import org.elasticsearch.repositories.RepositoryCleanupResult; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.RepositoryVerificationException; @@ -402,7 +404,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp updatedRepositoryData = repositoryData.removeSnapshot(snapshotId); // 
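For reference, a sketch of what RepositoryCleanupResult serializes to, following the toXContent method above (the counts are invented):

    RepositoryCleanupResult result = new RepositoryCleanupResult(new DeleteResult(3, 1024));
    // Strings.toString(result), as used by toString() above, renders roughly:
    //   {"deleted_bytes":1024,"deleted_blobs":3}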
Cache the indices that were found before writing out the new index-N blob so that a stuck master will never // delete an index that was created by another master node after writing this index-N blob. - foundIndices = blobStore().blobContainer(basePath().add("indices")).children(); + + foundIndices = blobStore().blobContainer(indicesPath()).children(); writeIndexGen(updatedRepositoryData, repositoryStateId); } catch (Exception ex) { listener.onFailure(new RepositoryException(metadata.name(), "failed to delete snapshot [" + snapshotId + "]", ex)); @@ -425,18 +428,61 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp .orElse(Collections.emptyList()), snapshotId, ActionListener.map(listener, v -> { - cleanupStaleIndices(foundIndices, survivingIndices); - cleanupStaleRootFiles(Sets.difference(rootBlobs, new HashSet<>(snapMetaFilesToDelete)), updatedRepositoryData); + cleanupStaleIndices(foundIndices, survivingIndices.values().stream().map(IndexId::getId).collect(Collectors.toSet())); + cleanupStaleRootFiles( + staleRootBlobs(updatedRepositoryData, Sets.difference(rootBlobs, new HashSet<>(snapMetaFilesToDelete)))); return null; }) ); } } - private void cleanupStaleRootFiles(Set rootBlobNames, RepositoryData repositoryData) { + /** + * Runs cleanup actions on the repository. Increments the repository state id by one before executing any modifications on the + * repository. + * TODO: Add shard level cleanups + *
+     * <ul>
+     *     <li>Deleting stale indices {@link #cleanupStaleIndices}</li>
+     *     <li>Deleting unreferenced root level blobs {@link #cleanupStaleRootFiles}</li>
+     * </ul>
+ * @param repositoryStateId Current repository state id + * @param listener Lister to complete when done + */ + public void cleanup(long repositoryStateId, ActionListener listener) { + ActionListener.completeWith(listener, () -> { + if (isReadOnly()) { + throw new RepositoryException(metadata.name(), "cannot run cleanup on readonly repository"); + } + final RepositoryData repositoryData = getRepositoryData(); + if (repositoryData.getGenId() != repositoryStateId) { + // Check that we are working on the expected repository version before gathering the data to clean up + throw new RepositoryException(metadata.name(), "concurrent modification of the repository before cleanup started, " + + "expected current generation [" + repositoryStateId + "], actual current generation [" + + repositoryData.getGenId() + "]"); + } + Map rootBlobs = blobContainer().listBlobs(); + final Map foundIndices = blobStore().blobContainer(indicesPath()).children(); + final Set survivingIndexIds = + repositoryData.getIndices().values().stream().map(IndexId::getId).collect(Collectors.toSet()); + final List staleRootBlobs = staleRootBlobs(repositoryData, rootBlobs.keySet()); + if (survivingIndexIds.equals(foundIndices.keySet()) && staleRootBlobs.isEmpty()) { + // Nothing to clean up we return + return new RepositoryCleanupResult(DeleteResult.ZERO); + } + // write new index-N blob to ensure concurrent operations will fail + writeIndexGen(repositoryData, repositoryStateId); + final DeleteResult deleteIndicesResult = cleanupStaleIndices(foundIndices, survivingIndexIds); + List cleaned = cleanupStaleRootFiles(staleRootBlobs); + return new RepositoryCleanupResult( + deleteIndicesResult.add(cleaned.size(), cleaned.stream().mapToLong(name -> rootBlobs.get(name).length()).sum())); + }); + } + + // Finds all blobs directly under the repository root path that are not referenced by the current RepositoryData + private List staleRootBlobs(RepositoryData repositoryData, Set rootBlobNames) { final Set allSnapshotIds = repositoryData.getSnapshotIds().stream().map(SnapshotId::getUUID).collect(Collectors.toSet()); - final List blobsToDelete = rootBlobNames.stream().filter( + return rootBlobNames.stream().filter( blob -> { if (FsBlobContainer.isTempBlobName(blob)) { return true; @@ -457,12 +503,16 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp return false; } ).collect(Collectors.toList()); + } + + private List cleanupStaleRootFiles(List blobsToDelete) { if (blobsToDelete.isEmpty()) { - return; + return blobsToDelete; } try { logger.info("[{}] Found stale root level blobs {}. 
Cleaning them up", metadata.name(), blobsToDelete); blobContainer().deleteBlobsIgnoringIfNotExists(blobsToDelete); + return blobsToDelete; } catch (IOException e) { logger.warn(() -> new ParameterizedMessage( "[{}] The following blobs are no longer part of any snapshot [{}] but failed to remove them", @@ -474,18 +524,18 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp assert false : e; logger.warn(new ParameterizedMessage("[{}] Exception during cleanup of root level blobs", metadata.name()), e); } + return Collections.emptyList(); } - private void cleanupStaleIndices(Map foundIndices, Map survivingIndices) { + private DeleteResult cleanupStaleIndices(Map foundIndices, Set survivingIndexIds) { + DeleteResult deleteResult = DeleteResult.ZERO; try { - final Set survivingIndexIds = survivingIndices.values().stream() - .map(IndexId::getId).collect(Collectors.toSet()); for (Map.Entry indexEntry : foundIndices.entrySet()) { final String indexSnId = indexEntry.getKey(); try { if (survivingIndexIds.contains(indexSnId) == false) { logger.debug("[{}] Found stale index [{}]. Cleaning it up", metadata.name(), indexSnId); - indexEntry.getValue().delete(); + deleteResult = deleteResult.add(indexEntry.getValue().delete()); logger.debug("[{}] Cleaned up stale index [{}]", metadata.name(), indexSnId); } } catch (IOException e) { @@ -501,6 +551,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp assert false : e; logger.warn(new ParameterizedMessage("[{}] Exception during cleanup of stale indices", metadata.name()), e); } + return deleteResult; } private void deleteIndices(RepositoryData repositoryData, List indices, SnapshotId snapshotId, @@ -843,7 +894,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp final BlobContainer shardContainer = shardContainer(indexId, shardId); final Map blobs; try { - blobs = shardContainer.listBlobs(); + blobs = shardContainer.listBlobsByPrefix(INDEX_FILE_PREFIX); } catch (IOException e) { throw new IndexShardSnapshotFailedException(shardId, "failed to list blobs", e); } @@ -888,7 +939,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp List filesInfo = snapshots.findPhysicalIndexFiles(fileName); if (filesInfo != null) { for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : filesInfo) { - if (fileInfo.isSame(md) && snapshotFileExistsInBlobs(fileInfo, blobs)) { + if (fileInfo.isSame(md)) { // a commit point file with the same name, size and checksum was already copied to repository // we will reuse it for this snapshot existingFileInfo = fileInfo; @@ -1159,23 +1210,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp } else if (blobKeys.isEmpty() == false) { logger.warn("Could not find a readable index-N file in a non-empty shard snapshot directory [{}]", shardContainer.path()); } - - // We couldn't load the index file - falling back to loading individual snapshots - List snapshots = new ArrayList<>(); - for (String name : blobKeys) { - try { - BlobStoreIndexShardSnapshot snapshot = null; - if (name.startsWith(SNAPSHOT_PREFIX)) { - snapshot = indexShardSnapshotFormat.readBlob(shardContainer, name); - } - if (snapshot != null) { - snapshots.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles())); - } - } catch (IOException e) { - logger.warn(() -> new ParameterizedMessage("Failed to read blob [{}]", name), e); - } - } - return new Tuple<>(new BlobStoreIndexShardSnapshots(snapshots), latest); + return 
new Tuple<>(BlobStoreIndexShardSnapshots.EMPTY, latest); } /** diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java new file mode 100644 index 000000000000..3eca34ff2d3d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.admin.cluster; + +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; + +import java.io.IOException; + +import static org.elasticsearch.client.Requests.cleanupRepositoryRequest; +import static org.elasticsearch.rest.RestRequest.Method.POST; + +/** + * Cleans up a repository + */ +public class RestCleanupRepositoryAction extends BaseRestHandler { + + public RestCleanupRepositoryAction(RestController controller) { + controller.registerHandler(POST, "/_snapshot/{repository}/_cleanup", this); + } + + @Override + public String getName() { + return "cleanup_repository_action"; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + CleanupRepositoryRequest cleanupRepositoryRequest = cleanupRepositoryRequest(request.param("repository")); + cleanupRepositoryRequest.timeout(request.paramAsTime("timeout", cleanupRepositoryRequest.timeout())); + cleanupRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", cleanupRepositoryRequest.masterNodeTimeout())); + return channel -> client.admin().cluster().cleanupRepository(cleanupRepositoryRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java index d664b10cfa16..4b929c692edc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java @@ -82,6 +82,7 @@ public abstract class InternalAggregation implements Aggregation, NamedWriteable public void consumeBucketsAndMaybeBreak(int size) { multiBucketConsumer.accept(size); } + } protected final String name; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java 
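Given the handler registered above, a cleanup could be triggered with a request along the lines of POST /_snapshot/my_repository/_cleanup?master_timeout=30s (repository name and timeout are placeholders). At the transport layer that corresponds roughly to:

    CleanupRepositoryRequest request = cleanupRepositoryRequest("my_repository");
    request.masterNodeTimeout(TimeValue.timeValueSeconds(30));
    client.admin().cluster().cleanupRepository(request, listener);   // listener: an ActionListener<CleanupRepositoryResponse>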
b/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java index e11899fff339..41b1a9aef623 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java @@ -66,6 +66,12 @@ public abstract class InternalMultiBucketAggregation buckets, ReduceContext context); + @Override public abstract List getBuckets(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java index 46eac6ce55dd..57c7d703cbdf 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java @@ -88,21 +88,6 @@ public class InternalAdjacencyMatrix return aggregations; } - InternalBucket reduce(List buckets, ReduceContext context) { - InternalBucket reduced = null; - List aggregationsList = new ArrayList<>(buckets.size()); - for (InternalBucket bucket : buckets) { - if (reduced == null) { - reduced = new InternalBucket(bucket.key, bucket.docCount, bucket.aggregations); - } else { - reduced.docCount += bucket.docCount; - } - aggregationsList.add(bucket.aggregations); - } - reduced.aggregations = InternalAggregations.reduce(aggregationsList, context); - return reduced; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -212,7 +197,7 @@ public class InternalAdjacencyMatrix ArrayList reducedBuckets = new ArrayList<>(bucketsMap.size()); for (List sameRangeList : bucketsMap.values()) { - InternalBucket reducedBucket = sameRangeList.get(0).reduce(sameRangeList, reduceContext); + InternalBucket reducedBucket = reduceBucket(sameRangeList, reduceContext); if(reducedBucket.docCount >= 1){ reduceContext.consumeBucketsAndMaybeBreak(1); reducedBuckets.add(reducedBucket); @@ -228,6 +213,23 @@ public class InternalAdjacencyMatrix return reduced; } + @Override + protected InternalBucket reduceBucket(List buckets, ReduceContext context) { + assert buckets.size() > 0; + InternalBucket reduced = null; + List aggregationsList = new ArrayList<>(buckets.size()); + for (InternalBucket bucket : buckets) { + if (reduced == null) { + reduced = new InternalBucket(bucket.key, bucket.docCount, bucket.aggregations); + } else { + reduced.docCount += bucket.docCount; + } + aggregationsList.add(bucket.aggregations); + } + reduced.aggregations = InternalAggregations.reduce(aggregationsList, context); + return reduced; + } + @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { builder.startArray(CommonFields.BUCKETS.getPreferredName()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java index ac51301766cb..e11db15aceca 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java @@ -158,7 +158,7 @@ public class InternalComposite while (pq.size() > 0) { BucketIterator bucketIt = pq.poll(); if (lastBucket 
!= null && bucketIt.current.compareKey(lastBucket) != 0) { - InternalBucket reduceBucket = buckets.get(0).reduce(buckets, reduceContext); + InternalBucket reduceBucket = reduceBucket(buckets, reduceContext); buckets.clear(); reduceContext.consumeBucketsAndMaybeBreak(1); result.add(reduceBucket); @@ -173,7 +173,7 @@ public class InternalComposite } } if (buckets.size() > 0) { - InternalBucket reduceBucket = buckets.get(0).reduce(buckets, reduceContext); + InternalBucket reduceBucket = reduceBucket(buckets, reduceContext); reduceContext.consumeBucketsAndMaybeBreak(1); result.add(reduceBucket); } @@ -181,6 +181,19 @@ public class InternalComposite return new InternalComposite(name, size, sourceNames, formats, result, lastKey, reverseMuls, pipelineAggregators(), metaData); } + @Override + protected InternalBucket reduceBucket(List buckets, ReduceContext context) { + assert buckets.size() > 0; + List aggregations = new ArrayList<>(buckets.size()); + long docCount = 0; + for (InternalBucket bucket : buckets) { + docCount += bucket.docCount; + aggregations.add(bucket.aggregations); + } + InternalAggregations aggs = InternalAggregations.reduce(aggregations, context); + return new InternalBucket(sourceNames, formats, buckets.get(0).key, reverseMuls, docCount, aggs); + } + @Override public boolean equals(Object obj) { if (this == obj) return true; @@ -308,17 +321,6 @@ public class InternalComposite return aggregations; } - InternalBucket reduce(List buckets, ReduceContext reduceContext) { - List aggregations = new ArrayList<>(buckets.size()); - long docCount = 0; - for (InternalBucket bucket : buckets) { - docCount += bucket.docCount; - aggregations.add(bucket.aggregations); - } - InternalAggregations aggs = InternalAggregations.reduce(aggregations, reduceContext); - return new InternalBucket(sourceNames, formats, key, reverseMuls, docCount, aggs); - } - @Override public int compareKey(InternalBucket other) { for (int i = 0; i < key.size(); i++) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java index d866754c5626..271d1c54d589 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java @@ -87,21 +87,6 @@ public class InternalFilters extends InternalMultiBucketAggregation buckets, ReduceContext context) { - InternalBucket reduced = null; - List aggregationsList = new ArrayList<>(buckets.size()); - for (InternalBucket bucket : buckets) { - if (reduced == null) { - reduced = new InternalBucket(bucket.key, bucket.docCount, bucket.aggregations, bucket.keyed); - } else { - reduced.docCount += bucket.docCount; - } - aggregationsList.add(bucket.aggregations); - } - reduced.aggregations = InternalAggregations.reduce(aggregationsList, context); - return reduced; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (keyed) { @@ -227,11 +212,28 @@ public class InternalFilters extends InternalMultiBucketAggregation(bucketsList.size()), keyed, pipelineAggregators(), getMetaData()); for (List sameRangeList : bucketsList) { - reduced.buckets.add((sameRangeList.get(0)).reduce(sameRangeList, reduceContext)); + reduced.buckets.add(reduceBucket(sameRangeList, reduceContext)); } return reduced; } + @Override + protected InternalBucket reduceBucket(List buckets, 
ReduceContext context) { + assert buckets.size() > 0; + InternalBucket reduced = null; + List aggregationsList = new ArrayList<>(buckets.size()); + for (InternalBucket bucket : buckets) { + if (reduced == null) { + reduced = new InternalBucket(bucket.key, bucket.docCount, bucket.aggregations, bucket.keyed); + } else { + reduced.docCount += bucket.docCount; + } + aggregationsList.add(bucket.aggregations); + } + reduced.aggregations = InternalAggregations.reduce(aggregationsList, context); + return reduced; + } + @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { if (keyed) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java index 4935b6c6ba7d..c91f763b603c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregator.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.LongHash; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; -import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; @@ -96,48 +95,6 @@ public abstract class GeoGridAggregator extends Bucke }; } - // private impl that stores a bucket ord. This allows for computing the aggregations lazily. - static class OrdinalBucket extends InternalGeoGridBucket { - - long bucketOrd; - InternalGeoGridBucket sourceBucket; // used to keep track of appropriate getKeyAsString method - - OrdinalBucket(InternalGeoGridBucket sourceBucket) { - super(sourceBucket.hashAsLong, sourceBucket.docCount, sourceBucket.aggregations); - this.sourceBucket = sourceBucket; - } - - void hashAsLong(long hashAsLong) { - this.hashAsLong = hashAsLong; - this.sourceBucket.hashAsLong = hashAsLong; - } - - @Override - InternalGeoGridBucket buildBucket(InternalGeoGridBucket bucket, long hashAsLong, long docCount, - InternalAggregations aggregations) { - OrdinalBucket ordBucket = new OrdinalBucket(bucket); - ordBucket.hashAsLong = hashAsLong; - ordBucket.docCount = docCount; - ordBucket.aggregations = aggregations; - // this is done because the aggregator may be rebuilt from cache (non OrdinalBucket), - // or it may be rebuilding from a new calculation, and therefore copying bucketOrd. 
- if (bucket instanceof OrdinalBucket) { - ordBucket.bucketOrd = ((OrdinalBucket) bucket).bucketOrd; - } - return ordBucket; - } - - @Override - public Object getKey() { - return sourceBucket.getKey(); - } - - @Override - public String getKeyAsString() { - return sourceBucket.getKeyAsString(); - } - } - abstract T buildAggregation(String name, int requiredSize, List buckets, List pipelineAggregators, Map metaData); @@ -154,24 +111,24 @@ public abstract class GeoGridAggregator extends Bucke final int size = (int) Math.min(bucketOrds.size(), shardSize); consumeBucketsAndMaybeBreak(size); - BucketPriorityQueue ordered = new BucketPriorityQueue(size); - OrdinalBucket spare = null; + BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); + InternalGeoGridBucket spare = null; for (long i = 0; i < bucketOrds.size(); i++) { if (spare == null) { - spare = new OrdinalBucket(newEmptyBucket()); + spare = newEmptyBucket(); } // need a special function to keep the source bucket // up-to-date so it can get the appropriate key - spare.hashAsLong(bucketOrds.get(i)); + spare.hashAsLong = bucketOrds.get(i); spare.docCount = bucketDocCount(i); spare.bucketOrd = i; - spare = (OrdinalBucket) ordered.insertWithOverflow(spare); + spare = ordered.insertWithOverflow(spare); } final InternalGeoGridBucket[] list = new InternalGeoGridBucket[ordered.size()]; for (int i = ordered.size() - 1; i >= 0; --i) { - final OrdinalBucket bucket = (OrdinalBucket) ordered.pop(); + final InternalGeoGridBucket bucket = ordered.pop(); bucket.aggregations = bucketAggregations(bucket.bucketOrd); list[i] = bucket; } @@ -183,10 +140,8 @@ public abstract class GeoGridAggregator extends Bucke return buildAggregation(name, requiredSize, Collections.emptyList(), pipelineAggregators(), metaData()); } - @Override public void doClose() { Releasables.close(bucketOrds); } - } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java index ae77e69b3e1e..61c06a062cc0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.LongObjectPagedHashMap; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; @@ -81,15 +82,15 @@ public abstract class InternalGeoGrid @Override public InternalGeoGrid doReduce(List aggregations, ReduceContext reduceContext) { - LongObjectPagedHashMap> buckets = null; + LongObjectPagedHashMap> buckets = null; for (InternalAggregation aggregation : aggregations) { InternalGeoGrid grid = (InternalGeoGrid) aggregation; if (buckets == null) { buckets = new LongObjectPagedHashMap<>(grid.buckets.size(), reduceContext.bigArrays()); } for (Object obj : grid.buckets) { - B bucket = (B) obj; - List existingBuckets = buckets.get(bucket.hashAsLong()); + InternalGeoGridBucket bucket = (InternalGeoGridBucket) obj; + List existingBuckets = buckets.get(bucket.hashAsLong()); if (existingBuckets == null) { existingBuckets = new 
ArrayList<>(aggregations.size()); buckets.put(bucket.hashAsLong(), existingBuckets); @@ -100,9 +101,9 @@ public abstract class InternalGeoGrid final int size = Math.toIntExact(reduceContext.isFinalReduce() == false ? buckets.size() : Math.min(requiredSize, buckets.size())); BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); - for (LongObjectPagedHashMap.Cursor> cursor : buckets) { - List sameCellBuckets = cursor.value; - InternalGeoGridBucket removed = ordered.insertWithOverflow(sameCellBuckets.get(0).reduce(sameCellBuckets, reduceContext)); + for (LongObjectPagedHashMap.Cursor> cursor : buckets) { + List sameCellBuckets = cursor.value; + InternalGeoGridBucket removed = ordered.insertWithOverflow(reduceBucket(sameCellBuckets, reduceContext)); if (removed != null) { reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(removed)); } else { @@ -117,6 +118,21 @@ public abstract class InternalGeoGrid return create(getName(), requiredSize, Arrays.asList(list), pipelineAggregators(), getMetaData()); } + @Override + protected InternalGeoGridBucket reduceBucket(List buckets, ReduceContext context) { + assert buckets.size() > 0; + List aggregationsList = new ArrayList<>(buckets.size()); + long docCount = 0; + for (InternalGeoGridBucket bucket : buckets) { + docCount += bucket.docCount; + aggregationsList.add(bucket.aggregations); + } + final InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context); + return createBucket(buckets.get(0).hashAsLong, docCount, aggs); + } + + abstract B createBucket(long hashAsLong, long docCount, InternalAggregations aggregations); + @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { builder.startArray(CommonFields.BUCKETS.getPreferredName()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java index 93002d607eaf..0df9661aa9ec 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java @@ -23,13 +23,10 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.Aggregations; -import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import java.io.IOException; -import java.util.ArrayList; -import java.util.List; import java.util.Objects; public abstract class InternalGeoGridBucket @@ -39,6 +36,8 @@ public abstract class InternalGeoGridBucket protected long docCount; protected InternalAggregations aggregations; + long bucketOrd; + public InternalGeoGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { this.docCount = docCount; this.aggregations = aggregations; @@ -61,9 +60,6 @@ public abstract class InternalGeoGridBucket aggregations.writeTo(out); } - abstract B buildBucket(InternalGeoGridBucket bucket, long hashAsLong, long docCount, InternalAggregations aggregations); - - long hashAsLong() { return hashAsLong; } @@ -89,17 +85,6 @@ public abstract class InternalGeoGridBucket return 0; } - public B reduce(List buckets, 
InternalAggregation.ReduceContext context) { - List aggregationsList = new ArrayList<>(buckets.size()); - long docCount = 0; - for (B bucket : buckets) { - docCount += bucket.docCount; - aggregationsList.add(bucket.aggregations); - } - final InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context); - return buildBucket(this, hashAsLong, docCount, aggs); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java index 7c874781d0c2..31650fa820b1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java @@ -57,6 +57,11 @@ public class InternalGeoHashGrid extends InternalGeoGrid buckets, Rounding rounding, ReduceContext context) { - List aggregations = new ArrayList<>(buckets.size()); - long docCount = 0; - for (Bucket bucket : buckets) { - docCount += bucket.docCount; - aggregations.add((InternalAggregations) bucket.getAggregations()); - } - InternalAggregations aggs = InternalAggregations.reduce(aggregations, context); - return new InternalAutoDateHistogram.Bucket(rounding.round(key), docCount, format, aggs); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { String keyAsString = format.format(key).toString(); @@ -328,14 +317,14 @@ public final class InternalAutoDateHistogram extends if (pq.size() > 0) { // list of buckets coming from different shards that have the same key List currentBuckets = new ArrayList<>(); - double key = reduceRounding.round(pq.top().current.key); + long key = reduceRounding.round(pq.top().current.key); do { final IteratorAndCurrent top = pq.top(); if (reduceRounding.round(top.current.key) != key) { // the key changes, reduce what we already buffered and reset the buffer for current buckets - final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceRounding, reduceContext); + final Bucket reduced = reduceBucket(currentBuckets, reduceContext); reduceContext.consumeBucketsAndMaybeBreak(1); reducedBuckets.add(reduced); currentBuckets.clear(); @@ -355,7 +344,7 @@ public final class InternalAutoDateHistogram extends } while (pq.size() > 0); if (currentBuckets.isEmpty() == false) { - final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceRounding, reduceContext); + final Bucket reduced = reduceBucket(currentBuckets, reduceContext); reduceContext.consumeBucketsAndMaybeBreak(1); reducedBuckets.add(reduced); } @@ -391,7 +380,7 @@ public final class InternalAutoDateHistogram extends sameKeyedBuckets.add(createBucket(key, bucket.docCount, bucket.aggregations)); } else { reduceContext.consumeBucketsAndMaybeBreak(1); - mergedBuckets.add(sameKeyedBuckets.get(0).reduce(sameKeyedBuckets, reduceRounding, reduceContext)); + mergedBuckets.add(reduceBucket(sameKeyedBuckets, reduceContext)); sameKeyedBuckets.clear(); key = roundedBucketKey; reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(bucket) - 1); @@ -400,12 +389,25 @@ public final class InternalAutoDateHistogram extends } if (sameKeyedBuckets.isEmpty() == false) { reduceContext.consumeBucketsAndMaybeBreak(1); - 
mergedBuckets.add(sameKeyedBuckets.get(0).reduce(sameKeyedBuckets, reduceRounding, reduceContext)); + mergedBuckets.add(reduceBucket(sameKeyedBuckets, reduceContext)); } reducedBuckets = mergedBuckets; return reducedBuckets; } + @Override + protected Bucket reduceBucket(List buckets, ReduceContext context) { + assert buckets.size() > 0; + List aggregations = new ArrayList<>(buckets.size()); + long docCount = 0; + for (Bucket bucket : buckets) { + docCount += bucket.docCount; + aggregations.add((InternalAggregations) bucket.getAggregations()); + } + InternalAggregations aggs = InternalAggregations.reduce(aggregations, context); + return new InternalAutoDateHistogram.Bucket(buckets.get(0).key, docCount, format, aggs); + } + private static class BucketReduceResult { List buckets; RoundingInfo roundingInfo; @@ -547,7 +549,7 @@ public final class InternalAutoDateHistogram extends Bucket bucket = reducedBuckets.get(i); if (i % mergeInterval == 0 && sameKeyedBuckets.isEmpty() == false) { reduceContext.consumeBucketsAndMaybeBreak(1); - mergedBuckets.add(sameKeyedBuckets.get(0).reduce(sameKeyedBuckets, roundingInfo.rounding, reduceContext)); + mergedBuckets.add(reduceBucket(sameKeyedBuckets, reduceContext)); sameKeyedBuckets.clear(); key = roundingInfo.rounding.round(bucket.key); } @@ -556,7 +558,7 @@ public final class InternalAutoDateHistogram extends } if (sameKeyedBuckets.isEmpty() == false) { reduceContext.consumeBucketsAndMaybeBreak(1); - mergedBuckets.add(sameKeyedBuckets.get(0).reduce(sameKeyedBuckets, roundingInfo.rounding, reduceContext)); + mergedBuckets.add(reduceBucket(sameKeyedBuckets, reduceContext)); } return new BucketReduceResult(mergedBuckets, roundingInfo, roundingIdx, mergeInterval); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index 8ac4ce5f27dc..1e79b60ca797 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -125,17 +125,6 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< return aggregations; } - Bucket reduce(List buckets, ReduceContext context) { - List aggregations = new ArrayList<>(buckets.size()); - long docCount = 0; - for (Bucket bucket : buckets) { - docCount += bucket.docCount; - aggregations.add((InternalAggregations) bucket.getAggregations()); - } - InternalAggregations aggs = InternalAggregations.reduce(aggregations, context); - return new InternalDateHistogram.Bucket(key, docCount, keyed, format, aggs); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { String keyAsString = format.format(key).toString(); @@ -342,7 +331,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< if (top.current.key != key) { // the key changes, reduce what we already buffered and reset the buffer for current buckets - final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext); + final Bucket reduced = reduceBucket(currentBuckets, reduceContext); if (reduced.getDocCount() >= minDocCount || reduceContext.isFinalReduce() == false) { reduceContext.consumeBucketsAndMaybeBreak(1); reducedBuckets.add(reduced); @@ -366,7 +355,7 @@ public final class InternalDateHistogram extends 
InternalMultiBucketAggregation< } while (pq.size() > 0); if (currentBuckets.isEmpty() == false) { - final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext); + final Bucket reduced = reduceBucket(currentBuckets, reduceContext); if (reduced.getDocCount() >= minDocCount || reduceContext.isFinalReduce() == false) { reduceContext.consumeBucketsAndMaybeBreak(1); reducedBuckets.add(reduced); @@ -379,6 +368,23 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< return reducedBuckets; } + /** + * Reduce a list of same-keyed buckets (from multiple shards) to a single bucket. This + * requires all buckets to have the same key. + */ + @Override + protected Bucket reduceBucket(List buckets, ReduceContext context) { + assert buckets.size() > 0; + List aggregations = new ArrayList<>(buckets.size()); + long docCount = 0; + for (Bucket bucket : buckets) { + docCount += bucket.docCount; + aggregations.add((InternalAggregations) bucket.getAggregations()); + } + InternalAggregations aggs = InternalAggregations.reduce(aggregations, context); + return createBucket(buckets.get(0).key, docCount, aggs); + } + private void addEmptyBuckets(List list, ReduceContext reduceContext) { Bucket lastBucket = null; ExtendedBounds bounds = emptyBucketInfo.bounds; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index 0bd18ab753e9..f4f7db5cd64a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -121,17 +121,6 @@ public final class InternalHistogram extends InternalMultiBucketAggregation buckets, ReduceContext context) { - List aggregations = new ArrayList<>(buckets.size()); - long docCount = 0; - for (Bucket bucket : buckets) { - docCount += bucket.docCount; - aggregations.add((InternalAggregations) bucket.getAggregations()); - } - InternalAggregations aggs = InternalAggregations.reduce(aggregations, context); - return new InternalHistogram.Bucket(key, docCount, keyed, format, aggs); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { String keyAsString = format.format(key).toString(); @@ -324,7 +313,7 @@ public final class InternalHistogram extends InternalMultiBucketAggregation= minDocCount || reduceContext.isFinalReduce() == false) { reduceContext.consumeBucketsAndMaybeBreak(1); reducedBuckets.add(reduced); @@ -348,7 +337,7 @@ public final class InternalHistogram extends InternalMultiBucketAggregation 0); if (currentBuckets.isEmpty() == false) { - final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceContext); + final Bucket reduced = reduceBucket(currentBuckets, reduceContext); if (reduced.getDocCount() >= minDocCount || reduceContext.isFinalReduce() == false) { reduceContext.consumeBucketsAndMaybeBreak(1); reducedBuckets.add(reduced); @@ -361,6 +350,19 @@ public final class InternalHistogram extends InternalMultiBucketAggregation buckets, ReduceContext context) { + assert buckets.size() > 0; + List aggregations = new ArrayList<>(buckets.size()); + long docCount = 0; + for (Bucket bucket : buckets) { + docCount += bucket.docCount; + aggregations.add((InternalAggregations) bucket.getAggregations()); + } + InternalAggregations aggs = 
InternalAggregations.reduce(aggregations, context); + return createBucket(buckets.get(0).key, docCount, aggs); + } + private double nextKey(double key) { return round(key + emptyBucketInfo.interval + emptyBucketInfo.interval / 2); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java index 7051403b302a..04252c0a25a5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java @@ -36,6 +36,7 @@ import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.stream.Collectors; import static java.util.Collections.unmodifiableList; @@ -256,6 +257,14 @@ public final class InternalBinaryRange return new InternalBinaryRange(name, format, keyed, buckets, pipelineAggregators(), metaData); } + @Override + protected Bucket reduceBucket(List buckets, ReduceContext context) { + assert buckets.size() > 0; + List aggregationsList = buckets.stream().map(bucket -> bucket.aggregations).collect(Collectors.toList()); + final InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context); + return createBucket(aggs, buckets.get(0)); + } + @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java index 94b8d2361703..cff67d6c8a14 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java @@ -121,17 +121,6 @@ public class InternalRange ranges, ReduceContext context) { - long docCount = 0; - List aggregationsList = new ArrayList<>(ranges.size()); - for (Bucket range : ranges) { - docCount += range.docCount; - aggregationsList.add(range.aggregations); - } - final InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context); - return getFactory().createBucket(key, from, to, docCount, aggs, keyed, format); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (keyed) { @@ -300,25 +289,39 @@ public class InternalRange aggregations, ReduceContext reduceContext) { reduceContext.consumeBucketsAndMaybeBreak(ranges.size()); - List[] rangeList = new List[ranges.size()]; + List[] rangeList = new List[ranges.size()]; for (int i = 0; i < rangeList.length; ++i) { rangeList[i] = new ArrayList<>(); } for (InternalAggregation aggregation : aggregations) { InternalRange ranges = (InternalRange) aggregation; int i = 0; - for (Bucket range : ranges.ranges) { + for (B range : ranges.ranges) { rangeList[i++].add(range); } } final List ranges = new ArrayList<>(); for (int i = 0; i < this.ranges.size(); ++i) { - ranges.add((B) rangeList[i].get(0).reduce(rangeList[i], reduceContext)); + ranges.add((B) reduceBucket(rangeList[i], reduceContext)); } return getFactory().create(name, ranges, format, keyed, pipelineAggregators(), getMetaData()); } + @Override + protected B reduceBucket(List buckets, ReduceContext context) { + assert buckets.size() > 0; + long docCount = 0; + List aggregationsList = new 
ArrayList<>(buckets.size()); + for (Bucket bucket : buckets) { + docCount += bucket.docCount; + aggregationsList.add(bucket.aggregations); + } + final InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context); + Bucket prototype = buckets.get(0); + return getFactory().createBucket(prototype.key, prototype.from, prototype.to, docCount, aggs, keyed, format); + } + @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { if (keyed) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java index 5d8bc893a260..49c2718baaf2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java @@ -119,21 +119,6 @@ public abstract class InternalSignificantTerms buckets, ReduceContext context) { - long subsetDf = 0; - long supersetDf = 0; - List aggregationsList = new ArrayList<>(buckets.size()); - for (B bucket : buckets) { - subsetDf += bucket.subsetDf; - supersetDf += bucket.supersetDf; - aggregationsList.add(bucket.aggregations); - } - InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context); - return newBucket(subsetDf, subsetSize, supersetDf, supersetSize, aggs); - } - - abstract B newBucket(long subsetDf, long subsetSize, long supersetDf, long supersetSize, InternalAggregations aggregations); - @Override public double getSignificanceScore() { return score; @@ -229,8 +214,8 @@ public abstract class InternalSignificantTerms ordered = new BucketSignificancePriorityQueue<>(size); for (Map.Entry> entry : buckets.entrySet()) { List sameTermBuckets = entry.getValue(); - final B b = sameTermBuckets.get(0).reduce(sameTermBuckets, reduceContext); + final B b = reduceBucket(sameTermBuckets, reduceContext); b.updateScore(heuristic); if (((b.score > 0) && (b.subsetDf >= minDocCount)) || reduceContext.isFinalReduce() == false) { B removed = ordered.insertWithOverflow(b); @@ -258,6 +243,24 @@ public abstract class InternalSignificantTerms buckets, ReduceContext context) { + assert buckets.size() > 0; + long subsetDf = 0; + long supersetDf = 0; + List aggregationsList = new ArrayList<>(buckets.size()); + for (B bucket : buckets) { + subsetDf += bucket.subsetDf; + supersetDf += bucket.supersetDf; + aggregationsList.add(bucket.aggregations); + } + InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context); + return createBucket(subsetDf, buckets.get(0).subsetSize, supersetDf, buckets.get(0).supersetSize, aggs, buckets.get(0)); + } + + abstract B createBucket(long subsetDf, long subsetSize, long supersetDf, long supersetSize, + InternalAggregations aggregations, B prototype); + protected abstract A create(long subsetSize, long supersetSize, List buckets); /** diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java index fd4eec825774..582346f529a8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java @@ -86,11 +86,6 @@ 
public class SignificantLongTerms extends InternalMappedSignificantTerms aggregations, ReduceContext reduceContext) { return new UnmappedSignificantTerms(name, requiredSize, minDocCount, pipelineAggregators(), metaData); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java index 65e684e31502..8bc0e83c8d6a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java @@ -81,11 +81,6 @@ public class DoubleTerms extends InternalMappedTerms, final List rare = new ArrayList<>(); for (List sameTermBuckets : buckets.values()) { - final B b = sameTermBuckets.get(0).reduce(sameTermBuckets, reduceContext); + final B b = reduceBucket(sameTermBuckets, reduceContext); if ((b.getDocCount() <= maxDocCount && containsTerm(filter, b) == false)) { rare.add(b); reduceContext.consumeBucketsAndMaybeBreak(1); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java index dd1a0c19200c..ae9f8e27ec6a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java @@ -91,19 +91,6 @@ public abstract class InternalRareTerms, B ext return aggregations; } - abstract B newBucket(long docCount, InternalAggregations aggs); - - public B reduce(List buckets, ReduceContext context) { - long docCount = 0; - List aggregationsList = new ArrayList<>(buckets.size()); - for (B bucket : buckets) { - docCount += bucket.docCount; - aggregationsList.add(bucket.aggregations); - } - InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context); - return newBucket(docCount, aggs); - } - @Override public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -171,6 +158,21 @@ public abstract class InternalRareTerms, B ext throw new UnsupportedOperationException(); } + abstract B createBucket(long docCount, InternalAggregations aggs, B prototype); + + @Override + protected B reduceBucket(List buckets, ReduceContext context) { + assert buckets.size() > 0; + long docCount = 0; + List aggregationsList = new ArrayList<>(buckets.size()); + for (B bucket : buckets) { + docCount += bucket.docCount; + aggregationsList.add(bucket.aggregations); + } + InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context); + return createBucket(docCount, aggs, buckets.get(0)); + } + protected abstract A createWithFilter(String name, List buckets, SetBackedScalingCuckooFilter filter); /** diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java index fc607621ff7e..3eefc9bee010 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java @@ -118,31 +118,6 @@ public abstract class InternalTerms, B extends Int return aggregations; } - abstract B newBucket(long docCount, InternalAggregations aggs, long docCountError); - - 
public B reduce(List buckets, ReduceContext context) { - long docCount = 0; - // For the per term doc count error we add up the errors from the - // shards that did not respond with the term. To do this we add up - // the errors from the shards that did respond with the terms and - // subtract that from the sum of the error from all shards - long docCountError = 0; - List aggregationsList = new ArrayList<>(buckets.size()); - for (B bucket : buckets) { - docCount += bucket.docCount; - if (docCountError != -1) { - if (bucket.docCountError == -1) { - docCountError = -1; - } else { - docCountError += bucket.docCountError; - } - } - aggregationsList.add(bucket.aggregations); - } - InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context); - return newBucket(docCount, aggs, docCountError); - } - @Override public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -283,7 +258,7 @@ public abstract class InternalTerms, B extends Int final int size = reduceContext.isFinalReduce() == false ? buckets.size() : Math.min(requiredSize, buckets.size()); final BucketPriorityQueue ordered = new BucketPriorityQueue<>(size, order.comparator(null)); for (List sameTermBuckets : buckets.values()) { - final B b = sameTermBuckets.get(0).reduce(sameTermBuckets, reduceContext); + final B b = reduceBucket(sameTermBuckets, reduceContext); if (sumDocCountError == -1) { b.docCountError = -1; } else { @@ -314,6 +289,31 @@ public abstract class InternalTerms, B extends Int return create(name, Arrays.asList(list), docCountError, otherDocCount); } + @Override + protected B reduceBucket(List buckets, ReduceContext context) { + assert buckets.size() > 0; + long docCount = 0; + // For the per term doc count error we add up the errors from the + // shards that did not respond with the term. 
To do this we add up + // the errors from the shards that did respond with the terms and + // subtract that from the sum of the error from all shards + long docCountError = 0; + List aggregationsList = new ArrayList<>(buckets.size()); + for (B bucket : buckets) { + docCount += bucket.docCount; + if (docCountError != -1) { + if (bucket.docCountError == -1) { + docCountError = -1; + } else { + docCountError += bucket.docCountError; + } + } + aggregationsList.add(bucket.aggregations); + } + InternalAggregations aggs = InternalAggregations.reduce(aggregationsList, context); + return createBucket(docCount, aggs, docCountError, buckets.get(0)); + } + protected abstract void setDocCountError(long docCountError); protected abstract int getShardSize(); @@ -325,6 +325,8 @@ public abstract class InternalTerms, B extends Int */ protected abstract B[] createBucketsArray(int size); + abstract B createBucket(long docCount, InternalAggregations aggs, long docCountError, B prototype); + @Override public boolean equals(Object obj) { if (this == obj) return true; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTerms.java index 29f84fb6030e..83552b8e078e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTerms.java @@ -80,11 +80,6 @@ public class LongRareTerms extends InternalMappedRareTerms return Long.compare(term, other.term); } - @Override - Bucket newBucket(long docCount, InternalAggregations aggs, long docCountError) { - return new Bucket(term, docCount, aggs, showDocCountError, docCountError, format); - } - @Override protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException { builder.field(CommonFields.KEY.getPreferredName(), term); @@ -158,6 +153,11 @@ public class LongTerms extends InternalMappedTerms return super.doReduce(aggregations, reduceContext); } + @Override + Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, LongTerms.Bucket prototype) { + return new Bucket(prototype.term, docCount, aggs, prototype.showDocCountError, docCountError, format); + } + /** * Converts a {@link LongTerms} into a {@link DoubleTerms}, returning the value of the specified long terms as doubles. 
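
In the InternalTerms hunk the doc-count-error bookkeeping moves into reduceBucket() unchanged: the per-shard errors for a term are summed, a sentinel of -1 from any shard marks the merged error as unknown, and the first bucket again serves as the prototype passed to createBucket(...). A tiny, hypothetical illustration of that merge rule (not Elasticsearch code):

import java.util.List;

final class DocCountErrorMergeSketch {

    // -1 means "unknown"; once any shard reports -1 the merged error is unknown as well.
    static long mergeDocCountError(List<Long> perShardErrors) {
        long merged = 0;
        for (long error : perShardErrors) {
            if (error == -1) {
                return -1;
            }
            merged += error;
        }
        return merged;
    }

    public static void main(String[] args) {
        System.out.println(mergeDocCountError(List.of(3L, 5L)));   // 8
        System.out.println(mergeDocCountError(List.of(3L, -1L)));  // -1
    }
}
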
*/ diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringRareTerms.java index 3c3e19664a63..384e20bf69f5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringRareTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringRareTerms.java @@ -86,11 +86,6 @@ public class StringRareTerms extends InternalMappedRareTerms buckets, long docCountError, long otherDocCount) { return new StringTerms(name, order, requiredSize, minDocCount, pipelineAggregators(), getMetaData(), format, shardSize, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedRareTerms.java index eff5441a1d7e..c4a019e6fe9b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedRareTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedRareTerms.java @@ -82,6 +82,11 @@ public class UnmappedRareTerms extends InternalRareTerms buckets, SetBackedScalingCuckooFilter filter) { throw new UnsupportedOperationException("not supported for UnmappedRareTerms"); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java index 17a3e603b6fc..8096366f6d65 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java @@ -88,6 +88,11 @@ public class UnmappedTerms extends InternalTerms buckets, long docCountError, long otherDocCount) { throw new UnsupportedOperationException("not supported for UnmappedTerms"); diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 85497ad60c6e..7364f1e859de 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.RepositoryCleanupInProgress; import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.SnapshotDeletionsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress; @@ -264,6 +265,11 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus throw new ConcurrentSnapshotExecutionException(repositoryName, snapshotName, "cannot snapshot while a snapshot deletion is in-progress"); } + final RepositoryCleanupInProgress repositoryCleanupInProgress = currentState.custom(RepositoryCleanupInProgress.TYPE); + if (repositoryCleanupInProgress != null && repositoryCleanupInProgress.cleanupInProgress() == false) { + throw new ConcurrentSnapshotExecutionException(repositoryName, snapshotName, + "cannot snapshot while a repository cleanup is in-progress"); + } SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); if (snapshots == null || snapshots.entries().isEmpty()) { // Store newSnapshot 
here to be processed in clusterStateProcessed @@ -1134,6 +1140,11 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus throw new ConcurrentSnapshotExecutionException(snapshot, "cannot delete - another snapshot is currently being deleted"); } + final RepositoryCleanupInProgress repositoryCleanupInProgress = currentState.custom(RepositoryCleanupInProgress.TYPE); + if (repositoryCleanupInProgress != null && repositoryCleanupInProgress.cleanupInProgress() == false) { + throw new ConcurrentSnapshotExecutionException(snapshot.getRepository(), snapshot.getSnapshotId().getName(), + "cannot delete snapshot while a repository cleanup is in-progress"); + } RestoreInProgress restoreInProgress = currentState.custom(RestoreInProgress.TYPE); if (restoreInProgress != null) { // don't allow snapshot deletions while a restore is taking place, diff --git a/server/src/main/java/org/elasticsearch/transport/FutureTransportResponseHandler.java b/server/src/main/java/org/elasticsearch/transport/FutureTransportResponseHandler.java deleted file mode 100644 index 36b69de23260..000000000000 --- a/server/src/main/java/org/elasticsearch/transport/FutureTransportResponseHandler.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.transport; - -import org.elasticsearch.threadpool.ThreadPool; - -/** - * A response handler to be used when all interaction will be done through the {@link TransportFuture}. 
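
The two SnapshotsService hunks above add the same guard to snapshot creation and snapshot deletion: if the cluster state carries a RepositoryCleanupInProgress entry for a running cleanup, the request fails with a ConcurrentSnapshotExecutionException. A generic sketch of that guard pattern, with hypothetical stand-ins for the cluster-state lookup:

// Hypothetical stand-ins; the real code reads RepositoryCleanupInProgress from the cluster state
// and throws ConcurrentSnapshotExecutionException rather than IllegalStateException.
final class SnapshotGuardSketch {

    interface ClusterStateView {
        boolean repositoryCleanupRunning();
    }

    static void ensureNoRepositoryCleanup(ClusterStateView state, String repository, String snapshot) {
        if (state.repositoryCleanupRunning()) {
            throw new IllegalStateException("[" + repository + ":" + snapshot
                + "] cannot snapshot or delete while a repository cleanup is in progress");
        }
    }
}
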
- */ -public abstract class FutureTransportResponseHandler implements TransportResponseHandler { - - @Override - public void handleResponse(T response) { - } - - @Override - public void handleException(TransportException exp) { - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } -} diff --git a/server/src/main/java/org/elasticsearch/transport/PlainTransportFuture.java b/server/src/main/java/org/elasticsearch/transport/PlainTransportFuture.java index a16aebe4eb76..a22ae9e311df 100644 --- a/server/src/main/java/org/elasticsearch/transport/PlainTransportFuture.java +++ b/server/src/main/java/org/elasticsearch/transport/PlainTransportFuture.java @@ -25,9 +25,9 @@ import org.elasticsearch.common.util.concurrent.BaseFuture; import java.io.IOException; import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; -public class PlainTransportFuture extends BaseFuture - implements TransportFuture, TransportResponseHandler { +public class PlainTransportFuture extends BaseFuture implements Future, TransportResponseHandler { private final TransportResponseHandler handler; @@ -35,7 +35,6 @@ public class PlainTransportFuture extends BaseFutur this.handler = handler; } - @Override public V txGet() { try { return get(); diff --git a/server/src/main/java/org/elasticsearch/transport/TransportFuture.java b/server/src/main/java/org/elasticsearch/transport/TransportFuture.java deleted file mode 100644 index 431a00aa6fb6..000000000000 --- a/server/src/main/java/org/elasticsearch/transport/TransportFuture.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.transport; - -import java.util.concurrent.Future; - -public interface TransportFuture extends Future { - - /** - * Waits if necessary for the computation to complete, and then - * retrieves its result. 
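
The transport hunks here and below remove the bespoke TransportFuture interface, FutureTransportResponseHandler, and the TransportService#submitRequest variants, leaving PlainTransportFuture as an implementation of the standard java.util.concurrent.Future. As a rough, generic illustration of that direction (not the actual Elasticsearch types), a response handler can simply complete a standard future that callers block on:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;

// Generic sketch: the handler completes a CompletableFuture, so callers only ever
// see java.util.concurrent.Future instead of a custom future interface.
final class FutureBackedHandlerSketch<T> {

    private final CompletableFuture<T> future = new CompletableFuture<>();

    void handleResponse(T response) {
        future.complete(response);
    }

    void handleException(Exception exception) {
        future.completeExceptionally(exception);
    }

    Future<T> asFuture() {
        return future;
    }
}
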
- */ - V txGet(); -} - diff --git a/server/src/main/java/org/elasticsearch/transport/TransportInfo.java b/server/src/main/java/org/elasticsearch/transport/TransportInfo.java index b27bfe993257..9212fef63177 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportInfo.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportInfo.java @@ -19,11 +19,15 @@ package org.elasticsearch.transport; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -31,14 +35,29 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.common.Booleans.parseBoolean; + public class TransportInfo implements Writeable, ToXContentFragment { + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(TransportInfo.class)); + + /** Whether to add hostname to publish host field when serializing. */ + private static final boolean CNAME_IN_PUBLISH_ADDRESS = + parseBoolean(System.getProperty("es.transport.cname_in_publish_address"), false); + private BoundTransportAddress address; private Map profileAddresses; + private final boolean cnameInPublishAddress; public TransportInfo(BoundTransportAddress address, @Nullable Map profileAddresses) { + this(address, profileAddresses, CNAME_IN_PUBLISH_ADDRESS); + } + + public TransportInfo(BoundTransportAddress address, @Nullable Map profileAddresses, + boolean cnameInPublishAddress) { this.address = address; this.profileAddresses = profileAddresses; + this.cnameInPublishAddress = cnameInPublishAddress; } public TransportInfo(StreamInput in) throws IOException { @@ -52,6 +71,7 @@ public class TransportInfo implements Writeable, ToXContentFragment { profileAddresses.put(key, value); } } + this.cnameInPublishAddress = CNAME_IN_PUBLISH_ADDRESS; } @Override @@ -77,17 +97,35 @@ public class TransportInfo implements Writeable, ToXContentFragment { static final String PROFILES = "profiles"; } + private String formatPublishAddressString(String propertyName, TransportAddress publishAddress){ + String publishAddressString = publishAddress.toString(); + String hostString = publishAddress.address().getHostString(); + if (InetAddresses.isInetAddress(hostString) == false) { + if (cnameInPublishAddress) { + publishAddressString = hostString + '/' + publishAddress.toString(); + } else { + deprecationLogger.deprecated( + propertyName + " was printed as [ip:port] instead of [hostname/ip:port]. " + + "This format is deprecated and will change to [hostname/ip:port] in a future version. " + + "Use -Des.transport.cname_in_publish_address=true to enforce non-deprecated formatting." 
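
The TransportInfo change below makes the publish_address rendering configurable: when the publish host resolves to a hostname rather than a raw IP, the new es.transport.cname_in_publish_address system property switches the output to hostname/ip:port, and otherwise the old ip:port form is kept while the deprecation warning shown in the hunk is logged. A simplified sketch of just the formatting rule (a hypothetical helper, not the real method):

final class PublishAddressFormatSketch {

    static String format(String hostString, String ipAndPort, boolean hostIsRawIp, boolean cnameInPublishAddress) {
        if (hostIsRawIp == false && cnameInPublishAddress) {
            return hostString + '/' + ipAndPort;   // e.g. "node-1.example.com/10.0.0.5:9300"
        }
        return ipAndPort;                          // e.g. "10.0.0.5:9300"
    }

    public static void main(String[] args) {
        System.out.println(format("node-1.example.com", "10.0.0.5:9300", false, true));
        System.out.println(format("node-1.example.com", "10.0.0.5:9300", false, false));
    }
}
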
+ ); + } + } + return publishAddressString; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.TRANSPORT); builder.array(Fields.BOUND_ADDRESS, (Object[]) address.boundAddresses()); - builder.field(Fields.PUBLISH_ADDRESS, address.publishAddress().toString()); + builder.field(Fields.PUBLISH_ADDRESS, formatPublishAddressString("transport.publish_address", address.publishAddress())); builder.startObject(Fields.PROFILES); if (profileAddresses != null && profileAddresses.size() > 0) { for (Map.Entry entry : profileAddresses.entrySet()) { builder.startObject(entry.getKey()); builder.array(Fields.BOUND_ADDRESS, (Object[]) entry.getValue().boundAddresses()); - builder.field(Fields.PUBLISH_ADDRESS, entry.getValue().publishAddress().toString()); + String propertyName = "transport." + entry.getKey() + ".publish_address"; + builder.field(Fields.PUBLISH_ADDRESS, formatPublishAddressString(propertyName, entry.getValue().publishAddress())); builder.endObject(); } } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportResponseHandler.java b/server/src/main/java/org/elasticsearch/transport/TransportResponseHandler.java index c43f57754d62..29720216cf40 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportResponseHandler.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportResponseHandler.java @@ -19,12 +19,8 @@ package org.elasticsearch.transport; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; -import java.io.IOException; -import java.util.function.Function; - public interface TransportResponseHandler extends Writeable.Reader { void handleResponse(T response); @@ -32,29 +28,4 @@ public interface TransportResponseHandler extends W void handleException(TransportException exp); String executor(); - - default TransportResponseHandler wrap(Function converter, Writeable.Reader reader) { - final TransportResponseHandler self = this; - return new TransportResponseHandler() { - @Override - public void handleResponse(Q response) { - self.handleResponse(converter.apply(response)); - } - - @Override - public void handleException(TransportException exp) { - self.handleException(exp); - } - - @Override - public String executor() { - return self.executor(); - } - - @Override - public Q read(StreamInput in) throws IOException { - return reader.read(in); - } - }; - } } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index ad63ae518e76..ddd7a0d4cab1 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -541,25 +541,6 @@ public class TransportService extends AbstractLifecycleComponent implements Tran connectionManager.removeListener(listener); } - public TransportFuture submitRequest(DiscoveryNode node, String action, TransportRequest request, - TransportResponseHandler handler) throws TransportException { - return submitRequest(node, action, request, TransportRequestOptions.EMPTY, handler); - } - - public TransportFuture submitRequest(DiscoveryNode node, String action, TransportRequest request, - TransportRequestOptions options, - TransportResponseHandler handler) throws TransportException { - PlainTransportFuture futureHandler = new PlainTransportFuture<>(handler); - try { - Transport.Connection connection = 
getConnection(node); - sendRequest(connection, action, request, options, futureHandler); - } catch (NodeNotConnectedException ex) { - // the caller might not handle this so we invoke the handler - futureHandler.handleException(ex); - } - return futureHandler; - } - public void sendRequest(final DiscoveryNode node, final String action, final TransportRequest request, final TransportResponseHandler handler) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java index 419d669d0663..e95c4e437746 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java @@ -44,6 +44,7 @@ import java.util.Set; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS; +import static org.hamcrest.CoreMatchers.equalTo; public class CreateIndexRequestTests extends ESTestCase { @@ -196,6 +197,15 @@ public class CreateIndexRequestTests extends ESTestCase { ElasticsearchAssertions.assertToXContentEquivalent(originalBytes, finalBytes, xContentType); } + public void testSettingsType() throws IOException { + XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + builder.startObject().startArray("settings").endArray().endObject(); + + CreateIndexRequest parsedCreateIndexRequest = new CreateIndexRequest(); + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> parsedCreateIndexRequest.source(builder)); + assertThat(e.getMessage(), equalTo("key [settings] must be an object")); + } + public static void assertMappingsEqual(Map expected, Map actual) throws IOException { assertEquals(expected.keySet(), actual.keySet()); diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java index 17a2bd8eff25..5cf387047253 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java @@ -496,10 +496,10 @@ public class SettingsTests extends ESTestCase { public void testSecureSettingIllegalName() { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - SecureSetting.secureString("UpperCaseSetting", null)); + SecureSetting.secureString("*IllegalName", null)); assertTrue(e.getMessage().contains("does not match the allowed setting name pattern")); e = expectThrows(IllegalArgumentException.class, () -> - SecureSetting.secureFile("UpperCaseSetting", null)); + SecureSetting.secureFile("*IllegalName", null)); assertTrue(e.getMessage().contains("does not match the allowed setting name pattern")); } diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/AbstractAsyncTaskTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/AbstractAsyncTaskTests.java index 3a1cab90f0d8..10eec2c13d3a 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/AbstractAsyncTaskTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/AbstractAsyncTaskTests.java @@ -18,18 +18,23 @@ */ package org.elasticsearch.common.util.concurrent; +import org.elasticsearch.common.Randomness; import 
org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.junit.AfterClass; import org.junit.BeforeClass; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; public class AbstractAsyncTaskTests extends ESTestCase { @@ -203,4 +208,31 @@ public class AbstractAsyncTaskTests extends ESTestCase { assertFalse(task.isScheduled()); assertTrue(task.isClosed()); } + + public void testIsScheduledRemainFalseAfterClose() throws Exception { + int numTasks = between(10, 50); + List tasks = new ArrayList<>(numTasks); + AtomicLong counter = new AtomicLong(); + for (int i = 0; i < numTasks; i++) { + AbstractAsyncTask task = new AbstractAsyncTask(logger, threadPool, TimeValue.timeValueMillis(randomIntBetween(1, 2)), true) { + @Override + protected boolean mustReschedule() { + return counter.get() <= 1000; + } + @Override + protected void runInternal() { + counter.incrementAndGet(); + } + }; + task.rescheduleIfNecessary(); + tasks.add(task); + } + Randomness.shuffle(tasks); + IOUtils.close(tasks); + Randomness.shuffle(tasks); + for (AbstractAsyncTask task : tasks) { + assertTrue(task.isClosed()); + assertFalse(task.isScheduled()); + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java index e788d988cb95..86a56bc50a05 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java @@ -390,20 +390,24 @@ public class IndexServiceTests extends ESSingleNodeTestCase { IndexService indexService = createIndex(indexName, Settings.builder() .put(TRANSLOG_RETENTION_CHECK_INTERVAL_SETTING.getKey(), "100ms") .build()); - Translog translog = IndexShardTestCase.getTranslog(indexService.getShard(0)); final Path translogPath = translog.getConfig().getTranslogPath(); final String translogUuid = translog.getTranslogUUID(); + int translogOps = 0; final int numDocs = scaledRandomIntBetween(10, 100); for (int i = 0; i < numDocs; i++) { client().prepareIndex().setIndex(indexName).setId(String.valueOf(i)).setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); + translogOps++; if (randomBoolean()) { client().admin().indices().prepareFlush(indexName).get(); + if (indexService.getIndexSettings().isSoftDeleteEnabled()) { + translogOps = 0; + } } } - assertThat(translog.totalOperations(), equalTo(numDocs)); - assertThat(translog.stats().estimatedNumberOfOperations(), equalTo(numDocs)); + assertThat(translog.totalOperations(), equalTo(translogOps)); + assertThat(translog.stats().estimatedNumberOfOperations(), equalTo(translogOps)); assertAcked(client().admin().indices().prepareClose("test")); indexService = getInstanceFromNode(IndicesService.class).indexServiceSafe(indexService.index()); diff --git a/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java b/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java index 31a92745a424..0b57d92edd4b 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.translog.Translog; @@ -568,4 +569,64 @@ public class IndexSettingsTests extends ESTestCase { Settings settings = Settings.builder().put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), createdVersion).build(); assertTrue(IndexSettings.INDEX_SOFT_DELETES_SETTING.get(settings)); } + + public void testIgnoreTranslogRetentionSettingsIfSoftDeletesEnabled() { + Settings.Builder settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomIndexCompatibleVersion(random())); + if (randomBoolean()) { + settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), randomPositiveTimeValue()); + } + if (randomBoolean()) { + settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), between(1, 1024) + "b"); + } + IndexMetaData metaData = newIndexMeta("index", settings.build()); + IndexSettings indexSettings = new IndexSettings(metaData, Settings.EMPTY); + assertThat(indexSettings.getTranslogRetentionAge().millis(), equalTo(-1L)); + assertThat(indexSettings.getTranslogRetentionSize().getBytes(), equalTo(-1L)); + + Settings.Builder newSettings = Settings.builder().put(settings.build()); + if (randomBoolean()) { + newSettings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), randomPositiveTimeValue()); + } + if (randomBoolean()) { + newSettings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), between(1, 1024) + "b"); + } + indexSettings.updateIndexMetaData(newIndexMeta("index", newSettings.build())); + assertThat(indexSettings.getTranslogRetentionAge().millis(), equalTo(-1L)); + assertThat(indexSettings.getTranslogRetentionSize().getBytes(), equalTo(-1L)); + } + + public void testUpdateTranslogRetentionSettingsWithSoftDeletesDisabled() { + Settings.Builder settings = Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT); + + TimeValue ageSetting = TimeValue.timeValueHours(12); + if (randomBoolean()) { + ageSetting = randomBoolean() ? TimeValue.MINUS_ONE : TimeValue.timeValueMillis(randomIntBetween(0, 10000)); + settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), ageSetting); + } + ByteSizeValue sizeSetting = new ByteSizeValue(512, ByteSizeUnit.MB); + if (randomBoolean()) { + sizeSetting = randomBoolean() ? new ByteSizeValue(-1) : new ByteSizeValue(randomIntBetween(0, 1024)); + settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), sizeSetting); + } + IndexMetaData metaData = newIndexMeta("index", settings.build()); + IndexSettings indexSettings = new IndexSettings(metaData, Settings.EMPTY); + assertThat(indexSettings.getTranslogRetentionAge(), equalTo(ageSetting)); + assertThat(indexSettings.getTranslogRetentionSize(), equalTo(sizeSetting)); + + Settings.Builder newSettings = Settings.builder().put(settings.build()); + if (randomBoolean()) { + ageSetting = randomBoolean() ? 
TimeValue.MINUS_ONE : TimeValue.timeValueMillis(randomIntBetween(0, 10000)); + newSettings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), ageSetting); + } + if (randomBoolean()) { + sizeSetting = randomBoolean() ? new ByteSizeValue(-1) : new ByteSizeValue(randomIntBetween(0, 1024)); + newSettings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), sizeSetting); + } + indexSettings.updateIndexMetaData(newIndexMeta("index", newSettings.build())); + assertThat(indexSettings.getTranslogRetentionAge(), equalTo(ageSetting)); + assertThat(indexSettings.getTranslogRetentionSize(), equalTo(sizeSetting)); + } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java index bd934f683fb9..623bbe0ec50d 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java @@ -169,13 +169,14 @@ public class NoOpEngineTests extends EngineTestCase { tracker.updateFromMaster(1L, Collections.singleton(allocationId.getId()), table); tracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); + boolean softDeleteEnabled = engine.config().getIndexSettings().isSoftDeleteEnabled(); final int numDocs = scaledRandomIntBetween(10, 3000); for (int i = 0; i < numDocs; i++) { engine.index(indexForDoc(createParsedDoc(Integer.toString(i), null))); + tracker.updateLocalCheckpoint(allocationId.getId(), i); if (rarely()) { engine.flush(); } - tracker.updateLocalCheckpoint(allocationId.getId(), i); } engine.flush(true, true); @@ -195,7 +196,7 @@ public class NoOpEngineTests extends EngineTestCase { } assertThat(Translog.readMinTranslogGeneration(translogPath, translogUuid), equalTo(minFileGeneration)); - assertThat(noOpEngine.getTranslogStats().estimatedNumberOfOperations(), equalTo(numDocs)); + assertThat(noOpEngine.getTranslogStats().estimatedNumberOfOperations(), equalTo(softDeleteEnabled ? 0 : numDocs)); assertThat(noOpEngine.getTranslogStats().getUncommittedOperations(), equalTo(0)); noOpEngine.trimUnreferencedTranslogFiles(); diff --git a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java index 506be95c2259..c01aca80825c 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java @@ -250,7 +250,7 @@ public class ReadOnlyEngineTests extends EngineTestCase { try (Store store = createStore()) { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); - + final boolean softDeletesEnabled = config.getIndexSettings().isSoftDeleteEnabled(); final int numDocs = frequently() ? 
scaledRandomIntBetween(10, 200) : 0; int uncommittedDocs = 0; @@ -259,16 +259,17 @@ public class ReadOnlyEngineTests extends EngineTestCase { ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); engine.index(new Engine.Index(newUid(doc), doc, i, primaryTerm.get(), 1, null, Engine.Operation.Origin.REPLICA, System.nanoTime(), -1, false, SequenceNumbers.UNASSIGNED_SEQ_NO, 0)); + globalCheckpoint.set(i); if (rarely()) { engine.flush(); uncommittedDocs = 0; } else { uncommittedDocs += 1; } - globalCheckpoint.set(i); } - assertThat(engine.getTranslogStats().estimatedNumberOfOperations(), equalTo(numDocs)); + assertThat(engine.getTranslogStats().estimatedNumberOfOperations(), + equalTo(softDeletesEnabled ? uncommittedDocs : numDocs)); assertThat(engine.getTranslogStats().getUncommittedOperations(), equalTo(uncommittedDocs)); assertThat(engine.getTranslogStats().getTranslogSizeInBytes(), greaterThan(0L)); assertThat(engine.getTranslogStats().getUncommittedSizeInBytes(), greaterThan(0L)); @@ -278,7 +279,7 @@ public class ReadOnlyEngineTests extends EngineTestCase { } try (ReadOnlyEngine readOnlyEngine = new ReadOnlyEngine(config, null, null, true, Function.identity())) { - assertThat(readOnlyEngine.getTranslogStats().estimatedNumberOfOperations(), equalTo(numDocs)); + assertThat(readOnlyEngine.getTranslogStats().estimatedNumberOfOperations(), equalTo(softDeletesEnabled ? 0 : numDocs)); assertThat(readOnlyEngine.getTranslogStats().getUncommittedOperations(), equalTo(0)); assertThat(readOnlyEngine.getTranslogStats().getTranslogSizeInBytes(), greaterThan(0L)); assertThat(readOnlyEngine.getTranslogStats().getUncommittedSizeInBytes(), greaterThan(0L)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index 3e02e2a6be05..c67c2563fb30 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -185,9 +185,6 @@ public class MapperServiceTests extends ESSingleNodeTestCase { MapperService mapperService = createIndex("index").mapperService(); assertThat(mapperService.unmappedFieldType("keyword"), instanceOf(KeywordFieldType.class)); assertThat(mapperService.unmappedFieldType("long"), instanceOf(NumberFieldType.class)); - // back compat - assertThat(mapperService.unmappedFieldType("string"), instanceOf(KeywordFieldType.class)); - assertWarnings("[unmapped_type:string] should be replaced with [unmapped_type:keyword]"); } public void testPartitionedConstraints() { diff --git a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index 3809d002483d..2817c51d33aa 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -467,7 +467,12 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase shards.startReplicas(nReplica); for (IndexShard shard : shards) { try (Translog.Snapshot snapshot = getTranslog(shard).newSnapshot()) { - assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); + // we flush at the end of peer recovery + if (shard.routingEntry().primary() || shard.indexSettings().isSoftDeleteEnabled() 
== false) { + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); + } else { + assertThat(snapshot.totalOperations(), equalTo(0)); + } } try (Translog.Snapshot snapshot = shard.getHistoryOperations("test", 0)) { assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); @@ -476,11 +481,16 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase // the failure replicated directly from the replication channel. indexResp = shards.index(new IndexRequest(index.getName(), "type", "any").source("{}", XContentType.JSON)); assertThat(indexResp.getFailure().getCause(), equalTo(indexException)); - expectedTranslogOps.add(new Translog.NoOp(1, primaryTerm, indexException.toString())); + Translog.NoOp noop2 = new Translog.NoOp(1, primaryTerm, indexException.toString()); + expectedTranslogOps.add(noop2); for (IndexShard shard : shards) { try (Translog.Snapshot snapshot = getTranslog(shard).newSnapshot()) { - assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); + if (shard.routingEntry().primary() || shard.indexSettings().isSoftDeleteEnabled() == false) { + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); + } else { + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(Collections.singletonList(noop2))); + } } try (Translog.Snapshot snapshot = shard.getHistoryOperations("test", 0)) { assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index a49d82f95ea0..1d7969dbfde9 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -709,10 +709,16 @@ public class IndexShardIT extends ESSingleNodeTestCase { // with ZERO we are guaranteed to see the doc since we will wait for a refresh in the background assertFalse(hasRefreshed); assertTrue(shard.isSearchIdle()); - } else if (randomTimeValue == null){ - // with null we are guaranteed to see the doc since do execute the refresh. - // we can't assert on hasRefreshed since it might have been refreshed in the background on the shard concurrently - assertFalse(shard.isSearchIdle()); + } else { + if (randomTimeValue == null) { + assertFalse(shard.isSearchIdle()); + } + // we can't assert on hasRefreshed since it might have been refreshed in the background on the shard concurrently. + // and if the background refresh wins the refresh race (both call maybeRefresh), the document might not be visible + // until the background refresh is done. 
+ if (hasRefreshed == false) { + ensureNoPendingScheduledRefresh(indexService.getThreadPool()); + } } CountDownLatch started = new CountDownLatch(1); Thread t = new Thread(() -> { diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 3e507a3cfb68..8947f4e9905e 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -2129,11 +2129,13 @@ public class IndexShardTests extends IndexShardTestCase { /* This test just verifies that we fill up local checkpoint up to max seen seqID on primary recovery */ public void testRecoverFromStoreWithNoOps() throws IOException { - final IndexShard shard = newStartedShard(true); + final Settings settings = Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()).build(); + final IndexShard shard = newStartedShard(true, settings); indexDoc(shard, "_doc", "0"); indexDoc(shard, "_doc", "1"); // start a replica shard and index the second doc - final IndexShard otherShard = newStartedShard(false); + final IndexShard otherShard = newStartedShard(false, settings); updateMappings(otherShard, shard.indexSettings().getIndexMetaData()); SourceToParse sourceToParse = new SourceToParse(shard.shardId().getIndexName(), "_doc", "1", new BytesArray("{}"), XContentType.JSON); @@ -2172,7 +2174,7 @@ public class IndexShardTests extends IndexShardTestCase { newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); assertTrue(newShard.recoverFromStore()); try (Translog.Snapshot snapshot = getTranslog(newShard).newSnapshot()) { - assertThat(snapshot.totalOperations(), equalTo(2)); + assertThat(snapshot.totalOperations(), equalTo(newShard.indexSettings.isSoftDeleteEnabled() ? 0 : 2)); } } closeShards(newShard, shard); @@ -3794,7 +3796,13 @@ public class IndexShardTests extends IndexShardTestCase { engineResetLatch.await(); assertThat(getShardDocUIDs(shard), equalTo(docBelowGlobalCheckpoint)); assertThat(shard.seqNoStats().getMaxSeqNo(), equalTo(globalCheckpoint)); - assertThat(shard.translogStats().estimatedNumberOfOperations(), equalTo(translogStats.estimatedNumberOfOperations())); + if (shard.indexSettings.isSoftDeleteEnabled()) { + // we might have trimmed some operations if the translog retention policy is ignored (when soft-deletes enabled). 
+ assertThat(shard.translogStats().estimatedNumberOfOperations(), + lessThanOrEqualTo(translogStats.estimatedNumberOfOperations())); + } else { + assertThat(shard.translogStats().estimatedNumberOfOperations(), equalTo(translogStats.estimatedNumberOfOperations())); + } assertThat(shard.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(maxSeqNoBeforeRollback)); done.set(true); thread.join(); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index 3b338ff824f6..b340d8c52bec 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -82,7 +82,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase { shards.startAll(); final IndexShard replica = shards.getReplicas().get(0); boolean softDeletesEnabled = replica.indexSettings().isSoftDeleteEnabled(); - assertThat(getTranslog(replica).totalOperations(), equalTo(softDeletesEnabled ? moreDocs : docs + moreDocs)); + assertThat(getTranslog(replica).totalOperations(), equalTo(softDeletesEnabled ? 0 : docs + moreDocs)); shards.assertAllEqual(docs + moreDocs); } } @@ -298,7 +298,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase { // file based recovery should be made assertThat(newReplica.recoveryState().getIndex().fileDetails(), not(empty())); boolean softDeletesEnabled = replica.indexSettings().isSoftDeleteEnabled(); - assertThat(getTranslog(newReplica).totalOperations(), equalTo(softDeletesEnabled ? nonFlushedDocs : numDocs)); + assertThat(getTranslog(newReplica).totalOperations(), equalTo(softDeletesEnabled ? 0 : numDocs)); // history uuid was restored assertThat(newReplica.getHistoryUUID(), equalTo(historyUUID)); @@ -385,7 +385,12 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase { shards.recoverReplica(newReplica); try (Translog.Snapshot snapshot = getTranslog(newReplica).newSnapshot()) { - assertThat("Sequence based recovery should keep existing translog", snapshot, SnapshotMatchers.size(initDocs + moreDocs)); + if (newReplica.indexSettings().isSoftDeleteEnabled()) { + assertThat(snapshot.totalOperations(), equalTo(0)); + } else { + assertThat("Sequence based recovery should keep existing translog", + snapshot, SnapshotMatchers.size(initDocs + moreDocs)); + } } assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(uncommittedDocs + moreDocs)); assertThat(newReplica.recoveryState().getIndex().fileDetails(), empty()); diff --git a/server/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java b/server/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java index cd9cb8c44137..e0daa1c5ac16 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESIntegTestCase; @@ -356,6 +357,8 @@ public class OpenCloseIndexIT extends ESIntegTestCase { createIndex(indexName, Settings.builder() 
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .build()); + boolean softDeletesEnabled = IndexSettings.INDEX_SOFT_DELETES_SETTING.get( + client().admin().indices().prepareGetSettings(indexName).get().getIndexToSettings().get(indexName)); final int nbDocs = randomIntBetween(0, 50); int uncommittedOps = 0; @@ -373,7 +376,8 @@ public class OpenCloseIndexIT extends ESIntegTestCase { IndicesStatsResponse stats = client().admin().indices().prepareStats(indexName).clear().setTranslog(true).get(); assertThat(stats.getIndex(indexName), notNullValue()); - assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().estimatedNumberOfOperations(), equalTo(nbDocs)); + assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().estimatedNumberOfOperations(), equalTo( + softDeletesEnabled ? uncommittedOps : nbDocs)); assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().getUncommittedOperations(), equalTo(uncommittedOps)); assertAcked(client().admin().indices().prepareClose("test")); @@ -381,7 +385,8 @@ public class OpenCloseIndexIT extends ESIntegTestCase { IndicesOptions indicesOptions = IndicesOptions.STRICT_EXPAND_OPEN; stats = client().admin().indices().prepareStats(indexName).setIndicesOptions(indicesOptions).clear().setTranslog(true).get(); assertThat(stats.getIndex(indexName), notNullValue()); - assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().estimatedNumberOfOperations(), equalTo(nbDocs)); + assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().estimatedNumberOfOperations(), + equalTo(softDeletesEnabled ? 0 : nbDocs)); assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().getUncommittedOperations(), equalTo(0)); } } diff --git a/server/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java b/server/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java index f76ac7289486..c473e4155ae4 100644 --- a/server/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java +++ b/server/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java @@ -28,6 +28,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Locale; +import java.util.stream.Collectors; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.anyOf; @@ -188,22 +189,121 @@ public class OsProbeTests extends ESTestCase { final boolean areCgroupStatsAvailable = randomBoolean(); final String hierarchy = randomAlphaOfLength(16); - final OsProbe probe = new OsProbe() { + final OsProbe probe = buildStubOsProbe(areCgroupStatsAvailable, hierarchy); + final OsStats.Cgroup cgroup = probe.osStats().getCgroup(); + + if (areCgroupStatsAvailable) { + assertNotNull(cgroup); + assertThat(cgroup.getCpuAcctControlGroup(), equalTo("/" + hierarchy)); + assertThat(cgroup.getCpuAcctUsageNanos(), equalTo(364869866063112L)); + assertThat(cgroup.getCpuControlGroup(), equalTo("/" + hierarchy)); + assertThat(cgroup.getCpuCfsPeriodMicros(), equalTo(100000L)); + assertThat(cgroup.getCpuCfsQuotaMicros(), equalTo(50000L)); + assertThat(cgroup.getCpuStat().getNumberOfElapsedPeriods(), equalTo(17992L)); + assertThat(cgroup.getCpuStat().getNumberOfTimesThrottled(), equalTo(1311L)); + assertThat(cgroup.getCpuStat().getTimeThrottledNanos(), equalTo(139298645489L)); + assertThat(cgroup.getMemoryLimitInBytes(), equalTo("18446744073709551615")); + assertThat(cgroup.getMemoryUsageInBytes(), equalTo("4796416")); + } else { + assertNull(cgroup); + } + } + + public void testCgroupProbeWithMissingCpuAcct() { + assumeTrue("test runs on 
Linux only", Constants.LINUX); + + final String hierarchy = randomAlphaOfLength(16); + + // This cgroup data is missing a line about cpuacct + List procSelfCgroupLines = getProcSelfGroupLines(hierarchy) + .stream() + .map(line -> line.replaceFirst(",cpuacct", "")) + .collect(Collectors.toList()); + + final OsProbe probe = buildStubOsProbe(true, hierarchy, procSelfCgroupLines); + + final OsStats.Cgroup cgroup = probe.osStats().getCgroup(); + + assertNull(cgroup); + } + + public void testCgroupProbeWithMissingCpu() { + assumeTrue("test runs on Linux only", Constants.LINUX); + + final String hierarchy = randomAlphaOfLength(16); + + // This cgroup data is missing a line about cpu + List procSelfCgroupLines = getProcSelfGroupLines(hierarchy) + .stream() + .map(line -> line.replaceFirst(":cpu,", ":")) + .collect(Collectors.toList()); + + + final OsProbe probe = buildStubOsProbe(true, hierarchy, procSelfCgroupLines); + + final OsStats.Cgroup cgroup = probe.osStats().getCgroup(); + + assertNull(cgroup); + } + + public void testCgroupProbeWithMissingMemory() { + assumeTrue("test runs on Linux only", Constants.LINUX); + + final String hierarchy = randomAlphaOfLength(16); + + // This cgroup data is missing a line about memory + List procSelfCgroupLines = getProcSelfGroupLines(hierarchy) + .stream() + .filter(line -> !line.contains(":memory:")) + .collect(Collectors.toList()); + + final OsProbe probe = buildStubOsProbe(true, hierarchy, procSelfCgroupLines); + + final OsStats.Cgroup cgroup = probe.osStats().getCgroup(); + + assertNull(cgroup); + } + + private static List getProcSelfGroupLines(String hierarchy) { + return Arrays.asList( + "10:freezer:/", + "9:net_cls,net_prio:/", + "8:pids:/", + "7:blkio:/", + "6:memory:/" + hierarchy, + "5:devices:/user.slice", + "4:hugetlb:/", + "3:perf_event:/", + "2:cpu,cpuacct,cpuset:/" + hierarchy, + "1:name=systemd:/user.slice/user-1000.slice/session-2359.scope", + "0::/cgroup2"); + } + + private static OsProbe buildStubOsProbe(final boolean areCgroupStatsAvailable, final String hierarchy) { + List procSelfCgroupLines = getProcSelfGroupLines(hierarchy); + + return buildStubOsProbe(areCgroupStatsAvailable, hierarchy, procSelfCgroupLines); + } + + /** + * Builds a test instance of OsProbe. Methods that ordinarily read from the filesystem are overridden to return values based upon + * the arguments to this method. + * + * @param areCgroupStatsAvailable whether or not cgroup data is available. Normally OsProbe establishes this for itself. + * @param hierarchy a mock value used to generate a cgroup hierarchy. 
+ * @param procSelfCgroupLines the lines that will be used as the content of /proc/self/cgroup + * @return a test instance + */ + private static OsProbe buildStubOsProbe( + final boolean areCgroupStatsAvailable, + final String hierarchy, + List<String> procSelfCgroupLines + ) { + return new OsProbe() { @Override List<String> readProcSelfCgroup() { - return Arrays.asList( - "10:freezer:/", - "9:net_cls,net_prio:/", - "8:pids:/", - "7:blkio:/", - "6:memory:/" + hierarchy, - "5:devices:/user.slice", - "4:hugetlb:/", - "3:perf_event:/", - "2:cpu,cpuacct,cpuset:/" + hierarchy, - "1:name=systemd:/user.slice/user-1000.slice/session-2359.scope", - "0::/cgroup2"); + return procSelfCgroupLines; } @Override @@ -249,26 +349,6 @@ public class OsProbeTests extends ESTestCase { boolean areCgroupStatsAvailable() { return areCgroupStatsAvailable; } - }; - - final OsStats.Cgroup cgroup = probe.osStats().getCgroup(); - - if (areCgroupStatsAvailable) { - assertNotNull(cgroup); - assertThat(cgroup.getCpuAcctControlGroup(), equalTo("/" + hierarchy)); - assertThat(cgroup.getCpuAcctUsageNanos(), equalTo(364869866063112L)); - assertThat(cgroup.getCpuControlGroup(), equalTo("/" + hierarchy)); - assertThat(cgroup.getCpuCfsPeriodMicros(), equalTo(100000L)); - assertThat(cgroup.getCpuCfsQuotaMicros(), equalTo(50000L)); - assertThat(cgroup.getCpuStat().getNumberOfElapsedPeriods(), equalTo(17992L)); - assertThat(cgroup.getCpuStat().getNumberOfTimesThrottled(), equalTo(1311L)); - assertThat(cgroup.getCpuStat().getTimeThrottledNanos(), equalTo(139298645489L)); - assertThat(cgroup.getMemoryLimitInBytes(), equalTo("18446744073709551615")); - assertThat(cgroup.getMemoryUsageInBytes(), equalTo("4796416")); - } else { - assertNull(cgroup); - } } - } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorTests.java index b86acbeea046..4dfdfd2fd91c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorTests.java @@ -480,7 +480,8 @@ public class AvgAggregatorTests extends AggregatorTestCase { Document document = new Document(); document.add(new SortedNumericDocValuesField("values", i + 2)); document.add(new SortedNumericDocValuesField("values", i + 3)); - iw.addDocument(document); } + iw.addDocument(document); + } }, avg -> { assertEquals((double) (3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12+12+13) / 20, avg.getValue(), 0); assertTrue(AggregationInspectionHelper.hasValue(avg)); @@ -501,7 +502,8 @@ public class AvgAggregatorTests extends AggregatorTestCase { Document document = new Document(); document.add(new SortedNumericDocValuesField("values", i + 2)); document.add(new SortedNumericDocValuesField("values", i + 3)); - iw.addDocument(document); } + iw.addDocument(document); + } }, avg -> { assertEquals((double) (2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12) / 20, avg.getValue(), 0); assertTrue(AggregationInspectionHelper.hasValue(avg)); @@ -589,4 +591,95 @@ public class AvgAggregatorTests extends AggregatorTestCase { indexReader.close(); directory.close(); } + + /** + * Make sure that an aggregation not using a script does get cached. 
+ */ + public void testCacheAggregation() throws IOException { + Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + indexWriter.addDocument(singleton(new NumericDocValuesField("value", i + 1))); + } + indexWriter.close(); + + Directory unmappedDirectory = newDirectory(); + RandomIndexWriter unmappedIndexWriter = new RandomIndexWriter(random(), unmappedDirectory); + unmappedIndexWriter.close(); + + IndexReader indexReader = DirectoryReader.open(directory); + IndexReader unamappedIndexReader = DirectoryReader.open(unmappedDirectory); + MultiReader multiReader = new MultiReader(indexReader, unamappedIndexReader); + IndexSearcher indexSearcher = newSearcher(multiReader, true, true); + + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("value"); + AvgAggregationBuilder aggregationBuilder = new AvgAggregationBuilder("avg") + .field("value"); + + AvgAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator.preCollection(); + indexSearcher.search(new MatchAllDocsQuery(), aggregator); + aggregator.postCollection(); + + InternalAvg avg = (InternalAvg) aggregator.buildAggregation(0L); + + assertEquals(5.5, avg.getValue(), 0); + assertEquals("avg", avg.getName()); + assertTrue(AggregationInspectionHelper.hasValue(avg)); + + // Test that an aggregation not using a script does get cached + assertTrue(aggregator.context().getQueryShardContext().isCacheable()); + + multiReader.close(); + directory.close(); + unmappedDirectory.close(); + } + + /** + * Make sure that an aggregation using a script does not get cached. + */ + public void testDontCacheScripts() throws IOException { + Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + indexWriter.addDocument(singleton(new NumericDocValuesField("value", i + 1))); + } + indexWriter.close(); + + Directory unmappedDirectory = newDirectory(); + RandomIndexWriter unmappedIndexWriter = new RandomIndexWriter(random(), unmappedDirectory); + unmappedIndexWriter.close(); + + IndexReader indexReader = DirectoryReader.open(directory); + IndexReader unamappedIndexReader = DirectoryReader.open(unmappedDirectory); + MultiReader multiReader = new MultiReader(indexReader, unamappedIndexReader); + IndexSearcher indexSearcher = newSearcher(multiReader, true, true); + + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("value"); + AvgAggregationBuilder aggregationBuilder = new AvgAggregationBuilder("avg") + .field("value") + .script(new Script(ScriptType.INLINE, MockScriptEngine.NAME, VALUE_SCRIPT, Collections.emptyMap())); + + AvgAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator.preCollection(); + indexSearcher.search(new MatchAllDocsQuery(), aggregator); + aggregator.postCollection(); + + InternalAvg avg = (InternalAvg) aggregator.buildAggregation(0L); + + assertEquals(5.5, avg.getValue(), 0); + assertEquals("avg", avg.getName()); + assertTrue(AggregationInspectionHelper.hasValue(avg)); + + // Test that an aggregation using a script does not get cached + assertFalse(aggregator.context().getQueryShardContext().isCacheable()); + + multiReader.close(); + directory.close(); + 
unmappedDirectory.close(); + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java deleted file mode 100644 index e6652c74c8be..000000000000 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.search.aggregations.metrics; - -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptType; -import org.elasticsearch.test.ESIntegTestCase; - -import java.util.Collection; -import java.util.Collections; - -import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; -import static org.elasticsearch.search.aggregations.metrics.MetricAggScriptPlugin.METRIC_SCRIPT_ENGINE; -import static org.elasticsearch.search.aggregations.metrics.MetricAggScriptPlugin.VALUE_FIELD_SCRIPT; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.hamcrest.Matchers.equalTo; - -public class AvgIT extends ESIntegTestCase { - - @Override - protected Collection> nodePlugins() { - return Collections.singleton(MetricAggScriptPlugin.class); - } - - /** - * Make sure that a request using a script does not get cached and a request - * not using a script does get cached. 
- */ - public void testDontCacheScripts() throws Exception { - assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) - .get()); - indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1), - client().prepareIndex("cache_test_idx", "type", "2").setSource("s", 2)); - - // Make sure we are starting with a clear cache - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); - - // Test that a request using a script does not get cached - SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(avg("foo").field("d").script( - new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_FIELD_SCRIPT, Collections.emptyMap()))).get(); - assertSearchResponse(r); - - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); - - // To make sure that the cache is working test that a request not using - // a script is cached - r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(avg("foo").field("d")).get(); - assertSearchResponse(r); - - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(1L)); - } -} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java index 66d2b9c6d5fe..b841fffd0ba3 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java @@ -33,6 +33,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.PointValues; import org.apache.lucene.index.RandomIndexWriter; @@ -55,31 +56,94 @@ import org.elasticsearch.script.ScriptEngine; import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.BucketCollector; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalAggregation; +import 
org.elasticsearch.search.aggregations.MultiBucketCollector; +import org.elasticsearch.search.aggregations.bucket.filter.Filter; +import org.elasticsearch.search.aggregations.bucket.global.Global; +import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregator; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator; import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; +import org.elasticsearch.search.aggregations.support.ValueType; +import org.elasticsearch.search.lookup.LeafDocLookup; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Comparator; +import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; import static java.util.Collections.singleton; +import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.hamcrest.Matchers.equalTo; public class MaxAggregatorTests extends AggregatorTestCase { + private final String SCRIPT_NAME = "script_name"; + private final long SCRIPT_VALUE = 19L; + /** Script to take a field name in params and sum the values of the field. */ + public static final String SUM_FIELD_PARAMS_SCRIPT = "sum_field_params"; + + /** Script to sum the values of a field named {@code values}. */ + public static final String SUM_VALUES_FIELD_SCRIPT = "sum_values_field"; + + /** Script to return the value of a field named {@code value}. */ + public static final String VALUE_FIELD_SCRIPT = "value_field"; + + /** Script to return the {@code _value} provided by aggs framework. 
*/ + public static final String VALUE_SCRIPT = "_value"; + @Override protected ScriptService getMockScriptService() { + Map<String, Function<Map<String, Object>, Object>> scripts = new HashMap<>(); + Function<Map<String, Object>, Integer> getInc = vars -> { + if (vars == null || vars.containsKey("inc") == false) { + return 0; + } else { + return ((Number) vars.get("inc")).intValue(); + } + }; + + BiFunction<Map<String, Object>, String, Object> sum = (vars, fieldname) -> { + int inc = getInc.apply(vars); + LeafDocLookup docLookup = (LeafDocLookup) vars.get("doc"); + List<Long> values = new ArrayList<>(); + for (Object v : docLookup.get(fieldname)) { + values.add(((Number) v).longValue() + inc); + } + return values; + }; + + scripts.put(SCRIPT_NAME, script -> SCRIPT_VALUE); + scripts.put(SUM_FIELD_PARAMS_SCRIPT, vars -> { + String fieldname = (String) vars.get("field"); + return sum.apply(vars, fieldname); + }); + scripts.put(SUM_VALUES_FIELD_SCRIPT, vars -> sum.apply(vars, "values")); + scripts.put(VALUE_FIELD_SCRIPT, vars -> sum.apply(vars, "value")); + scripts.put(VALUE_SCRIPT, vars -> { + int inc = getInc.apply(vars); + return ((Number) vars.get("_value")).doubleValue() + inc; + }); + MockScriptEngine scriptEngine = new MockScriptEngine(MockScriptEngine.NAME, - Collections.singletonMap(SCRIPT_NAME, script -> SCRIPT_VALUE), // return 19 from script + scripts, Collections.emptyMap()); Map<String, ScriptEngine> engines = Collections.singletonMap(scriptEngine.getType(), scriptEngine); @@ -201,7 +265,6 @@ public class MaxAggregatorTests extends AggregatorTestCase { RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); buildIndex.accept(indexWriter); indexWriter.close(); - IndexReader indexReader = DirectoryReader.open(directory); IndexSearcher indexSearcher = newSearcher(indexReader, true, true); @@ -321,4 +384,596 @@ public class MaxAggregatorTests extends AggregatorTestCase { assertTrue(seen[0]); } + public void testSingleValuedField() throws IOException { + testCase( new MatchAllDocsQuery(), iw -> { + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + iw.addDocument(singleton(new NumericDocValuesField("number", i + 1))); + } + }, max -> { + assertEquals(10, max.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(max)); + }); + } + + public void testSingleValuedFieldWithFormatter() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("value"); + + MaxAggregationBuilder aggregationBuilder = new MaxAggregationBuilder("_name") + .format("0000.0") + .field("value"); + + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + iw.addDocument(singleton(new NumericDocValuesField("value", i + 1))); + } + }, max -> { + assertEquals(10.0, max.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(max)); + assertEquals("0010.0", max.getValueAsString()); + }, fieldType); + } + + public void testSingleValuedFieldGetProperty() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("value"); + fieldType.setHasDocValues(true); + + AggregationBuilder aggregationBuilder = AggregationBuilders.global("global") + .subAggregation(AggregationBuilders.max("max").field("value")); + + Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + indexWriter.addDocument(singleton(new 
NumericDocValuesField("value", i + 1))); + } + indexWriter.close(); + + IndexReader indexReader = DirectoryReader.open(directory); + IndexSearcher indexSearcher = newSearcher(indexReader, true, true); + + GlobalAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator.preCollection(); + indexSearcher.search(new MatchAllDocsQuery(), aggregator); + aggregator.postCollection(); + + Global global = (Global) aggregator.buildAggregation(0L); + assertNotNull(global); + assertEquals("global", global.getName()); + assertEquals(10L, global.getDocCount()); + assertNotNull(global.getAggregations()); + assertEquals(1, global.getAggregations().asMap().size()); + + Max max = global.getAggregations().get("max"); + assertNotNull(max); + assertEquals("max", max.getName()); + assertEquals(10.0, max.getValue(), 0); + assertEquals(max, ((InternalAggregation) global).getProperty("max")); + assertEquals(10.0, (double) ((InternalAggregation)global).getProperty("max.value"), 0); + assertEquals(10.0, (double) ((InternalAggregation)max).getProperty("value"), 0); + + indexReader.close(); + directory.close(); + } + + public void testSingleValuedFieldPartiallyUnmapped() throws IOException { + Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + indexWriter.addDocument(singleton(new NumericDocValuesField("value", i + 1))); + } + indexWriter.close(); + + Directory unmappedDirectory = newDirectory(); + RandomIndexWriter unmappedIndexWriter = new RandomIndexWriter(random(), unmappedDirectory); + unmappedIndexWriter.close(); + + IndexReader indexReader = DirectoryReader.open(directory); + IndexReader unamappedIndexReader = DirectoryReader.open(unmappedDirectory); + MultiReader multiReader = new MultiReader(indexReader, unamappedIndexReader); + IndexSearcher indexSearcher = newSearcher(multiReader, true, true); + + + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("value"); + AggregationBuilder aggregationBuilder = new MaxAggregationBuilder("max").field("value"); + + MaxAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator.preCollection(); + indexSearcher.search(new MatchAllDocsQuery(), aggregator); + aggregator.postCollection(); + + InternalMax max = (InternalMax) aggregator.buildAggregation(0L); + + assertEquals(10.0, max.getValue(), 0); + assertEquals("max", max.getName()); + assertTrue(AggregationInspectionHelper.hasValue(max)); + + multiReader.close(); + directory.close(); + unmappedDirectory.close(); + } + + public void testSingleValuedFieldWithValueScript() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("value"); + + MaxAggregationBuilder aggregationBuilder = new MaxAggregationBuilder("max") + .field("value") + .script(new Script(ScriptType.INLINE, MockScriptEngine.NAME, VALUE_SCRIPT, Collections.emptyMap())); + + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + iw.addDocument(singleton(new NumericDocValuesField("value", i + 1))); + } + }, max -> { + assertTrue(AggregationInspectionHelper.hasValue(max)); + assertEquals(10.0, max.getValue(), 0); + assertEquals("max", max.getName()); + }, fieldType); + } + + public void 
testSingleValuedFieldWithValueScriptWithParams() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("value"); + + Map params = Collections.singletonMap("inc", 1); + MaxAggregationBuilder aggregationBuilder = new MaxAggregationBuilder("max") + .field("value") + .script(new Script(ScriptType.INLINE, MockScriptEngine.NAME, VALUE_SCRIPT, params)); + + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + iw.addDocument(singleton(new NumericDocValuesField("value", i + 1))); + } + }, max -> { + assertEquals(11.0, max.getValue(), 0); + assertEquals("max", max.getName()); + assertTrue(AggregationInspectionHelper.hasValue(max)); + }, fieldType); + } + + public void testMultiValuedField() throws IOException { + testCase(new MatchAllDocsQuery(), iw -> { + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + Document document = new Document(); + document.add(new SortedNumericDocValuesField("number", i + 2)); + document.add(new SortedNumericDocValuesField("number", i + 3)); + iw.addDocument(document); + } + }, max -> { + assertEquals(12.0, max.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(max)); + }); + } + + public void testMultiValuedFieldWithValueScript() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("values"); + + MaxAggregationBuilder aggregationBuilder = new MaxAggregationBuilder("max") + .field("values") + .script(new Script(ScriptType.INLINE, MockScriptEngine.NAME, VALUE_SCRIPT, Collections.emptyMap())); + + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + Document document = new Document(); + document.add(new SortedNumericDocValuesField("values", i + 2)); + document.add(new SortedNumericDocValuesField("values", i + 3)); + iw.addDocument(document); + } + }, max -> { + assertEquals(12.0, max.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(max)); + }, fieldType); + } + + public void testMultiValuedFieldWithValueScriptWithParams() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("values"); + + Map params = Collections.singletonMap("inc", 1); + MaxAggregationBuilder aggregationBuilder = new MaxAggregationBuilder("max") + .field("values") + .script(new Script(ScriptType.INLINE, MockScriptEngine.NAME, VALUE_SCRIPT, params)); + + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + Document document = new Document(); + document.add(new SortedNumericDocValuesField("values", i + 2)); + document.add(new SortedNumericDocValuesField("values", i + 3)); + iw.addDocument(document); + } + }, max -> { + assertEquals(13.0, max.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(max)); + }, fieldType); + } + + public void testScriptSingleValued() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("value"); + + MaxAggregationBuilder aggregationBuilder = new MaxAggregationBuilder("max") + .script(new Script(ScriptType.INLINE, MockScriptEngine.NAME, VALUE_FIELD_SCRIPT, Collections.emptyMap())); + + testCase(aggregationBuilder, new 
MatchAllDocsQuery(), iw -> { + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + iw.addDocument(singleton(new NumericDocValuesField("value", i + 1))); + } + }, max -> { + assertEquals(10.0, max.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(max)); + }, fieldType); + } + + public void testScriptSingleValuedWithParams() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("value"); + + Map params = new HashMap<>(); + params.put("inc", 1); + params.put("field", "value"); + + MaxAggregationBuilder aggregationBuilder = new MaxAggregationBuilder("max") + .script(new Script(ScriptType.INLINE, MockScriptEngine.NAME, SUM_FIELD_PARAMS_SCRIPT, params)); + + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + iw.addDocument(singleton(new NumericDocValuesField("value", i + 1))); + } + }, max -> { + assertEquals(11.0, max.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(max)); + }, fieldType); + } + + public void testScriptMultiValued() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("values"); + + MaxAggregationBuilder aggregationBuilder = new MaxAggregationBuilder("max") + .script(new Script(ScriptType.INLINE, MockScriptEngine.NAME, SUM_VALUES_FIELD_SCRIPT, Collections.emptyMap())); + + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + Document document = new Document(); + document.add(new SortedNumericDocValuesField("values", i + 2)); + document.add(new SortedNumericDocValuesField("values", i + 3)); + iw.addDocument(document); + } + }, max -> { + assertEquals(12.0, max.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(max)); + }, fieldType); + } + + public void testScriptMultiValuedWithParams() throws IOException { + Map params = new HashMap<>(); + params.put("inc", 1); + params.put("field", "values"); + + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("values"); + + MaxAggregationBuilder aggregationBuilder = new MaxAggregationBuilder("max") + .script(new Script(ScriptType.INLINE, MockScriptEngine.NAME, SUM_FIELD_PARAMS_SCRIPT, params)); + + testCase(aggregationBuilder, new MatchAllDocsQuery(), iw -> { + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + Document document = new Document(); + document.add(new SortedNumericDocValuesField("values", i + 2)); + document.add(new SortedNumericDocValuesField("values", i + 3)); + iw.addDocument(document); + } + }, max -> { + assertEquals(13.0, max.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(max)); + }, fieldType); + } + + public void testEmptyAggregation() throws Exception { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("value"); + fieldType.setHasDocValues(true); + + AggregationBuilder aggregationBuilder = AggregationBuilders.global("global") + .subAggregation(AggregationBuilders.max("max").field("value")); + + Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + // Do not add any documents + indexWriter.close(); + + IndexReader indexReader = DirectoryReader.open(directory); + IndexSearcher 
indexSearcher = newSearcher(indexReader, true, true); + + GlobalAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator.preCollection(); + indexSearcher.search(new MatchAllDocsQuery(), aggregator); + aggregator.postCollection(); + + Global global = (Global) aggregator.buildAggregation(0L); + assertNotNull(global); + assertEquals("global", global.getName()); + assertEquals(0L, global.getDocCount()); + assertNotNull(global.getAggregations()); + assertEquals(1, global.getAggregations().asMap().size()); + + Max max = global.getAggregations().get("max"); + assertNotNull(max); + assertEquals("max", max.getName()); + assertEquals(Double.NEGATIVE_INFINITY, max.getValue(), 0); + + indexReader.close(); + directory.close(); + } + + public void testOrderByEmptyAggregation() throws IOException { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("value"); + fieldType.setHasDocValues(true); + + TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("terms", ValueType.NUMERIC) + .field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>max", true))) + .subAggregation(AggregationBuilders.filter("filter", termQuery("value", 100)) + .subAggregation(AggregationBuilders.max("max").field("value"))); + + Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + indexWriter.addDocument(singleton(new NumericDocValuesField("value", i + 1))); + } + indexWriter.close(); + + IndexReader indexReader = DirectoryReader.open(directory); + IndexSearcher indexSearcher = newSearcher(indexReader, true, true); + + TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator.preCollection(); + indexSearcher.search(new MatchAllDocsQuery(), aggregator); + aggregator.postCollection(); + + Terms terms = (Terms) aggregator.buildAggregation(0L); + assertNotNull(terms); + List buckets = terms.getBuckets(); + assertNotNull(buckets); + assertEquals(10, buckets.size()); + + for (int i = 0; i < 10; i++) { + Terms.Bucket bucket = buckets.get(i); + assertNotNull(bucket); + assertEquals((long) i + 1, bucket.getKeyAsNumber()); + assertEquals(1L, bucket.getDocCount()); + + Filter filter = bucket.getAggregations().get("filter"); + assertNotNull(filter); + assertEquals(0L, filter.getDocCount()); + + Max max = filter.getAggregations().get("max"); + assertNotNull(max); + assertEquals(Double.NEGATIVE_INFINITY, max.getValue(), 0); + } + + indexReader.close(); + directory.close(); + } + + public void testEarlyTermination() throws Exception { + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("values"); + + Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + Document document = new Document(); + document.add(new SortedNumericDocValuesField("values", i + 2)); + document.add(new SortedNumericDocValuesField("values", i + 3)); + indexWriter.addDocument(document); + } + indexWriter.close(); + + IndexReader indexReader = DirectoryReader.open(directory); + IndexSearcher indexSearcher = newSearcher(indexReader, true, true); + + MaxAggregationBuilder maxAggregationBuilder = new MaxAggregationBuilder("max") .field("values"); + 
ValueCountAggregationBuilder countAggregationBuilder = new ValueCountAggregationBuilder("count", null) + .field("values"); + + MaxAggregator maxAggregator = createAggregator(maxAggregationBuilder, indexSearcher, fieldType); + ValueCountAggregator countAggregator = createAggregator(countAggregationBuilder, indexSearcher, fieldType); + + BucketCollector bucketCollector = MultiBucketCollector.wrap(maxAggregator, countAggregator); + bucketCollector.preCollection(); + indexSearcher.search(new MatchAllDocsQuery(), bucketCollector); + bucketCollector.postCollection(); + + InternalMax max = (InternalMax) maxAggregator.buildAggregation(0L); + assertNotNull(max); + assertEquals(12.0, max.getValue(), 0); + assertEquals("max", max.getName()); + + InternalValueCount count = (InternalValueCount) countAggregator.buildAggregation(0L); + assertNotNull(count); + assertEquals(20L, count.getValue()); + assertEquals("count", count.getName()); + + indexReader.close(); + directory.close(); + } + + public void testNestedEarlyTermination() throws Exception { + MappedFieldType multiValuesfieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + multiValuesfieldType.setName("values"); + + MappedFieldType singleValueFieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + singleValueFieldType.setName("value"); + + Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + Document document = new Document(); + document.add(new NumericDocValuesField("value", i + 1)); + document.add(new SortedNumericDocValuesField("values", i + 2)); + document.add(new SortedNumericDocValuesField("values", i + 3)); + indexWriter.addDocument(document); + } + indexWriter.close(); + + IndexReader indexReader = DirectoryReader.open(directory); + IndexSearcher indexSearcher = newSearcher(indexReader, true, true); + + for (Aggregator.SubAggCollectionMode collectionMode : Aggregator.SubAggCollectionMode.values()) { + MaxAggregationBuilder maxAggregationBuilder = new MaxAggregationBuilder("max") + .field("values"); + ValueCountAggregationBuilder countAggregationBuilder = new ValueCountAggregationBuilder("count", null) + .field("values"); + TermsAggregationBuilder termsAggregationBuilder = new TermsAggregationBuilder("terms", ValueType.NUMERIC) + .field("value").collectMode(collectionMode) + .subAggregation(new MaxAggregationBuilder("sub_max").field("invalid")); + + MaxAggregator maxAggregator = createAggregator(maxAggregationBuilder, indexSearcher, multiValuesfieldType); + ValueCountAggregator countAggregator = createAggregator(countAggregationBuilder, indexSearcher, multiValuesfieldType); + TermsAggregator termsAggregator = createAggregator(termsAggregationBuilder, indexSearcher, singleValueFieldType); + + BucketCollector bucketCollector = MultiBucketCollector.wrap(maxAggregator, countAggregator, termsAggregator); + bucketCollector.preCollection(); + indexSearcher.search(new MatchAllDocsQuery(), bucketCollector); + bucketCollector.postCollection(); + + InternalMax max = (InternalMax) maxAggregator.buildAggregation(0L); + assertNotNull(max); + assertEquals(12.0, max.getValue(), 0); + assertEquals("max", max.getName()); + + InternalValueCount count = (InternalValueCount) countAggregator.buildAggregation(0L); + assertNotNull(count); + assertEquals(20L, count.getValue()); + assertEquals("count", count.getName()); + + Terms terms = (Terms) 
termsAggregator.buildAggregation(0L); + assertNotNull(terms); + List buckets = terms.getBuckets(); + assertNotNull(buckets); + assertEquals(10, buckets.size()); + + for (Terms.Bucket b : buckets) { + InternalMax subMax = b.getAggregations().get("sub_max"); + assertEquals(Double.NEGATIVE_INFINITY, subMax.getValue(), 0); + } + } + + indexReader.close(); + directory.close(); + } + + /** + * Make sure that an aggregation not using a script does get cached. + */ + public void testCacheAggregation() throws IOException { + Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + indexWriter.addDocument(singleton(new NumericDocValuesField("value", i + 1))); + } + indexWriter.close(); + + Directory unmappedDirectory = newDirectory(); + RandomIndexWriter unmappedIndexWriter = new RandomIndexWriter(random(), unmappedDirectory); + unmappedIndexWriter.close(); + + IndexReader indexReader = DirectoryReader.open(directory); + IndexReader unmappedIndexReader = DirectoryReader.open(unmappedDirectory); + MultiReader multiReader = new MultiReader(indexReader, unmappedIndexReader); + IndexSearcher indexSearcher = newSearcher(multiReader, true, true); + + + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("value"); + MaxAggregationBuilder aggregationBuilder = new MaxAggregationBuilder("max") + .field("value"); + + MaxAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator.preCollection(); + indexSearcher.search(new MatchAllDocsQuery(), aggregator); + aggregator.postCollection(); + + InternalMax max = (InternalMax) aggregator.buildAggregation(0L); + + assertEquals(10.0, max.getValue(), 0); + assertEquals("max", max.getName()); + assertTrue(AggregationInspectionHelper.hasValue(max)); + + // Test that an aggregation not using a script does get cached + assertTrue(aggregator.context().getQueryShardContext().isCacheable()); + + multiReader.close(); + directory.close(); + unmappedDirectory.close(); + } + + /** + * Make sure that an aggregation using a script does not get cached. 
+ */ + public void testDontCacheScripts() throws IOException { + Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + final int numDocs = 10; + for (int i = 0; i < numDocs; i++) { + indexWriter.addDocument(singleton(new NumericDocValuesField("value", i + 1))); + } + indexWriter.close(); + + Directory unmappedDirectory = newDirectory(); + RandomIndexWriter unmappedIndexWriter = new RandomIndexWriter(random(), unmappedDirectory); + unmappedIndexWriter.close(); + + IndexReader indexReader = DirectoryReader.open(directory); + IndexReader unmappedIndexReader = DirectoryReader.open(unmappedDirectory); + MultiReader multiReader = new MultiReader(indexReader, unmappedIndexReader); + IndexSearcher indexSearcher = newSearcher(multiReader, true, true); + + + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + fieldType.setName("value"); + MaxAggregationBuilder aggregationBuilder = new MaxAggregationBuilder("max") + .field("value") + .script(new Script(ScriptType.INLINE, MockScriptEngine.NAME, VALUE_SCRIPT, Collections.emptyMap())); + + MaxAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType); + aggregator.preCollection(); + indexSearcher.search(new MatchAllDocsQuery(), aggregator); + aggregator.postCollection(); + + InternalMax max = (InternalMax) aggregator.buildAggregation(0L); + + assertEquals(10.0, max.getValue(), 0); + assertEquals("max", max.getName()); + assertTrue(AggregationInspectionHelper.hasValue(max)); + + // Test that an aggregation using a script does not get cached + assertFalse(aggregator.context().getQueryShardContext().isCacheable()); + + multiReader.close(); + directory.close(); + unmappedDirectory.close(); + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxIT.java deleted file mode 100644 index 61786ab6dcd0..000000000000 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxIT.java +++ /dev/null @@ -1,445 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.search.aggregations.metrics; - -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptType; -import org.elasticsearch.search.aggregations.AggregationTestScriptsPlugin; -import org.elasticsearch.search.aggregations.Aggregator; -import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.bucket.filter.Filter; -import org.elasticsearch.search.aggregations.bucket.global.Global; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.BucketOrder; - -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static java.util.Collections.emptyMap; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.index.query.QueryBuilders.termQuery; -import static org.elasticsearch.search.aggregations.AggregationBuilders.count; -import static org.elasticsearch.search.aggregations.AggregationBuilders.filter; -import static org.elasticsearch.search.aggregations.AggregationBuilders.global; -import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; -import static org.elasticsearch.search.aggregations.AggregationBuilders.max; -import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.notNullValue; - -public class MaxIT extends AbstractNumericTestCase { - @Override - protected Collection> nodePlugins() { - return Collections.singleton(AggregationTestScriptsPlugin.class); - } - - @Override - public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(max("max").field("value"))) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - Histogram histo = searchResponse.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - Histogram.Bucket bucket = histo.getBuckets().get(1); - assertThat(bucket, notNullValue()); - - Max max = bucket.getAggregations().get("max"); - assertThat(max, notNullValue()); - assertThat(max.getName(), equalTo("max")); - assertThat(max.getValue(), equalTo(Double.NEGATIVE_INFINITY)); - } - - @Override - public void testUnmapped() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx_unmapped") - .setQuery(matchAllQuery()) - .addAggregation(max("max").field("value")) - .get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); - - Max max = searchResponse.getAggregations().get("max"); - assertThat(max, notNullValue()); - assertThat(max.getName(), equalTo("max")); - assertThat(max.getValue(), equalTo(Double.NEGATIVE_INFINITY)); - } - - @Override - public void testSingleValuedField() throws Exception { - 
SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(max("max").field("value")) - .get(); - - assertHitCount(searchResponse, 10); - - Max max = searchResponse.getAggregations().get("max"); - assertThat(max, notNullValue()); - assertThat(max.getName(), equalTo("max")); - assertThat(max.getValue(), equalTo(10.0)); - } - - public void testSingleValuedFieldWithFormatter() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(max("max").format("0000.0").field("value")).get(); - - assertHitCount(searchResponse, 10); - - Max max = searchResponse.getAggregations().get("max"); - assertThat(max, notNullValue()); - assertThat(max.getName(), equalTo("max")); - assertThat(max.getValue(), equalTo(10.0)); - assertThat(max.getValueAsString(), equalTo("0010.0")); - } - - @Override - public void testSingleValuedFieldGetProperty() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(global("global").subAggregation(max("max").field("value"))).get(); - - assertHitCount(searchResponse, 10); - - Global global = searchResponse.getAggregations().get("global"); - assertThat(global, notNullValue()); - assertThat(global.getName(), equalTo("global")); - assertThat(global.getDocCount(), equalTo(10L)); - assertThat(global.getAggregations(), notNullValue()); - assertThat(global.getAggregations().asMap().size(), equalTo(1)); - - Max max = global.getAggregations().get("max"); - assertThat(max, notNullValue()); - assertThat(max.getName(), equalTo("max")); - double expectedMaxValue = 10.0; - assertThat(max.getValue(), equalTo(expectedMaxValue)); - assertThat((Max) ((InternalAggregation)global).getProperty("max"), equalTo(max)); - assertThat((double) ((InternalAggregation)global).getProperty("max.value"), equalTo(expectedMaxValue)); - assertThat((double) ((InternalAggregation)max).getProperty("value"), equalTo(expectedMaxValue)); - } - - @Override - public void testSingleValuedFieldPartiallyUnmapped() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped") - .setQuery(matchAllQuery()) - .addAggregation(max("max").field("value")) - .get(); - - assertHitCount(searchResponse, 10); - - Max max = searchResponse.getAggregations().get("max"); - assertThat(max, notNullValue()); - assertThat(max.getName(), equalTo("max")); - assertThat(max.getValue(), equalTo(10.0)); - } - - @Override - public void testSingleValuedFieldWithValueScript() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - max("max") - .field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", emptyMap()))) - .get(); - - assertHitCount(searchResponse, 10); - - Max max = searchResponse.getAggregations().get("max"); - assertThat(max, notNullValue()); - assertThat(max.getName(), equalTo("max")); - assertThat(max.getValue(), equalTo(11.0)); - } - - @Override - public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { - Map params = new HashMap<>(); - params.put("inc", 1); - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - max("max") - .field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params))) - .get(); - - assertHitCount(searchResponse, 10); - - Max max = 
searchResponse.getAggregations().get("max"); - assertThat(max, notNullValue()); - assertThat(max.getName(), equalTo("max")); - assertThat(max.getValue(), equalTo(11.0)); - } - - @Override - public void testMultiValuedField() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(max("max").field("values")) - .get(); - - assertHitCount(searchResponse, 10); - - Max max = searchResponse.getAggregations().get("max"); - assertThat(max, notNullValue()); - assertThat(max.getName(), equalTo("max")); - assertThat(max.getValue(), equalTo(12.0)); - } - - @Override - public void testMultiValuedFieldWithValueScript() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - max("max") - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", emptyMap()))) - .get(); - - assertHitCount(searchResponse, 10); - - Max max = searchResponse.getAggregations().get("max"); - assertThat(max, notNullValue()); - assertThat(max.getName(), equalTo("max")); - assertThat(max.getValue(), equalTo(13.0)); - } - - @Override - public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { - Map params = new HashMap<>(); - params.put("inc", 1); - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - max("max") - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params))) - .get(); - - assertHitCount(searchResponse, 10); - - Max max = searchResponse.getAggregations().get("max"); - assertThat(max, notNullValue()); - assertThat(max.getName(), equalTo("max")); - assertThat(max.getValue(), equalTo(13.0)); - } - - @Override - public void testScriptSingleValued() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - max("max") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap()))) - .get(); - - assertHitCount(searchResponse, 10); - - Max max = searchResponse.getAggregations().get("max"); - assertThat(max, notNullValue()); - assertThat(max.getName(), equalTo("max")); - assertThat(max.getValue(), equalTo(10.0)); - } - - @Override - public void testScriptSingleValuedWithParams() throws Exception { - Map params = new HashMap<>(); - params.put("inc", 1); - - Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value + inc", params); - - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(max("max").script(script)) - .get(); - - assertHitCount(searchResponse, 10); - - Max max = searchResponse.getAggregations().get("max"); - assertThat(max, notNullValue()); - assertThat(max.getName(), equalTo("max")); - assertThat(max.getValue(), equalTo(11.0)); - } - - @Override - public void testScriptMultiValued() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - max("max") - .script(new Script(ScriptType.INLINE, - AggregationTestScriptsPlugin.NAME, "doc['values']", Collections.emptyMap()))) - .get(); - - assertHitCount(searchResponse, 10); - - Max max = searchResponse.getAggregations().get("max"); - assertThat(max, notNullValue()); - assertThat(max.getName(), equalTo("max")); - assertThat(max.getValue(), 
equalTo(12.0)); - } - - @Override - public void testScriptMultiValuedWithParams() throws Exception { - Map params = new HashMap<>(); - params.put("inc", 1); - - Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "[ doc['value'].value, doc['value'].value + inc ]", - params); - - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(max("max").script(script)) - .get(); - - assertHitCount(searchResponse, 10); - - Max max = searchResponse.getAggregations().get("max"); - assertThat(max, notNullValue()); - assertThat(max.getName(), equalTo("max")); - assertThat(max.getValue(), equalTo(11.0)); - } - - @Override - public void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>max", true))) - .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(max("max").field("value")))) - .get(); - - assertHitCount(searchResponse, 10); - - Terms terms = searchResponse.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - List buckets = terms.getBuckets(); - assertThat(buckets, notNullValue()); - assertThat(buckets.size(), equalTo(10)); - - for (int i = 0; i < 10; i++) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsNumber(), equalTo((long) i + 1)); - assertThat(bucket.getDocCount(), equalTo(1L)); - Filter filter = bucket.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getDocCount(), equalTo(0L)); - Max max = filter.getAggregations().get("max"); - assertThat(max, notNullValue()); - assertThat(max.value(), equalTo(Double.NEGATIVE_INFINITY)); - - } - } - - /** - * Make sure that a request using a script does not get cached and a request - * not using a script does get cached. 
- */ - public void testDontCacheScripts() throws Exception { - assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long") - .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) - .get()); - indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("s", 1), - client().prepareIndex("cache_test_idx", "type", "2").setSource("s", 2)); - - // Make sure we are starting with a clear cache - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); - - // Test that a request using a script does not get cached - SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation( - max("foo").field("d").script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", emptyMap()))) - .get(); - assertSearchResponse(r); - - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); - - // To make sure that the cache is working test that a request not using - // a script is cached - r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(max("foo").field("d")).get(); - assertSearchResponse(r); - - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(1L)); - } - - public void testEarlyTermination() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx") - .setTrackTotalHits(false) - .setQuery(matchAllQuery()) - .addAggregation(max("max").field("values")) - .addAggregation(count("count").field("values")) - .get(); - - Max max = searchResponse.getAggregations().get("max"); - assertThat(max, notNullValue()); - assertThat(max.getName(), equalTo("max")); - assertThat(max.getValue(), equalTo(12.0)); - - ValueCount count = searchResponse.getAggregations().get("count"); - assertThat(count.getName(), equalTo("count")); - assertThat(count.getValue(), equalTo(20L)); - } - - public void testNestedEarlyTermination() throws Exception { - for (Aggregator.SubAggCollectionMode collectionMode : Aggregator.SubAggCollectionMode.values()) { - SearchResponse searchResponse = client().prepareSearch("idx") - .setTrackTotalHits(false) - .setQuery(matchAllQuery()) - .addAggregation(max("max").field("values")) - .addAggregation(count("count").field("values")) - .addAggregation(terms("terms").field("value") - .collectMode(collectionMode) - .subAggregation(max("sub_max").field("invalid"))) - .get(); - - Max max = searchResponse.getAggregations().get("max"); - assertThat(max, notNullValue()); - assertThat(max.getName(), equalTo("max")); - assertThat(max.getValue(), equalTo(12.0)); - - ValueCount count = searchResponse.getAggregations().get("count"); - assertThat(count.getName(), equalTo("count")); - assertThat(count.getValue(), equalTo(20L)); - - Terms terms = 
searchResponse.getAggregations().get("terms"); - assertThat(terms.getBuckets().size(), equalTo(10)); - for (Terms.Bucket b : terms.getBuckets()) { - InternalMax subMax = b.getAggregations().get("sub_max"); - assertThat(subMax.getValue(), equalTo(Double.NEGATIVE_INFINITY)); - } - } - } -} diff --git a/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 9f680f11f211..9766663d58b7 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -20,7 +20,6 @@ package org.elasticsearch.snapshots; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.cluster.SnapshotsInProgress; -import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -72,17 +71,13 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase { @After public void assertRepoConsistency() { if (skipRepoConsistencyCheckReason == null) { - client().admin().cluster().prepareGetRepositories().get().repositories() - .stream() - .map(RepositoryMetaData::name) - .forEach(name -> { - final List snapshots = client().admin().cluster().prepareGetSnapshots(name).get().getSnapshots(name); - // Delete one random snapshot to trigger repository cleanup. - if (snapshots.isEmpty() == false) { - client().admin().cluster().prepareDeleteSnapshot(name, randomFrom(snapshots).snapshotId().getName()).get(); - } - BlobStoreTestUtil.assertRepoConsistency(internalCluster(), name); - }); + client().admin().cluster().prepareGetRepositories().get().repositories().forEach(repositoryMetaData -> { + final String name = repositoryMetaData.name(); + if (repositoryMetaData.settings().getAsBoolean("readonly", false) == false) { + client().admin().cluster().prepareCleanupRepository(name).get(); + } + BlobStoreTestUtil.assertRepoConsistency(internalCluster(), name); + }); } else { logger.info("--> skipped repo consistency checks because [{}]", skipRepoConsistencyCheckReason); } diff --git a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 6538ccf40e73..6a6aecc0d8ed 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -486,13 +486,8 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest () -> client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap") .execute().actionGet().getSnapshots("test-repo")); - // TODO: Replace this by repository cleanup endpoint call once that's available logger.info("--> Go through a loop of creating and deleting a snapshot to trigger repository cleanup"); - client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-tmp") - .setWaitForCompletion(true) - .setIndices("test-idx") - .get(); - client().admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-tmp").get(); + client().admin().cluster().prepareCleanupRepository("test-repo").get(); // Subtract four files that will remain in the repository: // 
(1) index-(N+1) diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java index 15faecf46ca4..c38ddb45ab85 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java @@ -21,6 +21,7 @@ package org.elasticsearch.snapshots.mockstore; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.DeleteResult; import java.io.IOException; import java.io.InputStream; @@ -60,8 +61,8 @@ public class BlobContainerWrapper implements BlobContainer { } @Override - public void delete() throws IOException { - delegate.delete(); + public DeleteResult delete() throws IOException { + return delegate.delete(); } @Override diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepository.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepository.java index d21f3db81e69..9a2e4e246ec4 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepository.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepository.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.util.Maps; @@ -48,6 +49,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.Function; import java.util.stream.Collectors; @@ -215,13 +217,20 @@ public class MockEventuallyConsistentRepository extends BlobStoreRepository { } @Override - public void delete() { + public DeleteResult delete() { ensureNotClosed(); final String thisPath = path.buildAsString(); + final AtomicLong bytesDeleted = new AtomicLong(0L); + final AtomicLong blobsDeleted = new AtomicLong(0L); synchronized (context.actions) { consistentView(context.actions).stream().filter(action -> action.path.startsWith(thisPath)) - .forEach(a -> context.actions.add(new BlobStoreAction(Operation.DELETE, a.path))); + .forEach(a -> { + context.actions.add(new BlobStoreAction(Operation.DELETE, a.path)); + bytesDeleted.addAndGet(a.data.length); + blobsDeleted.incrementAndGet(); + }); } + return new DeleteResult(blobsDeleted.get(), bytesDeleted.get()); } @Override diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java index a552e7ac5466..bd0a5cc772fd 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import 
org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.common.blobstore.fs.FsBlobContainer; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.Setting; @@ -330,14 +331,20 @@ public class MockRepository extends FsRepository { } @Override - public void delete() throws IOException { + public DeleteResult delete() throws IOException { + DeleteResult deleteResult = DeleteResult.ZERO; for (BlobContainer child : children().values()) { - child.delete(); + deleteResult = deleteResult.add(child.delete()); } - for (String blob : listBlobs().values().stream().map(BlobMetaData::name).collect(Collectors.toList())) { + final Map blobs = listBlobs(); + long deleteBlobCount = blobs.size(); + long deleteByteCount = 0L; + for (String blob : blobs.values().stream().map(BlobMetaData::name).collect(Collectors.toList())) { deleteBlobIgnoringIfNotExists(blob); + deleteByteCount += blobs.get(blob).length(); } blobStore().blobContainer(path().parent()).deleteBlob(path().toArray()[path().toArray().length - 1]); + return deleteResult.add(deleteBlobCount, deleteByteCount); } @Override diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 804f7242c644..f962ad5f0194 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -44,6 +44,7 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; @@ -199,13 +200,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { assertTrue(connectionManager.nodeConnected(seedNode)); assertTrue(connectionManager.nodeConnected(discoverableNode)); assertTrue(connection.assertNoRunningConnections()); - PlainTransportFuture futureHandler = new PlainTransportFuture<>( - new FutureTransportResponseHandler() { - @Override - public ClusterSearchShardsResponse read(StreamInput in) throws IOException { - return new ClusterSearchShardsResponse(in); - } - }); + PlainTransportFuture futureHandler = transportFuture(ClusterSearchShardsResponse::new); TransportRequestOptions options = TransportRequestOptions.builder().withType(TransportRequestOptions.Type.BULK) .build(); IllegalStateException ise = (IllegalStateException) expectThrows(SendRequestTransportException.class, () -> { @@ -242,13 +237,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { assertTrue(connectionManager.nodeConnected(seedNode)); assertTrue(connectionManager.nodeConnected(discoverableNode)); assertTrue(connection.assertNoRunningConnections()); - PlainTransportFuture futureHandler = new PlainTransportFuture<>( - new FutureTransportResponseHandler() { - @Override - public ClusterSearchShardsResponse read(StreamInput in) throws IOException { - return new ClusterSearchShardsResponse(in); - } - }); + PlainTransportFuture futureHandler = transportFuture(ClusterSearchShardsResponse::new); TransportRequestOptions options = 
TransportRequestOptions.builder().withType(TransportRequestOptions.Type.BULK) .build(); IllegalStateException ise = (IllegalStateException) expectThrows(SendRequestTransportException.class, () -> { @@ -258,13 +247,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { }).getCause(); assertEquals(ise.getMessage(), "can't select channel size is 0 for types: [RECOVERY, BULK, STATE]"); - PlainTransportFuture handler = new PlainTransportFuture<>( - new FutureTransportResponseHandler() { - @Override - public ClusterSearchShardsResponse read(StreamInput in) throws IOException { - return new ClusterSearchShardsResponse(in); - } - }); + PlainTransportFuture handler = transportFuture(ClusterSearchShardsResponse::new); TransportRequestOptions ops = TransportRequestOptions.builder().withType(TransportRequestOptions.Type.REG) .build(); service.sendRequest(connection.getConnection(), ClusterSearchShardsAction.NAME, new ClusterSearchShardsRequest(), @@ -1273,4 +1256,26 @@ public class RemoteClusterConnectionTests extends ESTestCase { }); return stubbableTransport; } + + private static PlainTransportFuture transportFuture(Writeable.Reader reader) { + return new PlainTransportFuture<>(new TransportResponseHandler<>() { + @Override + public void handleResponse(V response) { + } + + @Override + public void handleException(final TransportException exp) { + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public V read(StreamInput in) throws IOException { + return reader.read(in); + } + }); + } } diff --git a/server/src/test/java/org/elasticsearch/transport/TransportInfoTests.java b/server/src/test/java/org/elasticsearch/transport/TransportInfoTests.java new file mode 100644 index 000000000000..ea6ca96bb9b6 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/transport/TransportInfoTests.java @@ -0,0 +1,107 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.net.InetAddress; +import java.util.Collections; +import java.util.Map; + +public class TransportInfoTests extends ESTestCase { + + private TransportInfo createTransportInfo(InetAddress address, int port, boolean cnameInPublishAddress) { + BoundTransportAddress boundAddress = new BoundTransportAddress( + new TransportAddress[]{new TransportAddress(address, port)}, + new TransportAddress(address, port) + ); + Map profiles = Collections.singletonMap("test_profile", boundAddress); + return new TransportInfo(boundAddress, profiles, cnameInPublishAddress); + } + + public void testCorrectlyDisplayPublishedCname() throws Exception { + InetAddress address = InetAddress.getByName("localhost"); + int port = 9200; + assertPublishAddress( + createTransportInfo(address, port,true), + "localhost/" + NetworkAddress.format(address) + ':' + port + ); + } + + public void testHideCnameIfDeprecatedFormat() throws Exception { + InetAddress address = InetAddress.getByName("localhost"); + int port = 9200; + assertPublishAddress( + createTransportInfo(address, port,false), + NetworkAddress.format(address) + ':' + port + ); + assertWarnings("transport.publish_address was printed as [ip:port] instead of [hostname/ip:port]. " + + "This format is deprecated and will change to [hostname/ip:port] in a future version. " + + "Use -Des.transport.cname_in_publish_address=true to enforce non-deprecated formatting.", + + "transport.test_profile.publish_address was printed as [ip:port] instead of [hostname/ip:port]. " + + "This format is deprecated and will change to [hostname/ip:port] in a future version. 
" + + "Use -Des.transport.cname_in_publish_address=true to enforce non-deprecated formatting."); + } + + public void testCorrectDisplayPublishedIp() throws Exception { + InetAddress address = InetAddress.getByName(NetworkAddress.format(InetAddress.getByName("localhost"))); + int port = 9200; + assertPublishAddress( + createTransportInfo(address, port,true), + NetworkAddress.format(address) + ':' + port + ); + } + + public void testCorrectDisplayPublishedIpv6() throws Exception { + InetAddress address = InetAddress.getByName(NetworkAddress.format(InetAddress.getByName("0:0:0:0:0:0:0:1"))); + int port = 9200; + assertPublishAddress( + createTransportInfo(address, port,true), + new TransportAddress(address, port).toString() + ); + } + + @SuppressWarnings("unchecked") + private void assertPublishAddress(TransportInfo httpInfo, String expected) throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + httpInfo.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + Map transportMap = (Map) createParser(builder).map().get(TransportInfo.Fields.TRANSPORT); + Map profilesMap = (Map) transportMap.get("profiles"); + assertEquals( + expected, + transportMap.get(TransportInfo.Fields.PUBLISH_ADDRESS) + ); + assertEquals( + expected, + ((Map)profilesMap.get("test_profile")).get(TransportInfo.Fields.PUBLISH_ADDRESS) + ); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java index 11a359c0ded4..a78a4b9323e4 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java @@ -19,6 +19,7 @@ package org.elasticsearch.repositories; import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; import org.elasticsearch.action.support.PlainActionFuture; @@ -210,30 +211,49 @@ public abstract class AbstractThirdPartyRepositoryTestCase extends ESSingleNodeT .state(), equalTo(SnapshotState.SUCCESS)); - logger.info("--> creating a dangling index folder"); final BlobStoreRepository repo = (BlobStoreRepository) getInstanceFromNode(RepositoriesService.class).repository("test-repo"); - final PlainActionFuture future = PlainActionFuture.newFuture(); final Executor genericExec = repo.threadPool().executor(ThreadPool.Names.GENERIC); + + logger.info("--> creating a dangling index folder"); + + createDanglingIndex(repo, genericExec); + + logger.info("--> deleting a snapshot to trigger repository cleanup"); + client().admin().cluster().deleteSnapshot(new DeleteSnapshotRequest("test-repo", snapshotName)).actionGet(); + + assertConsistentRepository(repo, genericExec); + + logger.info("--> Create dangling index"); + createDanglingIndex(repo, genericExec); + + logger.info("--> Execute repository cleanup"); + final CleanupRepositoryResponse response = client().admin().cluster().prepareCleanupRepository("test-repo").get(); + assertCleanupResponse(response, 3L, 1L); + } + + protected void assertCleanupResponse(CleanupRepositoryResponse response, long bytes, long blobs) { + 
assertThat(response.result().blobs(), equalTo(1L + 2L)); + assertThat(response.result().bytes(), equalTo(3L + 2 * 3L)); + } + + private void createDanglingIndex(final BlobStoreRepository repo, final Executor genericExec) throws Exception { + final PlainActionFuture future = PlainActionFuture.newFuture(); genericExec.execute(new ActionRunnable<>(future) { @Override protected void doRun() throws Exception { final BlobStore blobStore = repo.blobStore(); blobStore.blobContainer(repo.basePath().add("indices").add("foo")) - .writeBlob("bar", new ByteArrayInputStream(new byte[0]), 0, false); + .writeBlob("bar", new ByteArrayInputStream(new byte[3]), 3, false); for (String prefix : Arrays.asList("snap-", "meta-")) { blobStore.blobContainer(repo.basePath()) - .writeBlob(prefix + "foo.dat", new ByteArrayInputStream(new byte[0]), 0, false); + .writeBlob(prefix + "foo.dat", new ByteArrayInputStream(new byte[3]), 3, false); } future.onResponse(null); } }); future.actionGet(); assertTrue(assertCorruptionVisible(repo, genericExec)); - logger.info("--> deleting a snapshot to trigger repository cleanup"); - client().admin().cluster().deleteSnapshot(new DeleteSnapshotRequest("test-repo", snapshotName)).actionGet(); - - assertConsistentRepository(repo, genericExec); } protected boolean assertCorruptionVisible(BlobStoreRepository repo, Executor executor) throws Exception { diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index b92dacdb5ee1..6cc2e0ca7177 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -92,7 +92,6 @@ import java.util.stream.Collectors; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; -import static org.elasticsearch.test.ESTestCase.getPortRange; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -251,7 +250,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { } }); - TransportFuture res = serviceB.submitRequest(nodeA, "internal:sayHello", + PlainTransportFuture res = submitRequest(serviceB, nodeA, "internal:sayHello", new StringMessageRequest("moshe"), new TransportResponseHandler() { @Override public StringMessageResponse read(StreamInput in) throws IOException { @@ -282,8 +281,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { assertThat(e.getMessage(), false, equalTo(true)); } - res = serviceB.submitRequest(nodeA, "internal:sayHello", new StringMessageRequest("moshe"), - TransportRequestOptions.EMPTY, new TransportResponseHandler() { + res = submitRequest(serviceB, nodeA, "internal:sayHello", new StringMessageRequest("moshe"), new TransportResponseHandler<>() { @Override public StringMessageResponse read(StreamInput in) throws IOException { return new StringMessageResponse(in); @@ -361,7 +359,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { threadPool.getThreadContext().putHeader("test.ping.user", "ping_user"); threadPool.getThreadContext().putTransient("my_private_context", context); - TransportFuture res = serviceB.submitRequest(nodeA, "internal:ping_pong", ping, responseHandler); + PlainTransportFuture res = submitRequest(serviceB, nodeA, "internal:ping_pong", 
ping, responseHandler); StringMessageResponse message = res.get(); assertThat("pong", equalTo(message.message)); @@ -482,7 +480,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { serviceB.addMessageListener(tracerB); try { - serviceA.submitRequest(nodeB, ACTION, TransportRequest.Empty.INSTANCE, EmptyTransportResponseHandler.INSTANCE_SAME).get(); + submitRequest(serviceA, nodeB, ACTION, TransportRequest.Empty.INSTANCE, EmptyTransportResponseHandler.INSTANCE_SAME).get(); } catch (ExecutionException e) { assertThat(e.getCause(), instanceOf(ElasticsearchException.class)); assertThat(ExceptionsHelper.unwrapCause(e.getCause()).getMessage(), equalTo("simulated")); @@ -501,7 +499,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { }); try { - serviceB.submitRequest(nodeA, ACTION, TransportRequest.Empty.INSTANCE, EmptyTransportResponseHandler.INSTANCE_SAME).get(); + submitRequest(serviceB, nodeA, ACTION, TransportRequest.Empty.INSTANCE, EmptyTransportResponseHandler.INSTANCE_SAME).get(); } catch (ExecutionException e) { assertThat(e.getCause(), instanceOf(ElasticsearchException.class)); assertThat(ExceptionsHelper.unwrapCause(e.getCause()).getMessage(), equalTo("simulated")); @@ -521,7 +519,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { // use assert busy as callbacks are called on a different thread try { - serviceA.submitRequest(nodeA, ACTION, TransportRequest.Empty.INSTANCE, EmptyTransportResponseHandler.INSTANCE_SAME).get(); + submitRequest(serviceA, nodeA, ACTION, TransportRequest.Empty.INSTANCE, EmptyTransportResponseHandler.INSTANCE_SAME).get(); } catch (ExecutionException e) { assertThat(e.getCause(), instanceOf(ElasticsearchException.class)); assertThat(ExceptionsHelper.unwrapCause(e.getCause()).getMessage(), equalTo("simulated")); @@ -559,10 +557,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { ConnectionProfile connectionProfile = ConnectionProfile.buildDefaultConnectionProfile(settingsWithCompress); serviceC.connectToNode(serviceA.getLocalDiscoNode(), connectionProfile); - - TransportFuture res = serviceC.submitRequest(nodeA, "internal:sayHello", - TransportRequest.Empty.INSTANCE, TransportRequestOptions.EMPTY, - new TransportResponseHandler() { + PlainTransportFuture res = submitRequest(serviceC, nodeA, "internal:sayHello", + TransportRequest.Empty.INSTANCE, new TransportResponseHandler<>() { @Override public TransportResponse.Empty read(StreamInput in) { return TransportResponse.Empty.INSTANCE; @@ -613,9 +609,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { ConnectionProfile connectionProfile = ConnectionProfile.buildDefaultConnectionProfile(settingsWithCompress); serviceC.connectToNode(serviceA.getLocalDiscoNode(), connectionProfile); - TransportFuture res = serviceC.submitRequest(nodeA, "internal:sayHello", - new StringMessageRequest("moshe"), TransportRequestOptions.EMPTY, - new TransportResponseHandler() { + PlainTransportFuture res = submitRequest(serviceC, nodeA, "internal:sayHello", + new StringMessageRequest("moshe"), new TransportResponseHandler<>() { @Override public StringMessageResponse read(StreamInput in) throws IOException { return new StringMessageResponse(in); @@ -654,7 +649,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { throw new RuntimeException("bad message !!!"); }); - TransportFuture res = serviceB.submitRequest(nodeA, "internal:sayHelloException", + PlainTransportFuture res = 
submitRequest(serviceB, nodeA, "internal:sayHelloException", new StringMessageRequest("moshe"), new TransportResponseHandler() { @Override public StringMessageResponse read(StreamInput in) throws IOException { @@ -843,8 +838,8 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { latch3.countDown(); } }); - TransportFuture foobar = serviceB.submitRequest(nodeA, "internal:foobar", - new StringMessageRequest(""), TransportRequestOptions.EMPTY, EmptyTransportResponseHandler.INSTANCE_SAME); + PlainTransportFuture foobar = submitRequest(serviceB, nodeA, "internal:foobar", + new StringMessageRequest(""), EmptyTransportResponseHandler.INSTANCE_SAME); latch2.countDown(); try { foobar.txGet(); @@ -869,7 +864,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { } }); - TransportFuture res = serviceB.submitRequest(nodeA, "internal:sayHelloTimeoutNoResponse", + PlainTransportFuture res = submitRequest(serviceB, nodeA, "internal:sayHelloTimeoutNoResponse", new StringMessageRequest("moshe"), TransportRequestOptions.builder().withTimeout(100).build(), new TransportResponseHandler() { @Override @@ -933,7 +928,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { } }); final CountDownLatch latch = new CountDownLatch(1); - TransportFuture res = serviceB.submitRequest(nodeA, "internal:sayHelloTimeoutDelayedResponse", + PlainTransportFuture res = submitRequest(serviceB, nodeA, "internal:sayHelloTimeoutDelayedResponse", new StringMessageRequest("forever"), TransportRequestOptions.builder().withTimeout(100).build(), new TransportResponseHandler() { @Override @@ -971,7 +966,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { for (int i = 0; i < 10; i++) { final int counter = i; // now, try and send another request, this times, with a short timeout - TransportFuture result = serviceB.submitRequest(nodeA, "internal:sayHelloTimeoutDelayedResponse", + PlainTransportFuture result = submitRequest(serviceB, nodeA, "internal:sayHelloTimeoutDelayedResponse", new StringMessageRequest(counter + "ms"), TransportRequestOptions.builder().withTimeout(3000).build(), new TransportResponseHandler() { @Override @@ -1291,7 +1286,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { Version0Request version0Request = new Version0Request(); version0Request.value1 = 1; - Version0Response version0Response = serviceA.submitRequest(nodeB, "internal:version", version0Request, + Version0Response version0Response = submitRequest(serviceA, nodeB, "internal:version", version0Request, new TransportResponseHandler() { @Override public Version0Response read(StreamInput in) throws IOException { @@ -1333,7 +1328,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { Version1Request version1Request = new Version1Request(); version1Request.value1 = 1; version1Request.value2 = 2; - Version1Response version1Response = serviceB.submitRequest(nodeA, "internal:version", version1Request, + Version1Response version1Response = submitRequest(serviceB, nodeA, "internal:version", version1Request, new TransportResponseHandler() { @Override public Version1Response read(StreamInput in) throws IOException { @@ -1375,7 +1370,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { Version1Request version1Request = new Version1Request(); version1Request.value1 = 1; version1Request.value2 = 2; - Version1Response version1Response = serviceB.submitRequest(nodeB, "internal:version", 
version1Request, + Version1Response version1Response = submitRequest(serviceB, nodeB, "internal:version", version1Request, new TransportResponseHandler() { @Override public Version1Response read(StreamInput in) throws IOException { @@ -1415,7 +1410,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { Version0Request version0Request = new Version0Request(); version0Request.value1 = 1; - Version0Response version0Response = serviceA.submitRequest(nodeA, "internal:version", version0Request, + Version0Response version0Response = submitRequest(serviceA, nodeA, "internal:version", version0Request, new TransportResponseHandler() { @Override public Version0Response read(StreamInput in) throws IOException { @@ -1451,7 +1446,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { serviceB.addFailToSendNoConnectRule(serviceA); - TransportFuture res = serviceB.submitRequest(nodeA, "internal:sayHello", + PlainTransportFuture res = submitRequest(serviceB, nodeA, "internal:sayHello", new StringMessageRequest("moshe"), new TransportResponseHandler() { @Override public StringMessageResponse read(StreamInput in) throws IOException { @@ -1508,7 +1503,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { serviceB.addUnresponsiveRule(serviceA); - TransportFuture res = serviceB.submitRequest(nodeA, "internal:sayHello", + PlainTransportFuture res = submitRequest(serviceB, nodeA, "internal:sayHello", new StringMessageRequest("moshe"), TransportRequestOptions.builder().withTimeout(100).build(), new TransportResponseHandler() { @Override @@ -2759,4 +2754,25 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { return transport.getAcceptedChannels(); } + public static PlainTransportFuture submitRequest(TransportService transportService, + DiscoveryNode node, String action, + TransportRequest request, + TransportResponseHandler handler) + throws TransportException { + return submitRequest(transportService, node, action, request, TransportRequestOptions.EMPTY, handler); + } + + public static PlainTransportFuture submitRequest(TransportService transportService, DiscoveryNode node, + String action, TransportRequest request, + TransportRequestOptions options, + TransportResponseHandler handler) + throws TransportException { + PlainTransportFuture futureHandler = new PlainTransportFuture<>(handler); + try { + transportService.sendRequest(node, action, request, options, futureHandler); + } catch (NodeNotConnectedException ex) { + futureHandler.handleException(ex); + } + return futureHandler; + } } diff --git a/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc index 1349e8def05d..a824afb49b27 100644 --- a/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc @@ -68,7 +68,18 @@ A watch has the following fields: | `throttle_period` | The minimum time between actions being run, the default for this is 5 seconds. This default can be changed in the - config file with the setting `xpack.watcher.throttle.period.default_period`. + config file with the setting + `xpack.watcher.throttle.period.default_period`. If both + this value and the `throttle_period_in_millis` parameter + are specified, {watcher} uses the last parameter + included in the request. + +| `throttle_period_in_millis` | Minimum time in milliseconds between actions + being run. Defaults to `5000`. 
If both this + value and the `throttle_period` parameter are + specified, {watcher} uses the last parameter + included in the request. + |====== [float] diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java index 39b11f08ec69..24ebd05a2d10 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java @@ -30,6 +30,7 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.MissingHistoryOperationsException; +import org.elasticsearch.index.seqno.RetentionLease; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardNotStartedException; @@ -44,6 +45,7 @@ import org.elasticsearch.xpack.ccr.Ccr; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.Objects; import java.util.concurrent.TimeUnit; @@ -529,8 +531,10 @@ public class ShardChangesAction extends ActionType } } } catch (MissingHistoryOperationsException e) { - String message = "Operations are no longer available for replicating. Maybe increase the retention setting [" + - IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey() + "]?"; + final Collection retentionLeases = indexShard.getRetentionLeases().leases(); + final String message = "Operations are no longer available for replicating. " + + "Existing retention leases [" + retentionLeases + "]; maybe increase the retention lease period setting " + + "[" + IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey() + "]?"; // Make it easy to detect this error in ShardFollowNodeTask: // (adding a metadata header instead of introducing a new exception that extends ElasticsearchException) ResourceNotFoundException wrapper = new ResourceNotFoundException(message, e); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesTests.java index 16e7a90c3c67..93723b6ab223 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesTests.java @@ -17,6 +17,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.seqno.RetentionLease; +import org.elasticsearch.index.seqno.RetentionLeaseActions; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -128,8 +131,12 @@ public class ShardChangesTests extends ESSingleNodeTestCase { forceMergeRequest.maxNumSegments(1); client().admin().indices().forceMerge(forceMergeRequest).actionGet(); + client().admin().indices().execute(RetentionLeaseActions.Add.INSTANCE, new RetentionLeaseActions.AddRequest( + new ShardId(resolveIndex("index"), 0), "test", RetentionLeaseActions.RETAIN_ALL, 
"ccr")).get(); + ShardStats shardStats = client().admin().indices().prepareStats("index").get().getIndex("index").getShards()[0]; String historyUUID = shardStats.getCommitStats().getUserData().get(Engine.HISTORY_UUID_KEY); + Collection retentionLeases = shardStats.getRetentionLeaseStats().retentionLeases().leases(); ShardChangesAction.Request request = new ShardChangesAction.Request(shardStats.getShardRouting().shardId(), historyUUID); request.setFromSeqNo(0L); request.setMaxOperationCount(1); @@ -137,8 +144,9 @@ public class ShardChangesTests extends ESSingleNodeTestCase { { ResourceNotFoundException e = expectThrows(ResourceNotFoundException.class, () -> client().execute(ShardChangesAction.INSTANCE, request).actionGet()); - assertThat(e.getMessage(), equalTo("Operations are no longer available for replicating. Maybe increase the retention setting " + - "[index.soft_deletes.retention.operations]?")); + assertThat(e.getMessage(), equalTo("Operations are no longer available for replicating. " + + "Existing retention leases [" + retentionLeases + "]; maybe increase the retention lease period setting " + + "[index.soft_deletes.retention_lease.period]?")); assertThat(e.getMetadataKeys().size(), equalTo(1)); assertThat(e.getMetadata(Ccr.REQUESTED_OPS_MISSING_METADATA_KEY), notNullValue()); @@ -157,7 +165,8 @@ public class ShardChangesTests extends ESSingleNodeTestCase { ResourceNotFoundException cause = (ResourceNotFoundException) e.getCause(); assertThat(cause.getMessage(), equalTo("Operations are no longer available for replicating. " + - "Maybe increase the retention setting [index.soft_deletes.retention.operations]?")); + "Existing retention leases [" + retentionLeases + "]; maybe increase the retention lease period setting " + + "[index.soft_deletes.retention_lease.period]?")); assertThat(cause.getMetadataKeys().size(), equalTo(1)); assertThat(cause.getMetadata(Ccr.REQUESTED_OPS_MISSING_METADATA_KEY), notNullValue()); assertThat(cause.getMetadata(Ccr.REQUESTED_OPS_MISSING_METADATA_KEY), contains("0", "0")); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/Auditor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditor.java similarity index 87% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/Auditor.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditor.java index d4a1f14f1885..102e07dc4c14 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/Auditor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditor.java @@ -21,20 +21,20 @@ import java.util.Objects; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; -public class Auditor { +public abstract class AbstractAuditor { - private static final Logger logger = LogManager.getLogger(Auditor.class); + private static final Logger logger = LogManager.getLogger(AbstractAuditor.class); private final Client client; private final String nodeName; private final String auditIndex; private final String executionOrigin; private final AbstractAuditMessage.AbstractBuilder messageBuilder; - public Auditor(Client client, - String nodeName, - String auditIndex, - String executionOrigin, - AbstractAuditMessage.AbstractBuilder messageBuilder) { + public 
AbstractAuditor(Client client, + String nodeName, + String auditIndex, + String executionOrigin, + AbstractAuditMessage.AbstractBuilder messageBuilder) { this.client = Objects.requireNonNull(client); this.nodeName = Objects.requireNonNull(nodeName); this.auditIndex = auditIndex; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessages.java index 45e923de231d..32f639a1fac2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessages.java @@ -34,6 +34,10 @@ public class DataFrameMessages { public static final String DATA_FRAME_CANNOT_STOP_FAILED_TRANSFORM = "Unable to stop data frame transform [{0}] as it is in a failed state with reason [{1}]." + " Use force stop to stop the data frame transform."; + public static final String DATA_FRAME_CANNOT_START_FAILED_TRANSFORM = + "Unable to start data frame transform [{0}] as it is in a failed state with failure: [{1}]. " + + "Use force start to restart data frame transform once error is resolved."; + public static final String FAILED_TO_CREATE_DESTINATION_INDEX = "Could not create destination index [{0}] for transform [{1}]"; public static final String FAILED_TO_RELOAD_TRANSFORM_CONFIGURATION = "Failed to reload data frame transform configuration for transform [{0}]"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java index e1ebe4eb0ab3..4fe87d9727f9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.dataframe.action; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.tasks.BaseTasksRequest; @@ -34,24 +35,40 @@ public class StartDataFrameTransformTaskAction extends ActionType { private final String id; + private final boolean force; - public Request(String id) { + public Request(String id, boolean force) { this.id = ExceptionsHelper.requireNonNull(id, DataFrameField.ID.getPreferredName()); + this.force = force; } public Request(StreamInput in) throws IOException { super(in); id = in.readString(); + if (in.getVersion().onOrAfter(Version.V_7_4_0)) { + force = in.readBoolean(); + } else { + // The behavior before V_7_4_0 was that this flag did not exist, + // assuming previous checks allowed this task to be started. 
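The new `force` flag is gated on the wire by `Version.V_7_4_0`: it is only written to streams targeting 7.4.0 or later, and when read from an older node it defaults to `true`, preserving the pre-7.4.0 behaviour described in the comment above. A minimal round-trip sketch of that fallback, written as an ESTestCase-style test; the transform id is illustrative and not part of this change:

--------------------------------------------------
public void testForceFlagDefaultsToTrueOnOldWireFormat() throws IOException {
    // Serialize the request as an older (pre-7.4.0) node would: force is not written.
    BytesStreamOutput out = new BytesStreamOutput();
    out.setVersion(Version.V_7_3_0);
    new StartDataFrameTransformTaskAction.Request("my-transform", false).writeTo(out);

    // Read it back with the same old wire version: force falls back to true.
    StreamInput in = out.bytes().streamInput();
    in.setVersion(Version.V_7_3_0);
    StartDataFrameTransformTaskAction.Request read = new StartDataFrameTransformTaskAction.Request(in);
    assertTrue(read.isForce()); // requests from old nodes are treated as force == true
}
--------------------------------------------------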
+ force = true; + } } public String getId() { return id; } + public boolean isForce() { + return force; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(id); + if (out.getVersion().onOrAfter(Version.V_7_4_0)) { + out.writeBoolean(force); + } } @Override @@ -66,7 +83,7 @@ public class StartDataFrameTransformTaskAction extends ActionType { - finishAndSetState(); - onFailure(e); - })); + }, + this::finishWithFailure)); }); logger.debug("Beginning to index [" + getJobId() + "], state: [" + currentState + "]"); return true; @@ -250,8 +248,9 @@ public abstract class AsyncTwoPhaseIndexer onFailure(exc)); + onFailure(exc); + doSaveState(finishAndSetState(), position.get(), () -> {}); } private void finishWithIndexingFailure(Exception exc) { stats.incrementIndexingFailures(); - doSaveState(finishAndSetState(), position.get(), () -> onFailure(exc)); + onFailure(exc); + doSaveState(finishAndSetState(), position.get(), () -> {}); + } + + private void finishWithFailure(Exception exc) { + onFailure(exc); + finishAndSetState(); } private IndexerState finishAndSetState() { @@ -390,8 +396,7 @@ public abstract class AsyncTwoPhaseIndexer listener = ActionListener.wrap(this::onSearchResponse, this::finishWithSearchFailure); nextSearch(listener); } catch (Exception e) { - finishAndSetState(); - onFailure(e); + finishWithFailure(e); } } } catch (Exception e) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EstimateMemoryUsageAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EstimateMemoryUsageAction.java index 62a8220d1a53..529db21cced7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EstimateMemoryUsageAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EstimateMemoryUsageAction.java @@ -5,8 +5,6 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.Nullable; @@ -18,14 +16,10 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; -import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import java.io.IOException; import java.util.Objects; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; public class EstimateMemoryUsageAction extends ActionType { @@ -37,88 +31,12 @@ public class EstimateMemoryUsageAction extends ActionType PARSER = - new ConstructingObjectParser<>( - NAME, - args -> { - DataFrameAnalyticsConfig.Builder configBuilder = (DataFrameAnalyticsConfig.Builder) args[0]; - DataFrameAnalyticsConfig config = configBuilder.buildForMemoryEstimation(); - return new EstimateMemoryUsageAction.Request(config); - }); - - static { - PARSER.declareObject(constructorArg(), DataFrameAnalyticsConfig.STRICT_PARSER, DATA_FRAME_ANALYTICS_CONFIG); - } - - public static EstimateMemoryUsageAction.Request 
parseRequest(XContentParser parser) { - return PARSER.apply(parser, null); - } - - private final DataFrameAnalyticsConfig config; - - public Request(DataFrameAnalyticsConfig config) { - this.config = ExceptionsHelper.requireNonNull(config, DATA_FRAME_ANALYTICS_CONFIG); - } - - public Request(StreamInput in) throws IOException { - super(in); - this.config = new DataFrameAnalyticsConfig(in); - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - public DataFrameAnalyticsConfig getConfig() { - return config; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - config.writeTo(out); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(DATA_FRAME_ANALYTICS_CONFIG.getPreferredName(), config); - builder.endObject(); - return builder; - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } - if (other == null || getClass() != other.getClass()) { - return false; - } - - Request that = (Request) other; - return Objects.equals(config, that.config); - } - - @Override - public int hashCode() { - return Objects.hash(config); - } - } - public static class Response extends ActionResponse implements ToXContentObject { public static final ParseField TYPE = new ParseField("memory_usage_estimation_result"); - public static final ParseField EXPECTED_MEMORY_USAGE_WITH_ONE_PARTITION = - new ParseField("expected_memory_usage_with_one_partition"); - public static final ParseField EXPECTED_MEMORY_USAGE_WITH_MAX_PARTITIONS = - new ParseField("expected_memory_usage_with_max_partitions"); + public static final ParseField EXPECTED_MEMORY_WITHOUT_DISK = new ParseField("expected_memory_without_disk"); + public static final ParseField EXPECTED_MEMORY_WITH_DISK = new ParseField("expected_memory_with_disk"); static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( @@ -128,55 +46,52 @@ public class EstimateMemoryUsageAction extends ActionType ByteSizeValue.parseBytesSizeValue(p.text(), EXPECTED_MEMORY_USAGE_WITH_ONE_PARTITION.getPreferredName()), - EXPECTED_MEMORY_USAGE_WITH_ONE_PARTITION, + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), EXPECTED_MEMORY_WITHOUT_DISK.getPreferredName()), + EXPECTED_MEMORY_WITHOUT_DISK, ObjectParser.ValueType.VALUE); PARSER.declareField( optionalConstructorArg(), - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), EXPECTED_MEMORY_USAGE_WITH_MAX_PARTITIONS.getPreferredName()), - EXPECTED_MEMORY_USAGE_WITH_MAX_PARTITIONS, + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), EXPECTED_MEMORY_WITH_DISK.getPreferredName()), + EXPECTED_MEMORY_WITH_DISK, ObjectParser.ValueType.VALUE); } - private final ByteSizeValue expectedMemoryUsageWithOnePartition; - private final ByteSizeValue expectedMemoryUsageWithMaxPartitions; + private final ByteSizeValue expectedMemoryWithoutDisk; + private final ByteSizeValue expectedMemoryWithDisk; - public Response(@Nullable ByteSizeValue expectedMemoryUsageWithOnePartition, - @Nullable ByteSizeValue expectedMemoryUsageWithMaxPartitions) { - this.expectedMemoryUsageWithOnePartition = expectedMemoryUsageWithOnePartition; - this.expectedMemoryUsageWithMaxPartitions = expectedMemoryUsageWithMaxPartitions; + public Response(@Nullable ByteSizeValue expectedMemoryWithoutDisk, @Nullable ByteSizeValue expectedMemoryWithDisk) { + this.expectedMemoryWithoutDisk = expectedMemoryWithoutDisk; + 
this.expectedMemoryWithDisk = expectedMemoryWithDisk; } public Response(StreamInput in) throws IOException { super(in); - this.expectedMemoryUsageWithOnePartition = in.readOptionalWriteable(ByteSizeValue::new); - this.expectedMemoryUsageWithMaxPartitions = in.readOptionalWriteable(ByteSizeValue::new); + this.expectedMemoryWithoutDisk = in.readOptionalWriteable(ByteSizeValue::new); + this.expectedMemoryWithDisk = in.readOptionalWriteable(ByteSizeValue::new); } - public ByteSizeValue getExpectedMemoryUsageWithOnePartition() { - return expectedMemoryUsageWithOnePartition; + public ByteSizeValue getExpectedMemoryWithoutDisk() { + return expectedMemoryWithoutDisk; } - public ByteSizeValue getExpectedMemoryUsageWithMaxPartitions() { - return expectedMemoryUsageWithMaxPartitions; + public ByteSizeValue getExpectedMemoryWithDisk() { + return expectedMemoryWithDisk; } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalWriteable(expectedMemoryUsageWithOnePartition); - out.writeOptionalWriteable(expectedMemoryUsageWithMaxPartitions); + out.writeOptionalWriteable(expectedMemoryWithoutDisk); + out.writeOptionalWriteable(expectedMemoryWithDisk); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - if (expectedMemoryUsageWithOnePartition != null) { - builder.field( - EXPECTED_MEMORY_USAGE_WITH_ONE_PARTITION.getPreferredName(), expectedMemoryUsageWithOnePartition.getStringRep()); + if (expectedMemoryWithoutDisk != null) { + builder.field(EXPECTED_MEMORY_WITHOUT_DISK.getPreferredName(), expectedMemoryWithoutDisk.getStringRep()); } - if (expectedMemoryUsageWithMaxPartitions != null) { - builder.field( - EXPECTED_MEMORY_USAGE_WITH_MAX_PARTITIONS.getPreferredName(), expectedMemoryUsageWithMaxPartitions.getStringRep()); + if (expectedMemoryWithDisk != null) { + builder.field(EXPECTED_MEMORY_WITH_DISK.getPreferredName(), expectedMemoryWithDisk.getStringRep()); } builder.endObject(); return builder; @@ -192,13 +107,13 @@ public class EstimateMemoryUsageAction extends ActionType implements ToXContentObject { + /** + * Parses request. 
+ */ public static Request parseRequest(String id, XContentParser parser) { DataFrameAnalyticsConfig.Builder config = DataFrameAnalyticsConfig.STRICT_PARSER.apply(parser, null); if (config.getId() == null) { @@ -47,6 +50,17 @@ public class PutDataFrameAnalyticsAction extends ActionType getRequiredFields(); + + /** + * @return {@code true} if this analysis supports data frame rows with missing values + */ + boolean supportsMissingValues(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetection.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetection.java index 35b3b5d3e95c..32a478905729 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetection.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetection.java @@ -164,6 +164,11 @@ public class OutlierDetection implements DataFrameAnalysis { return Collections.emptySet(); } + @Override + public boolean supportsMissingValues() { + return false; + } + public enum Method { LOF, LDOF, DISTANCE_KTH_NN, DISTANCE_KNN; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Regression.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Regression.java index a6b7c983a29c..9c779cc5ee74 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Regression.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Regression.java @@ -184,6 +184,11 @@ public class Regression implements DataFrameAnalysis { return Collections.singleton(dependentVariable); } + @Override + public boolean supportsMissingValues() { + return true; + } + @Override public int hashCode() { return Objects.hash(dependentVariable, lambda, gamma, eta, maximumNumberTrees, featureBagFraction, predictionFieldName); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/MeanSquaredError.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/MeanSquaredError.java index 8dd922b6ac26..e48cb46b5c0a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/MeanSquaredError.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/MeanSquaredError.java @@ -69,7 +69,7 @@ public class MeanSquaredError implements RegressionMetric { @Override public EvaluationMetricResult evaluate(Aggregations aggs) { NumericMetricsAggregation.SingleValue value = aggs.get(AGG_NAME); - return value == null ? null : new Result(value.value()); + return value == null ? 
new Result(0.0) : new Result(value.value()); } @Override @@ -137,5 +137,18 @@ public class MeanSquaredError implements RegressionMetric { builder.endObject(); return builder; } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Result other = (Result)o; + return error == other.error; + } + + @Override + public int hashCode() { + return Objects.hashCode(error); + } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/RSquared.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/RSquared.java index 871f166733f4..a55306561833 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/RSquared.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/RSquared.java @@ -79,7 +79,7 @@ public class RSquared implements RegressionMetric { ExtendedStats extendedStats = aggs.get(ExtendedStatsAggregationBuilder.NAME + "_actual"); // extendedStats.getVariance() is the statistical sumOfSquares divided by count return residualSumOfSquares == null || extendedStats == null || extendedStats.getCount() == 0 ? - null : + new Result(0.0) : new Result(1 - (residualSumOfSquares.value() / (extendedStats.getVariance() * extendedStats.getCount()))); } @@ -148,5 +148,18 @@ public class RSquared implements RegressionMetric { builder.endObject(); return builder; } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Result other = (Result)o; + return value == other.value; + } + + @Override + public int hashCode() { + return Objects.hashCode(value); + } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/Regression.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/Regression.java index e3869dce2ee5..610c065fd810 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/Regression.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/Regression.java @@ -121,6 +121,12 @@ public class Regression implements Evaluation { @Override public void evaluate(SearchResponse searchResponse, ActionListener> listener) { List results = new ArrayList<>(metrics.size()); + if (searchResponse.getHits().getTotalHits().value == 0) { + listener.onFailure(ExceptionsHelper.badRequestException("No documents found containing both [{}, {}] fields", + actualField, + predictedField)); + return; + } for (RegressionMetric metric : metrics) { results.add(metric.evaluate(searchResponse.getAggregations())); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/softclassification/Recall.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/softclassification/Recall.java index 5c4ab57241d9..f7103aceedae 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/softclassification/Recall.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/softclassification/Recall.java @@ -81,7 +81,7 @@ public class Recall extends AbstractConfusionMatrixMetric { for (int i = 0; i < recalls.length; i++) 
{ double threshold = thresholds[i]; Filter tpAgg = aggs.get(aggName(classInfo, threshold, Condition.TP)); - Filter fnAgg =aggs.get(aggName(classInfo, threshold, Condition.FN)); + Filter fnAgg = aggs.get(aggName(classInfo, threshold, Condition.FN)); long tp = tpAgg.getDocCount(); long fn = fnAgg.getDocCount(); recalls[i] = tp + fn == 0 ? 0.0 : (double) tp / (tp + fn); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java index 11674bf26f46..13dd077f605f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java @@ -58,7 +58,7 @@ import org.elasticsearch.xpack.core.ml.job.results.Influencer; import org.elasticsearch.xpack.core.ml.job.results.ModelPlot; import org.elasticsearch.xpack.core.ml.job.results.ReservedFieldNames; import org.elasticsearch.xpack.core.ml.job.results.Result; -import org.elasticsearch.xpack.core.ml.notifications.AuditMessage; +import org.elasticsearch.xpack.core.ml.notifications.AnomalyDetectionAuditMessage; import org.elasticsearch.xpack.core.ml.utils.ExponentialAverageCalculationContext; import java.io.IOException; @@ -1122,10 +1122,10 @@ public class ElasticsearchMappings { .startObject(Job.ID.getPreferredName()) .field(TYPE, KEYWORD) .endObject() - .startObject(AuditMessage.LEVEL.getPreferredName()) + .startObject(AnomalyDetectionAuditMessage.LEVEL.getPreferredName()) .field(TYPE, KEYWORD) .endObject() - .startObject(AuditMessage.MESSAGE.getPreferredName()) + .startObject(AnomalyDetectionAuditMessage.MESSAGE.getPreferredName()) .field(TYPE, TEXT) .startObject(FIELDS) .startObject(RAW) @@ -1133,10 +1133,10 @@ public class ElasticsearchMappings { .endObject() .endObject() .endObject() - .startObject(AuditMessage.TIMESTAMP.getPreferredName()) + .startObject(AnomalyDetectionAuditMessage.TIMESTAMP.getPreferredName()) .field(TYPE, DATE) .endObject() - .startObject(AuditMessage.NODE_NAME.getPreferredName()) + .startObject(AnomalyDetectionAuditMessage.NODE_NAME.getPreferredName()) .field(TYPE, KEYWORD) .endObject() .endObject() diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AuditMessage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AnomalyDetectionAuditMessage.java similarity index 68% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AuditMessage.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AnomalyDetectionAuditMessage.java index 6daa4223afd7..3c00a1f032b4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AuditMessage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AnomalyDetectionAuditMessage.java @@ -18,12 +18,12 @@ import java.util.Date; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; -public class AuditMessage extends AbstractAuditMessage { +public class AnomalyDetectionAuditMessage extends AbstractAuditMessage { - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + public static final 
ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "ml_audit_message", true, - a -> new AuditMessage((String)a[0], (String)a[1], (Level)a[2], (Date)a[3], (String)a[4])); + a -> new AnomalyDetectionAuditMessage((String)a[0], (String)a[1], (Level)a[2], (Date)a[3], (String)a[4])); static { PARSER.declareString(optionalConstructorArg(), Job.ID); @@ -41,11 +41,11 @@ public class AuditMessage extends AbstractAuditMessage { PARSER.declareString(optionalConstructorArg(), NODE_NAME); } - public AuditMessage(String resourceId, String message, Level level, String nodeName) { + public AnomalyDetectionAuditMessage(String resourceId, String message, Level level, String nodeName) { super(resourceId, message, level, nodeName); } - protected AuditMessage(String resourceId, String message, Level level, Date timestamp, String nodeName) { + protected AnomalyDetectionAuditMessage(String resourceId, String message, Level level, Date timestamp, String nodeName) { super(resourceId, message, level, timestamp, nodeName); } @@ -54,11 +54,11 @@ public class AuditMessage extends AbstractAuditMessage { return Job.ID.getPreferredName(); } - public static AbstractBuilder builder() { - return new AbstractBuilder() { + public static AbstractBuilder builder() { + return new AbstractBuilder() { @Override - protected AuditMessage newMessage(Level level, String resourceId, String message, String nodeName) { - return new AuditMessage(resourceId, message, level, nodeName); + protected AnomalyDetectionAuditMessage newMessage(Level level, String resourceId, String message, String nodeName) { + return new AnomalyDetectionAuditMessage(resourceId, message, level, nodeName); } }; } diff --git a/x-pack/plugin/core/src/main/resources/security-index-template-7.json b/x-pack/plugin/core/src/main/resources/security-index-template-7.json index dae6462b7a6f..8b4eed3bb1e1 100644 --- a/x-pack/plugin/core/src/main/resources/security-index-template-7.json +++ b/x-pack/plugin/core/src/main/resources/security-index-template-7.json @@ -6,6 +6,7 @@ "number_of_replicas" : 0, "auto_expand_replicas" : "0-1", "index.priority": 1000, + "index.refresh_interval": "1s", "index.format": 6, "analysis" : { "filter" : { diff --git a/x-pack/plugin/core/src/main/resources/security-tokens-index-template-7.json b/x-pack/plugin/core/src/main/resources/security-tokens-index-template-7.json index 312d9ff9e3f5..502daae3f79b 100644 --- a/x-pack/plugin/core/src/main/resources/security-tokens-index-template-7.json +++ b/x-pack/plugin/core/src/main/resources/security-tokens-index-template-7.json @@ -6,6 +6,7 @@ "number_of_replicas" : 0, "auto_expand_replicas" : "0-1", "index.priority": 1000, + "index.refresh_interval": "1s", "index.format": 7 }, "mappings" : { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AuditorTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditorTests.java similarity index 86% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AuditorTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditorTests.java index 1389af62dc71..a3c168d391d3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AuditorTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditorTests.java @@ -28,14 +28,15 @@ import static org.mockito.Mockito.mock; import static 
org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -public class AuditorTests extends ESTestCase { - private Client client; - private ArgumentCaptor indexRequestCaptor; +public class AbstractAuditorTests extends ESTestCase { + + private static final String TEST_NODE_NAME = "node_1"; private static final String TEST_ORIGIN = "test_origin"; private static final String TEST_INDEX = "test_index"; - private static final AbstractAuditMessage.AbstractBuilder builder = - AbstractAuditMessageTests.TestAuditMessage.newBuilder(); - + + private Client client; + private ArgumentCaptor indexRequestCaptor; + @Before public void setUpMocks() { client = mock(Client.class); @@ -47,7 +48,7 @@ public class AuditorTests extends ESTestCase { } public void testInfo() throws IOException { - Auditor auditor = new Auditor<>(client, "node_1", TEST_INDEX, TEST_ORIGIN, builder); + AbstractAuditor auditor = new TestAuditor(client); auditor.info("foo", "Here is my info"); verify(client).index(indexRequestCaptor.capture(), any()); @@ -61,7 +62,7 @@ public class AuditorTests extends ESTestCase { } public void testWarning() throws IOException { - Auditor auditor = new Auditor<>(client, "node_1", TEST_INDEX, TEST_ORIGIN, builder); + AbstractAuditor auditor = new TestAuditor(client); auditor.warning("bar", "Here is my warning"); verify(client).index(indexRequestCaptor.capture(), any()); @@ -75,7 +76,7 @@ public class AuditorTests extends ESTestCase { } public void testError() throws IOException { - Auditor auditor = new Auditor<>(client, "node_1", TEST_INDEX, TEST_ORIGIN, builder); + AbstractAuditor auditor = new TestAuditor(client); auditor.error("foobar", "Here is my error"); verify(client).index(indexRequestCaptor.capture(), any()); @@ -93,4 +94,10 @@ public class AuditorTests extends ESTestCase { .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, msg.streamInput()); return AbstractAuditMessageTests.TestAuditMessage.PARSER.apply(parser, null); } + + static class TestAuditor extends AbstractAuditor { + TestAuditor(Client client) { + super(client, TEST_NODE_NAME, TEST_INDEX, TEST_ORIGIN, AbstractAuditMessageTests.TestAuditMessage.newBuilder()); + } + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/LevelTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/LevelTests.java index a66d230b4678..78952c6d75cf 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/LevelTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/LevelTests.java @@ -12,18 +12,18 @@ import static org.hamcrest.Matchers.equalTo; public class LevelTests extends ESTestCase { public void testFromString() { - assertEquals(Level.INFO, Level.fromString("info")); - assertEquals(Level.INFO, Level.fromString("INFO")); - assertEquals(Level.WARNING, Level.fromString("warning")); - assertEquals(Level.WARNING, Level.fromString("WARNING")); - assertEquals(Level.ERROR, Level.fromString("error")); - assertEquals(Level.ERROR, Level.fromString("ERROR")); + assertThat(Level.fromString("info"), equalTo(Level.INFO)); + assertThat(Level.fromString("INFO"), equalTo(Level.INFO)); + assertThat(Level.fromString("warning"), equalTo(Level.WARNING)); + assertThat(Level.fromString("WARNING"), equalTo(Level.WARNING)); + assertThat(Level.fromString("error"), equalTo(Level.ERROR)); + assertThat(Level.fromString("ERROR"), equalTo(Level.ERROR)); } public void 
testToString() { - assertEquals("info", Level.INFO.toString()); - assertEquals("warning", Level.WARNING.toString()); - assertEquals("error", Level.ERROR.toString()); + assertThat(Level.INFO.toString(), equalTo("info")); + assertThat(Level.WARNING.toString(), equalTo("warning")); + assertThat(Level.ERROR.toString(), equalTo("error")); } public void testValidOrdinals() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessagesTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessagesTests.java index fc67dc8ce64e..b6284af6c58d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessagesTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessagesTests.java @@ -51,7 +51,7 @@ public class DataFrameMessagesTests extends ESTestCase { try { innerAssertSingleMessage(message); } catch (Exception e) { - fail(e.getMessage()); + fail("message: " + message + " failure: " + e.getMessage()); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskActionRequestTests.java index 8d3d8e3ac789..b6fb6b94d541 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskActionRequestTests.java @@ -13,7 +13,7 @@ public class StartDataFrameTransformTaskActionRequestTests extends AbstractWireSerializingTestCase { @Override protected StartDataFrameTransformTaskAction.Request createTestInstance() { - return new StartDataFrameTransformTaskAction.Request(randomAlphaOfLength(4)); + return new StartDataFrameTransformTaskAction.Request(randomAlphaOfLength(4), randomBoolean()); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/EstimateMemoryUsageActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/EstimateMemoryUsageActionRequestTests.java deleted file mode 100644 index 6a8f82412e99..000000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/EstimateMemoryUsageActionRequestTests.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.core.ml.action; - -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.SearchModule; -import org.elasticsearch.test.AbstractSerializingTestCase; -import org.elasticsearch.xpack.core.ml.action.EstimateMemoryUsageAction.Request; -import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfigTests; -import org.elasticsearch.xpack.core.ml.dataframe.analyses.MlDataFrameAnalysisNamedXContentProvider; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -public class EstimateMemoryUsageActionRequestTests extends AbstractSerializingTestCase { - - @Override - protected NamedWriteableRegistry getNamedWriteableRegistry() { - List namedWriteables = new ArrayList<>(); - namedWriteables.addAll(new MlDataFrameAnalysisNamedXContentProvider().getNamedWriteables()); - namedWriteables.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedWriteables()); - return new NamedWriteableRegistry(namedWriteables); - } - - @Override - protected NamedXContentRegistry xContentRegistry() { - List namedXContent = new ArrayList<>(); - namedXContent.addAll(new MlDataFrameAnalysisNamedXContentProvider().getNamedXContentParsers()); - namedXContent.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents()); - return new NamedXContentRegistry(namedXContent); - } - - @Override - protected Request createTestInstance() { - return new Request(DataFrameAnalyticsConfigTests.createRandom("dummy")); - } - - @Override - protected Writeable.Reader instanceReader() { - return Request::new; - } - - @Override - protected Request doParseInstance(XContentParser parser) { - return Request.parseRequest(parser); - } -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/EstimateMemoryUsageActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/EstimateMemoryUsageActionResponseTests.java index e6b9f4a99a25..9120f878d22c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/EstimateMemoryUsageActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/EstimateMemoryUsageActionResponseTests.java @@ -35,13 +35,13 @@ public class EstimateMemoryUsageActionResponseTests extends AbstractSerializingT public void testConstructor_NullValues() { Response response = new Response(null, null); - assertThat(response.getExpectedMemoryUsageWithOnePartition(), nullValue()); - assertThat(response.getExpectedMemoryUsageWithMaxPartitions(), nullValue()); + assertThat(response.getExpectedMemoryWithoutDisk(), nullValue()); + assertThat(response.getExpectedMemoryWithDisk(), nullValue()); } public void testConstructor() { Response response = new Response(new ByteSizeValue(2048), new ByteSizeValue(1024)); - assertThat(response.getExpectedMemoryUsageWithOnePartition(), equalTo(new ByteSizeValue(2048))); - assertThat(response.getExpectedMemoryUsageWithMaxPartitions(), equalTo(new ByteSizeValue(1024))); + assertThat(response.getExpectedMemoryWithoutDisk(), equalTo(new ByteSizeValue(2048))); + assertThat(response.getExpectedMemoryWithDisk(), equalTo(new ByteSizeValue(1024))); } } diff --git 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/MeanSquaredErrorTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/MeanSquaredErrorTests.java index 435135147476..a22c499220ce 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/MeanSquaredErrorTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/MeanSquaredErrorTests.java @@ -17,9 +17,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collections; -import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -64,7 +62,7 @@ public class MeanSquaredErrorTests extends AbstractSerializingTestCase { RSquared rSquared = new RSquared(); EvaluationMetricResult result = rSquared.evaluate(aggs); - assertThat(result, is(nullValue())); + assertThat(result, equalTo(new RSquared.Result(0.0))); } public void testEvaluate_GivenMissingAggs() { + EvaluationMetricResult zeroResult = new RSquared.Result(0.0); Aggregations aggs = new Aggregations(Collections.singletonList( createSingleMetricAgg("some_other_single_metric_agg", 0.2377) )); RSquared rSquared = new RSquared(); EvaluationMetricResult result = rSquared.evaluate(aggs); - assertThat(result, is(nullValue())); + assertThat(result, equalTo(zeroResult)); aggs = new Aggregations(Arrays.asList( createSingleMetricAgg("some_other_single_metric_agg", 0.2377), @@ -88,7 +87,7 @@ public class RSquaredTests extends AbstractSerializingTestCase { )); result = rSquared.evaluate(aggs); - assertThat(result, is(nullValue())); + assertThat(result, equalTo(zeroResult)); aggs = new Aggregations(Arrays.asList( createSingleMetricAgg("some_other_single_metric_agg", 0.2377), @@ -96,7 +95,7 @@ public class RSquaredTests extends AbstractSerializingTestCase { )); result = rSquared.evaluate(aggs); - assertThat(result, is(nullValue())); + assertThat(result, equalTo(zeroResult)); } private static NumericMetricsAggregation.SingleValue createSingleMetricAgg(String name, double value) { diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java index 0877fe22c78f..b7aaa5c567cc 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java @@ -68,7 +68,7 @@ public class DataFrameAuditorIT extends DataFrameRestTestCase { assertBusy(() -> { assertTrue(indexExists(DataFrameInternalIndex.AUDIT_INDEX)); }); - // Since calls to write the Auditor are sent and forgot (async) we could have returned from the start, + // Since calls to write the AbstractAuditor are sent and forgot (async) we could have returned from the start, // finished the job (as this is a very short DF job), all without the audit being fully written. 
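Because the auditor writes are asynchronous fire-and-forget calls, assertions against the audit index have to be retried until the document actually becomes searchable; the test's own assertBusy block follows. A sketch of that polling idiom, assuming the `refreshIndex` and `entityAsMap` helpers from the surrounding REST test base classes and an illustrative hit-count assertion:

--------------------------------------------------
// Retry for up to 30 seconds until the asynchronously written audit message is visible.
assertBusy(() -> {
    refreshIndex(DataFrameInternalIndex.AUDIT_INDEX);
    Map<String, Object> search = entityAsMap(client().performRequest(
        new Request("GET", "/" + DataFrameInternalIndex.AUDIT_INDEX + "/_search")));
    // require that at least one audit document has been indexed
    assertThat((int) XContentMapValues.extractValue("hits.total.value", search), greaterThan(0));
}, 30, TimeUnit.SECONDS);
--------------------------------------------------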
assertBusy(() -> { refreshIndex(DataFrameInternalIndex.AUDIT_INDEX); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java index 445c00e9b372..edd8eb44a9fe 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java @@ -37,9 +37,14 @@ public class DataFrameTaskFailedStateIT extends DataFrameRestTestCase { @Before public void setClusterSettings() throws IOException { // Make sure we never retry on failure to speed up the test + // Set logging level to trace + // see: https://github.com/elastic/elasticsearch/issues/45562 Request addFailureRetrySetting = new Request("PUT", "/_cluster/settings"); addFailureRetrySetting.setJsonEntity( - "{\"persistent\": {\"xpack.data_frame.num_transform_failure_retries\": \"" + 0 + "\"}}"); + "{\"transient\": {\"xpack.data_frame.num_transform_failure_retries\": \"" + 0 + "\"," + + "\"logger.org.elasticsearch.action.bulk\": \"info\"," + // reduces bulk failure spam + "\"logger.org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer\": \"trace\"," + + "\"logger.org.elasticsearch.xpack.dataframe\": \"trace\"}}"); client().performRequest(addFailureRetrySetting); } @@ -84,7 +89,6 @@ public class DataFrameTaskFailedStateIT extends DataFrameRestTestCase { assertThat(XContentMapValues.extractValue("reason", fullState), is(nullValue())); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/45609") public void testForceStartFailedTransform() throws Exception { String transformId = "test-force-start-failed-transform"; createReviewsIndex(REVIEWS_INDEX_NAME, 10); @@ -100,13 +104,16 @@ public class DataFrameTaskFailedStateIT extends DataFrameRestTestCase { // Verify we have failed for the expected reason assertThat(XContentMapValues.extractValue("reason", fullState), equalTo(failureReason)); + final String expectedFailure = "Unable to start data frame transform [test-force-start-failed-transform] " + + "as it is in a failed state with failure: [" + failureReason + + "]. Use force start to restart data frame transform once error is resolved."; // Verify that we cannot start the transform when the task is in a failed state - ResponseException ex = expectThrows(ResponseException.class, () -> startDataframeTransform(transformId, false)); - assertThat(ex.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.CONFLICT.getStatus())); - assertThat(XContentMapValues.extractValue("error.reason", entityAsMap(ex.getResponse())), - equalTo("Unable to start data frame transform [test-force-start-failed-transform] as it is in a failed state with failure: [" + - failureReason + - "]. 
Use force start to restart data frame transform once error is resolved.")); + assertBusy(() -> { + ResponseException ex = expectThrows(ResponseException.class, () -> startDataframeTransform(transformId, false)); + assertThat(ex.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.CONFLICT.getStatus())); + assertThat(XContentMapValues.extractValue("error.reason", entityAsMap(ex.getResponse())), + equalTo(expectedFailure)); + }, 60, TimeUnit.SECONDS); // Correct the failure by deleting the destination index deleteIndex(dataFrameIndex); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java index c0f0bbc942e5..622a373b5021 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java @@ -161,7 +161,7 @@ public class TransportStartDataFrameTransformAction extends ClientHelper.executeAsyncWithOrigin(client, ClientHelper.DATA_FRAME_ORIGIN, StartDataFrameTransformTaskAction.INSTANCE, - new StartDataFrameTransformTaskAction.Request(request.getId()), + new StartDataFrameTransformTaskAction.Request(request.getId(), request.isForce()), ActionListener.wrap( r -> listener.onResponse(new StartDataFrameTransformAction.Response(true)), listener::onFailure)); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformTaskAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformTaskAction.java index f8e3a3f1e852..17df98e0c2b3 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformTaskAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformTaskAction.java @@ -59,7 +59,7 @@ public class TransportStartDataFrameTransformTaskAction extends protected void taskOperation(StartDataFrameTransformTaskAction.Request request, DataFrameTransformTask transformTask, ActionListener listener) { if (transformTask.getTransformId().equals(request.getId())) { - transformTask.start(null, listener); + transformTask.start(null, request.isForce(), listener); } else { listener.onFailure(new RuntimeException("ID of data frame transform task [" + transformTask.getTransformId() + "] does not match request's ID [" + request.getId() + "]")); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java index c2ddcdb9bac2..48ac658055f3 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.dataframe.action; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import 
org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; @@ -19,6 +20,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.MasterNotDiscoveredException; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; @@ -30,6 +32,8 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.action.util.PageParams; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; +import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction.Request; +import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction.Response; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; @@ -37,16 +41,15 @@ import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigMa import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; import java.util.ArrayList; -import java.util.Collection; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import static org.elasticsearch.xpack.core.dataframe.DataFrameMessages.DATA_FRAME_CANNOT_STOP_FAILED_TRANSFORM; -public class TransportStopDataFrameTransformAction extends - TransportTasksAction { +public class TransportStopDataFrameTransformAction extends TransportTasksAction { private static final Logger logger = LogManager.getLogger(TransportStopDataFrameTransformAction.class); @@ -61,8 +64,8 @@ public class TransportStopDataFrameTransformAction extends PersistentTasksService persistentTasksService, DataFrameTransformsConfigManager dataFrameTransformsConfigManager, Client client) { - super(StopDataFrameTransformAction.NAME, clusterService, transportService, actionFilters, StopDataFrameTransformAction.Request::new, - StopDataFrameTransformAction.Response::new, StopDataFrameTransformAction.Response::new, ThreadPool.Names.SAME); + super(StopDataFrameTransformAction.NAME, clusterService, transportService, actionFilters, Request::new, + Response::new, Response::new, ThreadPool.Names.SAME); this.threadPool = threadPool; this.dataFrameTransformsConfigManager = dataFrameTransformsConfigManager; this.persistentTasksService = persistentTasksService; @@ -96,8 +99,7 @@ public class TransportStopDataFrameTransformAction extends } @Override - protected void doExecute(Task task, StopDataFrameTransformAction.Request request, - ActionListener listener) { + protected void doExecute(Task task, Request request, ActionListener listener) { final ClusterState state = clusterService.state(); final DiscoveryNodes nodes = state.nodes(); if (nodes.isLocalNodeElectedMaster() == false) { @@ -106,10 +108,10 @@ public class TransportStopDataFrameTransformAction extends listener.onFailure(new MasterNotDiscoveredException("no known master node")); } else { transportService.sendRequest(nodes.getMasterNode(), actionName, request, - new ActionListenerResponseHandler<>(listener, 
StopDataFrameTransformAction.Response::new)); + new ActionListenerResponseHandler<>(listener, Response::new)); } } else { - final ActionListener finalListener; + final ActionListener finalListener; if (request.waitForCompletion()) { finalListener = waitForStopListener(request, listener); } else { @@ -131,8 +133,7 @@ public class TransportStopDataFrameTransformAction extends } @Override - protected void taskOperation(StopDataFrameTransformAction.Request request, DataFrameTransformTask transformTask, - ActionListener listener) { + protected void taskOperation(Request request, DataFrameTransformTask transformTask, ActionListener listener) { Set ids = request.getExpandedIds(); if (ids == null) { @@ -141,20 +142,13 @@ public class TransportStopDataFrameTransformAction extends } if (ids.contains(transformTask.getTransformId())) { - // This should not occur as we check that none of the tasks are in a failed state earlier - // Keep this check in here for insurance. - if (transformTask.getState().getTaskState() == DataFrameTransformTaskState.FAILED && request.isForce() == false) { - listener.onFailure( - new ElasticsearchStatusException( - DataFrameMessages.getMessage(DATA_FRAME_CANNOT_STOP_FAILED_TRANSFORM, - request.getId(), - transformTask.getState().getReason()), - RestStatus.CONFLICT)); + try { + transformTask.stop(request.isForce()); + } catch (ElasticsearchException ex) { + listener.onFailure(ex); return; } - - transformTask.stop(); - listener.onResponse(new StopDataFrameTransformAction.Response(Boolean.TRUE)); + listener.onResponse(new Response(Boolean.TRUE)); } else { listener.onFailure(new RuntimeException("ID of data frame indexer task [" + transformTask.getTransformId() + "] does not match request's ID [" + request.getId() + "]")); @@ -162,24 +156,22 @@ public class TransportStopDataFrameTransformAction extends } @Override - protected StopDataFrameTransformAction.Response newResponse(StopDataFrameTransformAction.Request request, - List tasks, List taskOperationFailures, - List failedNodeExceptions) { + protected StopDataFrameTransformAction.Response newResponse(Request request, + List tasks, + List taskOperationFailures, + List failedNodeExceptions) { if (taskOperationFailures.isEmpty() == false || failedNodeExceptions.isEmpty() == false) { - return new StopDataFrameTransformAction.Response(taskOperationFailures, failedNodeExceptions, false); + return new Response(taskOperationFailures, failedNodeExceptions, false); } // if tasks is empty allMatch is 'vacuously satisfied' - boolean allAcknowledged = tasks.stream().allMatch(StopDataFrameTransformAction.Response::isAcknowledged); - return new StopDataFrameTransformAction.Response(allAcknowledged); + return new Response(tasks.stream().allMatch(Response::isAcknowledged)); } - private ActionListener - waitForStopListener(StopDataFrameTransformAction.Request request, - ActionListener listener) { + private ActionListener waitForStopListener(Request request, ActionListener listener) { - ActionListener onStopListener = ActionListener.wrap( + ActionListener onStopListener = ActionListener.wrap( waitResponse -> client.admin() .indices() @@ -198,37 +190,71 @@ public class TransportStopDataFrameTransformAction extends // Wait until the persistent task is stopped // Switch over to Generic threadpool so we don't block the network thread threadPool.generic().execute(() -> - waitForDataFrameStopped(request.getExpandedIds(), request.getTimeout(), onStopListener)); + waitForDataFrameStopped(request.getExpandedIds(), request.getTimeout(), 
request.isForce(), onStopListener)); }, listener::onFailure ); } - private void waitForDataFrameStopped(Collection persistentTaskIds, TimeValue timeout, - ActionListener listener) { + private void waitForDataFrameStopped(Set persistentTaskIds, + TimeValue timeout, + boolean force, + ActionListener listener) { + // This map is accessed in the predicate and the listener callbacks + final Map exceptions = new ConcurrentHashMap<>(); persistentTasksService.waitForPersistentTasksCondition(persistentTasksCustomMetaData -> { - if (persistentTasksCustomMetaData == null) { return true; } - for (String persistentTaskId : persistentTaskIds) { - if (persistentTasksCustomMetaData.getTask(persistentTaskId) != null) { - return false; + PersistentTasksCustomMetaData.PersistentTask transformsTask = persistentTasksCustomMetaData.getTask(persistentTaskId); + // Either the task has successfully stopped or we have seen that it has failed + if (transformsTask == null || exceptions.containsKey(persistentTaskId)) { + continue; } + + // If force is true, then it should eventually go away, don't add it to the collection of failures. + DataFrameTransformState taskState = (DataFrameTransformState)transformsTask.getState(); + if (force == false && taskState != null && taskState.getTaskState() == DataFrameTransformTaskState.FAILED) { + exceptions.put(persistentTaskId, new ElasticsearchStatusException( + DataFrameMessages.getMessage(DATA_FRAME_CANNOT_STOP_FAILED_TRANSFORM, + persistentTaskId, + taskState.getReason()), + RestStatus.CONFLICT)); + + // If all the tasks are now flagged as failed, do not wait for another ClusterState update. + // Return to the caller as soon as possible + return persistentTasksCustomMetaData.tasks().stream().allMatch(p -> exceptions.containsKey(p.getId())); + } + return false; } return true; + }, timeout, ActionListener.wrap( + r -> { + // No exceptions AND the tasks have gone away + if (exceptions.isEmpty()) { + listener.onResponse(new Response(Boolean.TRUE)); + return; + } - }, timeout, new ActionListener<>() { - @Override - public void onResponse(Boolean result) { - listener.onResponse(new StopDataFrameTransformAction.Response(Boolean.TRUE)); - } + // We are only stopping one task, so if there is a failure, it is the only one + if (persistentTaskIds.size() == 1) { + listener.onFailure(exceptions.get(persistentTaskIds.iterator().next())); + return; + } - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + Set stoppedTasks = new HashSet<>(persistentTaskIds); + stoppedTasks.removeAll(exceptions.keySet()); + String message = stoppedTasks.isEmpty() ? + "Could not stop any of the tasks as all were failed. Use force stop to stop the transforms." : + LoggerMessageFormat.format("Successfully stopped [{}] transforms. " + + "Could not stop the transforms {} as they were failed. 
Use force stop to stop the transforms.", + stoppedTasks.size(), + exceptions.keySet()); + + listener.onFailure(new ElasticsearchStatusException(message, RestStatus.CONFLICT)); + }, + listener::onFailure + )); } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/notifications/DataFrameAuditor.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/notifications/DataFrameAuditor.java index e02954a280b2..e756182f9c5c 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/notifications/DataFrameAuditor.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/notifications/DataFrameAuditor.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.dataframe.notifications; import org.elasticsearch.client.Client; -import org.elasticsearch.xpack.core.common.notifications.Auditor; +import org.elasticsearch.xpack.core.common.notifications.AbstractAuditor; import org.elasticsearch.xpack.core.dataframe.notifications.DataFrameAuditMessage; import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; @@ -15,7 +15,7 @@ import static org.elasticsearch.xpack.core.ClientHelper.DATA_FRAME_ORIGIN; /** * DataFrameAuditor class that abstracts away generic templating for easier injection */ -public class DataFrameAuditor extends Auditor { +public class DataFrameAuditor extends AbstractAuditor { public DataFrameAuditor(Client client, String nodeName) { super(client, nodeName, DataFrameInternalIndex.AUDIT_INDEX, DATA_FRAME_ORIGIN, DataFrameAuditMessage.builder()); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java index 4ce0b5d7ed57..dc37e937ea13 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java @@ -35,7 +35,6 @@ import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheck import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStoredDoc; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; import org.elasticsearch.xpack.dataframe.DataFrame; @@ -120,13 +119,14 @@ public class DataFrameTransformPersistentTasksExecutor extends PersistentTasksEx protected void nodeOperation(AllocatedPersistentTask task, @Nullable DataFrameTransform params, PersistentTaskState state) { final String transformId = params.getId(); final DataFrameTransformTask buildTask = (DataFrameTransformTask) task; - final DataFrameTransformState transformPTaskState = (DataFrameTransformState) state; - // If the transform is failed then the Persistent Task Service will - // try to restart it on a node restart. Exiting here leaves the - // transform in the failed state and it must be force closed. 
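To make the revised stop flow above easier to follow: TransportStopDataFrameTransformAction now waits for the persistent tasks to disappear while recording, per task id, any transform that is FAILED (and not force-stopped) in a ConcurrentHashMap, and then answers with an acknowledgement, the single underlying failure, or an aggregate conflict message. The following is a minimal, self-contained Java sketch of that summarisation step only; the class and method names are illustrative and are not part of the patch.

----------------------------------------
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Simplified stand-in for the result handling added to waitForDataFrameStopped above:
// failures recorded per task id while waiting decide whether the caller gets an
// acknowledgement, the single underlying failure, or an aggregate conflict message.
final class StopResultSketch {

    static String summarize(Set<String> taskIds, Map<String, Exception> failures) {
        if (failures.isEmpty()) {
            // no exceptions and the tasks have gone away
            return "acknowledged";
        }
        if (taskIds.size() == 1) {
            // only one task was requested, so surface its failure directly
            return "failed: " + failures.values().iterator().next().getMessage();
        }
        Set<String> stopped = new HashSet<>(taskIds);
        stopped.removeAll(failures.keySet());
        if (stopped.isEmpty()) {
            return "Could not stop any of the tasks as all were failed. Use force stop to stop the transforms.";
        }
        return "Successfully stopped [" + stopped.size() + "] transforms. Could not stop the transforms "
            + failures.keySet() + " as they were failed. Use force stop to stop the transforms.";
    }

    public static void main(String[] args) {
        Map<String, Exception> failures = new ConcurrentHashMap<>();
        failures.put("transform-2", new IllegalStateException("task is in a failed state"));
        System.out.println(summarize(Set.of("transform-1", "transform-2"), failures));
    }
}
----------------------------------------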
- if (transformPTaskState != null && transformPTaskState.getTaskState() == DataFrameTransformTaskState.FAILED) { - return; - } + // NOTE: DataFrameTransformPersistentTasksExecutor#createTask pulls in the stored task state from the ClusterState when the object + // is created. DataFrameTransformTask#ctor takes into account setting the task as failed if that is passed in with the + // persisted state. + // DataFrameTransformPersistentTasksExecutor#startTask will fail as DataFrameTransformTask#start, when force == false, will return + // a failure indicating that a failed task cannot be started. + // + // We want the rest of the state to be populated in the task when it is loaded on the node so that users can force start it again + // later if they want. final DataFrameTransformTask.ClientDataFrameIndexerBuilder indexerBuilder = new DataFrameTransformTask.ClientDataFrameIndexerBuilder(transformId) @@ -298,7 +298,8 @@ public class DataFrameTransformPersistentTasksExecutor extends PersistentTasksEx Long previousCheckpoint, ActionListener listener) { buildTask.initializeIndexer(indexerBuilder); - buildTask.setNumFailureRetries(numFailureRetries).start(previousCheckpoint, listener); + // DataFrameTransformTask#start will fail if the task state is FAILED + buildTask.setNumFailureRetries(numFailureRetries).start(previousCheckpoint, false, listener); } private void setNumFailureRetries(int numFailureRetries) { diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java index 974566a491ab..641e3a0d1d77 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.bulk.BulkAction; @@ -26,6 +27,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ClientHelper; @@ -60,6 +62,9 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import static org.elasticsearch.xpack.core.dataframe.DataFrameMessages.DATA_FRAME_CANNOT_START_FAILED_TRANSFORM; +import static org.elasticsearch.xpack.core.dataframe.DataFrameMessages.DATA_FRAME_CANNOT_STOP_FAILED_TRANSFORM; + public class DataFrameTransformTask extends AllocatedPersistentTask implements SchedulerEngine.Listener { @@ -230,7 +235,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S } } - public void setTaskStateStopped() { + public synchronized void setTaskStateStopped() { taskState.set(DataFrameTransformTaskState.STOPPED); } @@ -240,10 +245,27 @@ public class 
DataFrameTransformTask extends AllocatedPersistentTask implements S * current checkpoint is not set * @param listener Started listener */ - public synchronized void start(Long startingCheckpoint, ActionListener listener) { + public synchronized void start(Long startingCheckpoint, boolean force, ActionListener listener) { + logger.debug("[{}] start called with force [{}] and state [{}]", getTransformId(), force, getState()); + if (taskState.get() == DataFrameTransformTaskState.FAILED && force == false) { + listener.onFailure(new ElasticsearchStatusException( + DataFrameMessages.getMessage(DATA_FRAME_CANNOT_START_FAILED_TRANSFORM, + getTransformId(), + stateReason.get()), + RestStatus.CONFLICT)); + return; + } if (getIndexer() == null) { - listener.onFailure(new ElasticsearchException("Task for transform [{}] not fully initialized. Try again later", - getTransformId())); + // If our state is failed AND the indexer is null, the user needs to _stop?force=true so that the indexer gets + // fully initialized. + // If we are NOT failed, then we can assume that `start` was just called early in the process. + String msg = taskState.get() == DataFrameTransformTaskState.FAILED ? + "It failed during the initialization process; force stop to allow reinitialization." : + "Try again later."; + listener.onFailure(new ElasticsearchStatusException("Task for transform [{}] not fully initialized. {}", + RestStatus.CONFLICT, + getTransformId(), + msg)); return; } final IndexerState newState = getIndexer().start(); @@ -289,7 +311,8 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S )); } - public synchronized void stop() { + public synchronized void stop(boolean force) { + logger.debug("[{}] stop called with force [{}] and state [{}]", getTransformId(), force, getState()); if (getIndexer() == null) { // If there is no indexer the task has not been triggered // but it still needs to be stopped and removed @@ -301,8 +324,20 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S return; } + if (taskState.get() == DataFrameTransformTaskState.FAILED && force == false) { + throw new ElasticsearchStatusException( + DataFrameMessages.getMessage(DATA_FRAME_CANNOT_STOP_FAILED_TRANSFORM, + getTransformId(), + stateReason.get()), + RestStatus.CONFLICT); + } + IndexerState state = getIndexer().stop(); stateReason.set(null); + // We just don't want it to be failed if it is failed + // Either we are running, and the STATE is already started or failed + // doSaveState should transfer the state to STOPPED when it needs to. 
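The start/stop changes in this hunk boil down to a force-aware guard: a transform whose task state is FAILED rejects both start and stop with a conflict unless force is true, and a forced stop clears the failure before the normal stop path runs. A compact, stand-alone sketch of that guard follows; it uses simplified names and a plain IllegalStateException in place of ElasticsearchStatusException with RestStatus.CONFLICT, so it illustrates the shape of the logic rather than the patch itself.

----------------------------------------
// Illustrative guard only; names are simplified and a plain IllegalStateException stands in
// for the ElasticsearchStatusException(..., RestStatus.CONFLICT) used by the real task.
final class TaskStateGuardSketch {

    enum TaskState { STARTED, STOPPED, FAILED }

    private TaskState state = TaskState.FAILED;
    private String reason = "task ran into a non-recoverable error";

    synchronized void start(boolean force) {
        if (state == TaskState.FAILED && force == false) {
            throw new IllegalStateException("cannot start failed transform, reason [" + reason + "]");
        }
        state = TaskState.STARTED;
        reason = null;
    }

    synchronized void stop(boolean force) {
        if (state == TaskState.FAILED && force == false) {
            throw new IllegalStateException("cannot stop failed transform, reason [" + reason + "]; use force stop");
        }
        // a (possibly forced) stop clears the failure; the save-state path later persists STOPPED
        state = TaskState.STARTED;
        reason = null;
    }

    public static void main(String[] args) {
        TaskStateGuardSketch task = new TaskStateGuardSketch();
        try {
            task.stop(false);      // rejected: task is FAILED and force is false
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage());
        }
        task.stop(true);           // forced stop succeeds
        System.out.println(task.state);
    }
}
----------------------------------------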
+ taskState.set(DataFrameTransformTaskState.STARTED); if (state == IndexerState.STOPPED) { getIndexer().onStop(); getIndexer().doSaveState(state, getIndexer().getPosition(), () -> {}); @@ -311,13 +346,13 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S @Override public synchronized void triggered(Event event) { - if (getIndexer() == null) { - logger.warn("Data frame task [{}] triggered with an unintialized indexer", getTransformId()); + // Ignore if event is not for this job + if (event.getJobName().equals(schedulerJobName()) == false) { return; } - // Ignore if event is not for this job - if (event.getJobName().equals(schedulerJobName()) == false) { + if (getIndexer() == null) { + logger.warn("Data frame task [{}] triggered with an unintialized indexer", getTransformId()); return; } @@ -382,39 +417,44 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S } synchronized void markAsFailed(String reason, ActionListener listener) { + // If we are already flagged as failed, this probably means that a second trigger started firing while we were attempting to + // flag the previously triggered indexer as failed. Exit early as we are already flagged as failed. + if (taskState.get() == DataFrameTransformTaskState.FAILED) { + logger.warn("[{}] is already failed but encountered new failure; reason [{}] ", getTransformId(), reason); + listener.onResponse(null); + return; + } // If the indexer is `STOPPING` this means that `DataFrameTransformTask#stop` was called previously, but something caused // the indexer to fail. Since `ClientDataFrameIndexer#doSaveState` will persist the state to the index once the indexer stops, // it is probably best to NOT change the internal state of the task and allow the normal stopping logic to continue. if (getIndexer() != null && getIndexer().getState() == IndexerState.STOPPING) { logger.info("Attempt to fail transform [" + getTransformId() + "] with reason [" + reason + "] while it was stopping."); auditor.info(getTransformId(), "Attempted to fail transform with reason [" + reason + "] while in STOPPING state."); + listener.onResponse(null); + return; + } + // If we are stopped, this means that between the failure occurring and being handled, somebody called stop + // We should just allow that stop to continue + if (getIndexer() != null && getIndexer().getState() == IndexerState.STOPPED) { + logger.info("[{}] encountered a failure but indexer is STOPPED; reason [{}]", getTransformId(), reason); + listener.onResponse(null); return; } auditor.error(transform.getId(), reason); // We should not keep retrying. Either the task will be stopped, or started // If it is started again, it is registered again. deregisterSchedulerJob(); - DataFrameTransformState newState = new DataFrameTransformState( - DataFrameTransformTaskState.FAILED, - initialIndexerState, - initialPosition, - currentCheckpoint.get(), - reason, - getIndexer() == null ? 
null : getIndexer().getProgress()); + taskState.set(DataFrameTransformTaskState.FAILED); + stateReason.set(reason); + DataFrameTransformState newState = getState(); // Even though the indexer information is persisted to an index, we still need DataFrameTransformTaskState in the clusterstate // This keeps track of STARTED, FAILED, STOPPED - // This is because a FAILED state can occur because we cannot read the config from the internal index, which would imply that + // This is because a FAILED state could occur because we failed to read the config from the internal index, which would imply that // we could not read the previous state information from said index. persistStateToClusterState(newState, ActionListener.wrap( - r -> { - taskState.set(DataFrameTransformTaskState.FAILED); - stateReason.set(reason); - listener.onResponse(null); - }, + r -> listener.onResponse(null), e -> { logger.error("Failed to set task state as failed to cluster state", e); - taskState.set(DataFrameTransformTaskState.FAILED); - stateReason.set(reason); listener.onFailure(e); } )); @@ -630,6 +670,11 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S @Override protected void onStart(long now, ActionListener listener) { + if (transformTask.taskState.get() == DataFrameTransformTaskState.FAILED) { + logger.debug("[{}] attempted to start while failed.", transformId); + listener.onFailure(new ElasticsearchException("Attempted to start a failed transform [{}].", transformId)); + return; + } // On each run, we need to get the total number of docs and reset the count of processed docs // Since multiple checkpoints can be executed in the task while it is running on the same node, we need to gather // the progress here, and not in the executor. @@ -746,12 +791,24 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S @Override protected void doNextSearch(SearchRequest request, ActionListener nextPhase) { + if (transformTask.taskState.get() == DataFrameTransformTaskState.FAILED) { + logger.debug("[{}] attempted to search while failed.", transformId); + nextPhase.onFailure(new ElasticsearchException("Attempted to do a search request for failed transform [{}].", + transformId)); + return; + } ClientHelper.executeWithHeadersAsync(transformConfig.getHeaders(), ClientHelper.DATA_FRAME_ORIGIN, client, SearchAction.INSTANCE, request, nextPhase); } @Override protected void doNextBulk(BulkRequest request, ActionListener nextPhase) { + if (transformTask.taskState.get() == DataFrameTransformTaskState.FAILED) { + logger.debug("[{}] attempted to bulk index while failed.", transformId); + nextPhase.onFailure(new ElasticsearchException("Attempted to do a bulk index request for failed transform [{}].", + transformId)); + return; + } ClientHelper.executeWithHeadersAsync(transformConfig.getHeaders(), ClientHelper.DATA_FRAME_ORIGIN, client, @@ -788,6 +845,12 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S @Override protected void doSaveState(IndexerState indexerState, DataFrameIndexerPosition position, Runnable next) { + if (transformTask.taskState.get() == DataFrameTransformTaskState.FAILED) { + logger.debug("[{}] attempted to save state and stats while failed.", transformId); + // If we are failed, we should call next to allow failure handling to occur if necessary. 
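The same defensive pattern recurs across onStart, doNextSearch, doNextBulk and doSaveState in the hunks around here: each indexer step first checks whether the owning task has been marked FAILED and short-circuits if so, with doSaveState still invoking the continuation so that failure handling can run. A minimal illustrative sketch of that guard, deliberately not tied to the Elasticsearch classes, is:

----------------------------------------
import java.util.concurrent.atomic.AtomicReference;

// Illustrative only: every indexer step consults the owning task's state first and bails
// out when it is FAILED; the save-state variant still runs the continuation so that
// failure handling can proceed.
final class FailedTaskGuardSketch {

    enum TaskState { STARTED, FAILED }

    private final AtomicReference<TaskState> taskState = new AtomicReference<>(TaskState.STARTED);

    void runStep(Runnable step, Runnable continuation) {
        if (taskState.get() == TaskState.FAILED) {
            continuation.run();    // skip the work, but let the caller's chain continue
            return;
        }
        step.run();
    }

    public static void main(String[] args) {
        FailedTaskGuardSketch guard = new FailedTaskGuardSketch();
        guard.runStep(() -> System.out.println("searching"), () -> System.out.println("skipped"));
        guard.taskState.set(TaskState.FAILED);
        guard.runStep(() -> System.out.println("searching"), () -> System.out.println("skipped"));
    }
}
----------------------------------------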
+ next.run(); + return; + } if (indexerState.equals(IndexerState.ABORTING)) { // If we're aborting, just invoke `next` (which is likely an onFailure handler) next.run(); @@ -831,7 +894,6 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S r -> { // for auto stop shutdown the task if (state.getTaskState().equals(DataFrameTransformTaskState.STOPPED)) { - onStop(); transformTask.shutdown(); } next.run(); @@ -853,8 +915,6 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S protected void onFailure(Exception exc) { // the failure handler must not throw an exception due to internal problems try { - logger.warn("Data frame transform [" + transformTask.getTransformId() + "] encountered an exception: ", exc); - // Since our schedule fires again very quickly after failures it is possible to run into the same failure numerous // times in a row, very quickly. We do not want to spam the audit log with repeated failures, so only record the first one if (exc.getMessage().equals(lastAuditedExceptionMessage) == false) { @@ -989,6 +1049,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S } synchronized void handleFailure(Exception e) { + logger.warn("Data frame transform [" + transformTask.getTransformId() + "] encountered an exception: ", e); if (handleCircuitBreakingException(e)) { return; } @@ -1003,7 +1064,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S @Override protected void failIndexer(String failureMessage) { - logger.error("Data frame transform [" + getJobId() + "]:" + failureMessage); + logger.error("Data frame transform [" + getJobId() + "]: " + failureMessage); auditor.error(transformTask.getTransformId(), failureMessage); transformTask.markAsFailed(failureMessage, ActionListener.wrap( r -> { diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java index 88ce82729df1..9a77d94fd0e5 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java @@ -36,7 +36,7 @@ public class DeprecationChecks { static List> NODE_SETTINGS_CHECKS = Collections.emptyList(); static List> INDEX_SETTINGS_CHECKS = - Collections.singletonList(IndexDeprecationChecks::oldIndicesCheck); + List.of(IndexDeprecationChecks::oldIndicesCheck, IndexDeprecationChecks::translogRetentionSettingCheck); static List> ML_SETTINGS_CHECKS = List.of(MlDeprecationChecks::checkDataFeedAggregations, MlDeprecationChecks::checkDataFeedQuery); diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java index 7defb80ccaa6..65348b3a77c7 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java @@ -10,6 +10,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.index.IndexSettings; import 
org.elasticsearch.xpack.core.deprecation.DeprecationIssue; import java.util.ArrayList; @@ -84,4 +85,19 @@ public class IndexDeprecationChecks { } return null; } + + static DeprecationIssue translogRetentionSettingCheck(IndexMetaData indexMetaData) { + final boolean softDeletesEnabled = IndexSettings.INDEX_SOFT_DELETES_SETTING.get(indexMetaData.getSettings()); + if (softDeletesEnabled) { + if (IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.exists(indexMetaData.getSettings()) + || IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.exists(indexMetaData.getSettings())) { + return new DeprecationIssue(DeprecationIssue.Level.WARNING, + "translog retention settings are ignored", + "https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-translog.html", + "translog retention settings [index.translog.retention.size] and [index.translog.retention.age] are ignored " + + "because translog is no longer used in peer recoveries with soft-deletes enabled (default in 7.0 or later)"); + } + } + return null; + } } diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java index 57ded2d069d3..ac27e13f80e9 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java @@ -8,6 +8,8 @@ package org.elasticsearch.xpack.deprecation; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; @@ -15,6 +17,8 @@ import java.util.List; import static java.util.Collections.singletonList; import static org.elasticsearch.xpack.deprecation.DeprecationChecks.INDEX_SETTINGS_CHECKS; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.collection.IsIterableContainingInOrder.contains; public class IndexDeprecationChecksTests extends ESTestCase { public void testOldIndicesCheck() { @@ -32,4 +36,31 @@ public class IndexDeprecationChecksTests extends ESTestCase { List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetaData)); assertEquals(singletonList(expected), issues); } + + public void testTranslogRetentionSettings() { + Settings.Builder settings = settings(Version.CURRENT); + settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), randomPositiveTimeValue()); + settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), between(1, 1024) + "b"); + IndexMetaData indexMetaData = IndexMetaData.builder("test").settings(settings).numberOfShards(1).numberOfReplicas(0).build(); + List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetaData)); + assertThat(issues, contains( + new DeprecationIssue(DeprecationIssue.Level.WARNING, + "translog retention settings are ignored", + "https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-translog.html", + "translog retention settings [index.translog.retention.size] and [index.translog.retention.age] are ignored " + + "because translog is no longer used in peer recoveries with soft-deletes enabled (default in 7.0 or later)") + )); + } + + public void 
testDefaultTranslogRetentionSettings() { + Settings.Builder settings = settings(Version.CURRENT); + if (randomBoolean()) { + settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), randomPositiveTimeValue()); + settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), between(1, 1024) + "b"); + settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false); + } + IndexMetaData indexMetaData = IndexMetaData.builder("test").settings(settings).numberOfShards(1).numberOfReplicas(0).build(); + List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetaData)); + assertThat(issues, empty()); + } } diff --git a/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java index 6832433106be..4453830f3056 100644 --- a/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java +++ b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java @@ -403,7 +403,7 @@ public class FrozenIndexTests extends ESSingleNodeTestCase { public void testTranslogStats() { final String indexName = "test"; - createIndex(indexName, Settings.builder() + IndexService indexService = createIndex(indexName, Settings.builder() .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .build()); @@ -412,7 +412,6 @@ public class FrozenIndexTests extends ESSingleNodeTestCase { for (long i = 0; i < nbDocs; i++) { final IndexResponse indexResponse = client().prepareIndex(indexName, "_doc", Long.toString(i)).setSource("field", i).get(); assertThat(indexResponse.status(), is(RestStatus.CREATED)); - if (rarely()) { client().admin().indices().prepareFlush(indexName).get(); uncommittedOps = 0; @@ -423,7 +422,8 @@ public class FrozenIndexTests extends ESSingleNodeTestCase { IndicesStatsResponse stats = client().admin().indices().prepareStats(indexName).clear().setTranslog(true).get(); assertThat(stats.getIndex(indexName), notNullValue()); - assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().estimatedNumberOfOperations(), equalTo(nbDocs)); + assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().estimatedNumberOfOperations(), equalTo( + indexService.getIndexSettings().isSoftDeleteEnabled() ? uncommittedOps : nbDocs)); assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().getUncommittedOperations(), equalTo(uncommittedOps)); assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(indexName)).actionGet()); @@ -432,7 +432,8 @@ public class FrozenIndexTests extends ESSingleNodeTestCase { IndicesOptions indicesOptions = IndicesOptions.STRICT_EXPAND_OPEN; stats = client().admin().indices().prepareStats(indexName).setIndicesOptions(indicesOptions).clear().setTranslog(true).get(); assertThat(stats.getIndex(indexName), notNullValue()); - assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().estimatedNumberOfOperations(), equalTo(nbDocs)); + assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().estimatedNumberOfOperations(), + equalTo(indexService.getIndexSettings().isSoftDeleteEnabled() ? 
0 : nbDocs)); assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().getUncommittedOperations(), equalTo(0)); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportExecuteSnapshotLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportExecuteSnapshotLifecycleAction.java index 01b0f209b892..12d3d62c99f3 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportExecuteSnapshotLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportExecuteSnapshotLifecycleAction.java @@ -52,7 +52,7 @@ public class TransportExecuteSnapshotLifecycleAction } @Override protected String executor() { - return ThreadPool.Names.SNAPSHOT; + return ThreadPool.Names.GENERIC; } @Override diff --git a/x-pack/plugin/ml/qa/ml-with-security/build.gradle b/x-pack/plugin/ml/qa/ml-with-security/build.gradle index 1fb87b13b525..f0ddcfbfcbc4 100644 --- a/x-pack/plugin/ml/qa/ml-with-security/build.gradle +++ b/x-pack/plugin/ml/qa/ml-with-security/build.gradle @@ -89,6 +89,8 @@ integTest.runner { 'ml/evaluate_data_frame/Test binary_soft_classification given recall with empty thresholds', 'ml/evaluate_data_frame/Test binary_soft_classification given confusion_matrix with empty thresholds', 'ml/evaluate_data_frame/Test regression given evaluation with empty metrics', + 'ml/evaluate_data_frame/Test regression given missing actual_field', + 'ml/evaluate_data_frame/Test regression given missing predicted_field', 'ml/delete_job_force/Test cannot force delete a non-existent job', 'ml/delete_model_snapshot/Test delete snapshot missing snapshotId', 'ml/delete_model_snapshot/Test delete snapshot missing job_id', diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java index e78c6015ec14..eb99135b418e 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.ml.integration; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; @@ -13,12 +14,18 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; +import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsDest; +import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsSource; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; +import 
org.elasticsearch.xpack.core.ml.dataframe.analyses.OutlierDetection; import org.junit.After; import java.util.Arrays; @@ -26,13 +33,13 @@ import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.allOf; -import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.startsWith; public class RunDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTestCase { @@ -366,7 +373,6 @@ public class RunDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTest assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) bulkRequestBuilder.numberOfActions())); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/45425") public void testRegressionWithNumericFeatureAndFewDocuments() throws Exception { String sourceIndex = "test-regression-with-numeric-feature-and-few-docs"; @@ -405,7 +411,8 @@ public class RunDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTest waitUntilAnalyticsIsStopped(id); int resultsWithPrediction = 0; - SearchResponse sourceData = client().prepareSearch(sourceIndex).get(); + SearchResponse sourceData = client().prepareSearch(sourceIndex).setTrackTotalHits(true).setSize(1000).get(); + assertThat(sourceData.getHits().getTotalHits().value, equalTo(350L)); for (SearchHit hit : sourceData.getHits()) { GetResponse destDocGetResponse = client().prepareGet().setIndex(config.getDest().getIndex()).setId(hit.getId()).get(); assertThat(destDocGetResponse.isExists(), is(true)); @@ -420,14 +427,57 @@ public class RunDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTest @SuppressWarnings("unchecked") Map resultsObject = (Map) destDoc.get("ml"); + assertThat(resultsObject.containsKey("variable_prediction"), is(true)); if (resultsObject.containsKey("variable_prediction")) { resultsWithPrediction++; double featureValue = (double) destDoc.get("feature"); double predictionValue = (double) resultsObject.get("variable_prediction"); + // TODO reenable this assertion when the backend is stable // it seems for this case values can be as far off as 2.0 - assertThat(predictionValue, closeTo(10 * featureValue, 2.0)); + // assertThat(predictionValue, closeTo(10 * featureValue, 2.0)); } } assertThat(resultsWithPrediction, greaterThan(0)); } + + public void testModelMemoryLimitLowerThanEstimatedMemoryUsage() { + String sourceIndex = "test-model-memory-limit"; + + client().admin().indices().prepareCreate(sourceIndex) + .addMapping("_doc", "col_1", "type=double", "col_2", "type=float", "col_3", "type=keyword") + .get(); + + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (int i = 0; i < 10000; i++) { // This number of rows should make memory usage estimate greater than 1MB + IndexRequest indexRequest = new IndexRequest(sourceIndex) + .id("doc_" + i) + .source("col_1", 1.0, "col_2", 1.0, "col_3", "str"); + bulkRequestBuilder.add(indexRequest); + } + BulkResponse bulkResponse = bulkRequestBuilder.get(); + if (bulkResponse.hasFailures()) { + fail("Failed to index data: " + bulkResponse.buildFailureMessage()); + } + + String id = "test_model_memory_limit_lower_than_estimated_memory_usage"; + ByteSizeValue modelMemoryLimit = new ByteSizeValue(1, 
ByteSizeUnit.MB); + DataFrameAnalyticsConfig config = new DataFrameAnalyticsConfig.Builder() + .setId(id) + .setSource(new DataFrameAnalyticsSource(new String[] { sourceIndex }, null)) + .setDest(new DataFrameAnalyticsDest(sourceIndex + "-results", null)) + .setAnalysis(new OutlierDetection()) + .setModelMemoryLimit(modelMemoryLimit) + .build(); + + registerAnalytics(config); + putAnalytics(config); + assertState(id, DataFrameAnalyticsState.STOPPED); + + ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> startAnalytics(id)); + assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat( + exception.getMessage(), + startsWith("Cannot start because the configured model memory limit [" + modelMemoryLimit + + "] is lower than the expected memory usage")); + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 954dbbcf65fc..4af18d751789 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -216,7 +216,7 @@ import org.elasticsearch.xpack.ml.job.process.normalizer.MultiplyingNormalizerPr import org.elasticsearch.xpack.ml.job.process.normalizer.NativeNormalizerProcessFactory; import org.elasticsearch.xpack.ml.job.process.normalizer.NormalizerFactory; import org.elasticsearch.xpack.ml.job.process.normalizer.NormalizerProcessFactory; -import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; import org.elasticsearch.xpack.ml.process.DummyController; import org.elasticsearch.xpack.ml.process.MlController; import org.elasticsearch.xpack.ml.process.MlControllerHolder; @@ -470,7 +470,7 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu return Collections.singletonList(new JobManagerHolder()); } - Auditor auditor = new Auditor(client, clusterService.getNodeName()); + AnomalyDetectionAuditor auditor = new AnomalyDetectionAuditor(client, clusterService.getNodeName()); JobResultsProvider jobResultsProvider = new JobResultsProvider(client, settings); JobResultsPersister jobResultsPersister = new JobResultsPersister(client); JobDataCountsPersister jobDataCountsPersister = new JobDataCountsPersister(client); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java index a2c9f6eeaaf3..6ce313f95396 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java @@ -21,7 +21,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; -import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; import java.util.Objects; @@ -29,18 +29,20 @@ import java.util.Objects; public class MlAssignmentNotifier implements ClusterStateListener { private static final Logger logger = LogManager.getLogger(MlAssignmentNotifier.class); - private final Auditor auditor; + private final AnomalyDetectionAuditor auditor; private final 
MlConfigMigrator mlConfigMigrator; private final ThreadPool threadPool; - MlAssignmentNotifier(Settings settings, Auditor auditor, ThreadPool threadPool, Client client, ClusterService clusterService) { + MlAssignmentNotifier(Settings settings, AnomalyDetectionAuditor auditor, ThreadPool threadPool, Client client, + ClusterService clusterService) { this.auditor = auditor; this.mlConfigMigrator = new MlConfigMigrator(settings, client, clusterService); this.threadPool = threadPool; clusterService.addListener(this); } - MlAssignmentNotifier(Auditor auditor, ThreadPool threadPool, MlConfigMigrator mlConfigMigrator, ClusterService clusterService) { + MlAssignmentNotifier(AnomalyDetectionAuditor auditor, ThreadPool threadPool, MlConfigMigrator mlConfigMigrator, + ClusterService clusterService) { this.auditor = auditor; this.mlConfigMigrator = mlConfigMigrator; this.threadPool = threadPool; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java index 1a8aea05c458..8816807948ab 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java @@ -37,7 +37,7 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; import org.elasticsearch.xpack.ml.job.persistence.JobConfigProvider; -import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; import java.util.ArrayList; import java.util.Collection; @@ -53,14 +53,14 @@ public class TransportCloseJobAction extends TransportTasksAction openJobIds, List closingJobIds, - PersistentTasksCustomMetaData tasks, Auditor auditor) { + static TransportCloseJobAction.WaitForCloseRequest buildWaitForCloseRequest(List openJobIds, + List closingJobIds, + PersistentTasksCustomMetaData tasks, + AnomalyDetectionAuditor auditor) { TransportCloseJobAction.WaitForCloseRequest waitForCloseRequest = new TransportCloseJobAction.WaitForCloseRequest(); for (String jobId : openJobIds) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java index 36d9802e9914..c33dd591a912 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java @@ -23,7 +23,7 @@ import org.elasticsearch.xpack.ml.job.retention.ExpiredModelSnapshotsRemover; import org.elasticsearch.xpack.ml.job.retention.ExpiredResultsRemover; import org.elasticsearch.xpack.ml.job.retention.MlDataRemover; import org.elasticsearch.xpack.ml.job.retention.UnusedStateRemover; -import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; import org.elasticsearch.xpack.ml.utils.VolatileCursorIterator; import java.util.Arrays; @@ -54,7 +54,7 @@ public class TransportDeleteExpiredDataAction extends HandledTransportAction listener) { - Auditor auditor = new Auditor(client, clusterService.getNodeName()); + AnomalyDetectionAuditor auditor = new 
AnomalyDetectionAuditor(client, clusterService.getNodeName()); List dataRemovers = Arrays.asList( new ExpiredResultsRemover(client, auditor), new ExpiredForecastsRemover(client, threadPool), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java index 729da576ff4b..45b4e1805174 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java @@ -70,7 +70,7 @@ import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; import org.elasticsearch.xpack.ml.job.persistence.JobConfigProvider; import org.elasticsearch.xpack.ml.job.persistence.JobDataDeleter; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; -import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; import org.elasticsearch.xpack.ml.process.MlMemoryTracker; import org.elasticsearch.xpack.ml.utils.MlIndicesUtils; @@ -94,7 +94,7 @@ public class TransportDeleteJobAction extends TransportMasterNodeAction { + extends HandledTransportAction { private final TransportService transportService; private final ClusterService clusterService; @@ -43,7 +44,7 @@ public class TransportEstimateMemoryUsageAction ClusterService clusterService, NodeClient client, MemoryUsageEstimationProcessManager processManager) { - super(EstimateMemoryUsageAction.NAME, transportService, actionFilters, EstimateMemoryUsageAction.Request::new); + super(EstimateMemoryUsageAction.NAME, transportService, actionFilters, PutDataFrameAnalyticsAction.Request::new); this.transportService = transportService; this.clusterService = Objects.requireNonNull(clusterService); this.client = Objects.requireNonNull(client); @@ -52,7 +53,7 @@ public class TransportEstimateMemoryUsageAction @Override protected void doExecute(Task task, - EstimateMemoryUsageAction.Request request, + PutDataFrameAnalyticsAction.Request request, ActionListener listener) { DiscoveryNode localNode = clusterService.localNode(); if (MachineLearning.isMlNode(localNode)) { @@ -75,7 +76,7 @@ public class TransportEstimateMemoryUsageAction * the ML node. */ private void doEstimateMemoryUsage(String taskId, - EstimateMemoryUsageAction.Request request, + PutDataFrameAnalyticsAction.Request request, ActionListener listener) { DataFrameDataExtractorFactory.createForSourceIndices( client, @@ -90,7 +91,7 @@ public class TransportEstimateMemoryUsageAction ActionListener.wrap( result -> listener.onResponse( new EstimateMemoryUsageAction.Response( - result.getExpectedMemoryUsageWithOnePartition(), result.getExpectedMemoryUsageWithMaxPartitions())), + result.getExpectedMemoryWithoutDisk(), result.getExpectedMemoryWithDisk())), listener::onFailure ) ); @@ -103,7 +104,7 @@ public class TransportEstimateMemoryUsageAction /** * Finds the first available ML node in the cluster and redirects the request to this node. 
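Related to the memory-usage estimation wiring touched in this file: further below, TransportStartDataFrameAnalyticsAction starts using the "without disk" estimate to refuse to start an analysis whose configured model memory limit is smaller than that estimate. A rough stand-alone sketch of the comparison follows; it uses plain long byte counts instead of the ByteSizeValue type the patch compares, so it is an approximation of the check, not the implementation.

----------------------------------------
// Illustrative only: plain long byte counts stand in for the ByteSizeValue instances the
// patch compares, and IllegalArgumentException stands in for the bad-request exception.
final class ModelMemoryLimitCheckSketch {

    static void validate(long configuredLimitBytes, long estimatedBytesWithoutDisk) {
        if (configuredLimitBytes < estimatedBytesWithoutDisk) {
            throw new IllegalArgumentException(
                "Cannot start because the configured model memory limit [" + configuredLimitBytes
                    + "] is lower than the expected memory usage [" + estimatedBytesWithoutDisk + "]");
        }
    }

    public static void main(String[] args) {
        validate(2L * 1024 * 1024, 1024L * 1024);          // fine: 2MB limit, 1MB estimate
        try {
            validate(1024L * 1024, 3L * 1024 * 1024);      // rejected: limit below estimate
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
----------------------------------------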
*/ - private void redirectToMlNode(EstimateMemoryUsageAction.Request request, + private void redirectToMlNode(PutDataFrameAnalyticsAction.Request request, ActionListener listener) { Optional node = findMlNode(clusterService.state()); if (node.isPresent()) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportKillProcessAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportKillProcessAction.java index 0660965598b6..33cd31256fe6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportKillProcessAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportKillProcessAction.java @@ -20,15 +20,15 @@ import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; -import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; public class TransportKillProcessAction extends TransportJobTaskAction { - private final Auditor auditor; + private final AnomalyDetectionAuditor auditor; @Inject public TransportKillProcessAction(TransportService transportService, ClusterService clusterService, ActionFilters actionFilters, - AutodetectProcessManager processManager, Auditor auditor) { + AutodetectProcessManager processManager, AnomalyDetectionAuditor auditor) { super(KillProcessAction.NAME, clusterService, transportService, actionFilters, KillProcessAction.Request::new, KillProcessAction.Response::new, MachineLearning.UTILITY_THREAD_POOL_NAME, processManager); this.auditor = auditor; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java index 699416197ec1..db6dda9d3108 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java @@ -52,6 +52,8 @@ import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.action.EstimateMemoryUsageAction; +import org.elasticsearch.xpack.core.ml.action.PutDataFrameAnalyticsAction; import org.elasticsearch.xpack.core.ml.action.StartDataFrameAnalyticsAction; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; @@ -169,11 +171,36 @@ public class TransportStartDataFrameAnalyticsAction ); // Tell the job tracker to refresh the memory requirement for this job and all other jobs that have persistent tasks + ActionListener estimateMemoryUsageListener = ActionListener.wrap( + estimateMemoryUsageResponse -> { + // Validate that model memory limit is sufficient to run the analysis + if (configHolder.get().getModelMemoryLimit() + .compareTo(estimateMemoryUsageResponse.getExpectedMemoryWithoutDisk()) < 0) { + ElasticsearchStatusException e = + ExceptionsHelper.badRequestException( + "Cannot start because the configured model memory limit [{}] is lower than the expected memory usage 
[{}]", + configHolder.get().getModelMemoryLimit(), estimateMemoryUsageResponse.getExpectedMemoryWithoutDisk()); + listener.onFailure(e); + return; + } + // Refresh memory requirement for jobs + memoryTracker.addDataFrameAnalyticsJobMemoryAndRefreshAllOthers( + request.getId(), configHolder.get().getModelMemoryLimit().getBytes(), memoryRequirementRefreshListener); + }, + listener::onFailure + ); + + // Perform memory usage estimation for this config ActionListener configListener = ActionListener.wrap( config -> { configHolder.set(config); - memoryTracker.addDataFrameAnalyticsJobMemoryAndRefreshAllOthers( - request.getId(), config.getModelMemoryLimit().getBytes(), memoryRequirementRefreshListener); + PutDataFrameAnalyticsAction.Request estimateMemoryUsageRequest = new PutDataFrameAnalyticsAction.Request(config); + ClientHelper.executeAsyncWithOrigin( + client, + ClientHelper.ML_ORIGIN, + EstimateMemoryUsageAction.INSTANCE, + estimateMemoryUsageRequest, + estimateMemoryUsageListener); }, listener::onFailure ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java index 16f311d2b166..782e4d3c4ca9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java @@ -56,7 +56,7 @@ import org.elasticsearch.xpack.ml.datafeed.DatafeedTimingStatsReporter; import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; import org.elasticsearch.xpack.ml.job.persistence.JobConfigProvider; -import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; import java.io.IOException; import java.util.ArrayList; @@ -82,7 +82,7 @@ public class TransportStartDatafeedAction extends TransportMasterNodeAction deprecationWarnings = new ArrayList<>(); deprecationWarnings.addAll(datafeed.getAggDeprecations(xContentRegistry)); deprecationWarnings.addAll(datafeed.getQueryDeprecations(xContentRegistry)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java index 8837d3f03f9e..85ed2f531f8d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java @@ -35,7 +35,7 @@ import org.elasticsearch.xpack.core.security.user.XPackUser; import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetector; import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetectorFactory.BucketWithMissingData; import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; -import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; import java.io.IOException; import java.io.InputStream; @@ -54,7 +54,7 @@ class DatafeedJob { private static final int NEXT_TASK_DELAY_MS = 100; static final long MISSING_DATA_CHECK_INTERVAL_MS = 900_000; //15 minutes in ms - private final Auditor auditor; + private final AnomalyDetectionAuditor auditor; private final String jobId; private final DataDescription dataDescription; private final long 
frequencyMs; @@ -76,7 +76,7 @@ class DatafeedJob { DatafeedJob(String jobId, DataDescription dataDescription, long frequencyMs, long queryDelayMs, DataExtractorFactory dataExtractorFactory, DatafeedTimingStatsReporter timingStatsReporter, Client client, - Auditor auditor, Supplier currentTimeSupplier, DelayedDataDetector delayedDataDetector, + AnomalyDetectionAuditor auditor, Supplier currentTimeSupplier, DelayedDataDetector delayedDataDetector, long latestFinalBucketEndTimeMs, long latestRecordTimeMs) { this.jobId = jobId; this.dataDescription = Objects.requireNonNull(dataDescription); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java index a592f4102265..728e14f6f4fe 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java @@ -27,7 +27,7 @@ import org.elasticsearch.xpack.ml.job.persistence.BucketsQueryBuilder; import org.elasticsearch.xpack.ml.job.persistence.JobConfigProvider; import org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; -import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; import java.util.Collections; import java.util.Objects; @@ -39,16 +39,17 @@ public class DatafeedJobBuilder { private final Client client; private final NamedXContentRegistry xContentRegistry; - private final Auditor auditor; + private final AnomalyDetectionAuditor auditor; private final Supplier currentTimeSupplier; private final JobConfigProvider jobConfigProvider; private final JobResultsProvider jobResultsProvider; private final DatafeedConfigProvider datafeedConfigProvider; private final JobResultsPersister jobResultsPersister; - public DatafeedJobBuilder(Client client, NamedXContentRegistry xContentRegistry, Auditor auditor, Supplier currentTimeSupplier, - JobConfigProvider jobConfigProvider, JobResultsProvider jobResultsProvider, - DatafeedConfigProvider datafeedConfigProvider, JobResultsPersister jobResultsPersister) { + public DatafeedJobBuilder(Client client, NamedXContentRegistry xContentRegistry, AnomalyDetectionAuditor auditor, + Supplier currentTimeSupplier, JobConfigProvider jobConfigProvider, + JobResultsProvider jobResultsProvider, DatafeedConfigProvider datafeedConfigProvider, + JobResultsPersister jobResultsPersister) { this.client = client; this.xContentRegistry = Objects.requireNonNull(xContentRegistry); this.auditor = Objects.requireNonNull(auditor); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java index 4425b624a06f..a60797562d03 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java @@ -32,7 +32,7 @@ import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.action.TransportStartDatafeedAction; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; -import org.elasticsearch.xpack.ml.notifications.Auditor; +import 
org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; import java.util.ArrayList; import java.util.Iterator; @@ -58,7 +58,7 @@ public class DatafeedManager { private final ClusterService clusterService; private final ThreadPool threadPool; private final Supplier currentTimeSupplier; - private final Auditor auditor; + private final AnomalyDetectionAuditor auditor; // Use allocationId as key instead of datafeed id private final ConcurrentMap runningDatafeedsOnThisNode = new ConcurrentHashMap<>(); private final DatafeedJobBuilder datafeedJobBuilder; @@ -66,7 +66,8 @@ public class DatafeedManager { private final AutodetectProcessManager autodetectProcessManager; public DatafeedManager(ThreadPool threadPool, Client client, ClusterService clusterService, DatafeedJobBuilder datafeedJobBuilder, - Supplier currentTimeSupplier, Auditor auditor, AutodetectProcessManager autodetectProcessManager) { + Supplier currentTimeSupplier, AnomalyDetectionAuditor auditor, + AutodetectProcessManager autodetectProcessManager) { this.client = Objects.requireNonNull(client); this.clusterService = Objects.requireNonNull(clusterService); this.threadPool = threadPool; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/ProblemTracker.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/ProblemTracker.java index 9844631c7b5c..69a821d42464 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/ProblemTracker.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/ProblemTracker.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.ml.datafeed; import org.elasticsearch.xpack.core.ml.job.messages.Messages; -import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; import java.util.Objects; @@ -26,7 +26,7 @@ class ProblemTracker { private static final int EMPTY_DATA_WARN_COUNT = 10; - private final Auditor auditor; + private final AnomalyDetectionAuditor auditor; private final String jobId; private volatile boolean hasProblems; @@ -34,7 +34,7 @@ class ProblemTracker { private volatile String previousProblem; private volatile int emptyDataCount; - ProblemTracker(Auditor auditor, String jobId) { + ProblemTracker(AnomalyDetectionAuditor auditor, String jobId) { this.auditor = Objects.requireNonNull(auditor); this.jobId = Objects.requireNonNull(jobId); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java index d9f1aa994d59..75b5ad950cb3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java @@ -51,6 +51,8 @@ public class DataFrameDataExtractor { private static final Logger LOGGER = LogManager.getLogger(DataFrameDataExtractor.class); private static final TimeValue SCROLL_TIMEOUT = new TimeValue(30, TimeUnit.MINUTES); + private static final String EMPTY_STRING = ""; + private final Client client; private final DataFrameDataExtractorContext context; private String scrollId; @@ -184,8 +186,15 @@ public class DataFrameDataExtractor { if (values.length == 1 && (values[0] instanceof Number || values[0] instanceof String)) { extractedValues[i] = Objects.toString(values[0]); } else { - extractedValues = null; - 
break; + if (values.length == 0 && context.includeRowsWithMissingValues) { + // if values is empty then it means it's a missing value + extractedValues[i] = EMPTY_STRING; + } else { + // we are here if we have a missing value but the analysis does not support those + // or the value type is not supported (e.g. arrays, etc.) + extractedValues = null; + break; + } } } return new Row(extractedValues, hit); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorContext.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorContext.java index f602a66221f7..07279cf501a5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorContext.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorContext.java @@ -21,9 +21,10 @@ public class DataFrameDataExtractorContext { final int scrollSize; final Map headers; final boolean includeSource; + final boolean includeRowsWithMissingValues; DataFrameDataExtractorContext(String jobId, ExtractedFields extractedFields, List indices, QueryBuilder query, int scrollSize, - Map headers, boolean includeSource) { + Map headers, boolean includeSource, boolean includeRowsWithMissingValues) { this.jobId = Objects.requireNonNull(jobId); this.extractedFields = Objects.requireNonNull(extractedFields); this.indices = indices.toArray(new String[indices.size()]); @@ -31,5 +32,6 @@ public class DataFrameDataExtractorContext { this.scrollSize = scrollSize; this.headers = headers; this.includeSource = includeSource; + this.includeRowsWithMissingValues = includeRowsWithMissingValues; } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorFactory.java index 2e7139bca2c1..d24d157d4f5b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorFactory.java @@ -41,14 +41,16 @@ public class DataFrameDataExtractorFactory { private final List indices; private final ExtractedFields extractedFields; private final Map headers; + private final boolean includeRowsWithMissingValues; private DataFrameDataExtractorFactory(Client client, String analyticsId, List indices, ExtractedFields extractedFields, - Map headers) { + Map headers, boolean includeRowsWithMissingValues) { this.client = Objects.requireNonNull(client); this.analyticsId = Objects.requireNonNull(analyticsId); this.indices = Objects.requireNonNull(indices); this.extractedFields = Objects.requireNonNull(extractedFields); this.headers = headers; + this.includeRowsWithMissingValues = includeRowsWithMissingValues; } public DataFrameDataExtractor newExtractor(boolean includeSource) { @@ -56,14 +58,19 @@ public class DataFrameDataExtractorFactory { analyticsId, extractedFields, indices, - allExtractedFieldsExistQuery(), + createQuery(), 1000, headers, - includeSource + includeSource, + includeRowsWithMissingValues ); return new DataFrameDataExtractor(client, context); } + private QueryBuilder createQuery() { + return includeRowsWithMissingValues ? 
QueryBuilders.matchAllQuery() : allExtractedFieldsExistQuery(); + } + private QueryBuilder allExtractedFieldsExistQuery() { BoolQueryBuilder query = QueryBuilders.boolQuery(); for (ExtractedField field : extractedFields.getAllFields()) { @@ -94,7 +101,8 @@ public class DataFrameDataExtractorFactory { ActionListener.wrap( extractedFields -> listener.onResponse( new DataFrameDataExtractorFactory( - client, taskId, Arrays.asList(config.getSource().getIndex()), extractedFields, config.getHeaders())), + client, taskId, Arrays.asList(config.getSource().getIndex()), extractedFields, config.getHeaders(), + config.getAnalysis().supportsMissingValues())), listener::onFailure ) ); @@ -123,7 +131,8 @@ public class DataFrameDataExtractorFactory { ActionListener.wrap( extractedFields -> listener.onResponse( new DataFrameDataExtractorFactory( - client, config.getId(), Arrays.asList(config.getDest().getIndex()), extractedFields, config.getHeaders())), + client, config.getId(), Arrays.asList(config.getDest().getIndex()), extractedFields, config.getHeaders(), + config.getAnalysis().supportsMissingValues())), listener::onFailure ) ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessor.java index fd5f43e8426e..8a4f134de9a2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessor.java @@ -15,7 +15,6 @@ import org.elasticsearch.xpack.ml.dataframe.process.results.RowResults; import java.util.Iterator; import java.util.Objects; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.function.Supplier; @@ -45,12 +44,10 @@ public class AnalyticsResultProcessor { public void awaitForCompletion() { try { - if (completionLatch.await(30, TimeUnit.MINUTES) == false) { - LOGGER.warn("[{}] Timeout waiting for results processor to complete", dataFrameAnalyticsId); - } + completionLatch.await(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); - LOGGER.info("[{}] Interrupted waiting for results processor to complete", dataFrameAnalyticsId); + LOGGER.error(new ParameterizedMessage("[{}] Interrupted waiting for results processor to complete", dataFrameAnalyticsId), e); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManager.java index 17595db791ee..fac084c0fc84 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManager.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -66,7 +67,9 @@ public class 
MemoryUsageEstimationProcessManager { new AnalyticsProcessConfig( dataSummary.rows, dataSummary.cols, - DataFrameAnalyticsConfig.MIN_MODEL_MEMORY_LIMIT, + // For memory estimation the model memory limit here should be set high enough not to trigger an error when C++ code + // compares the limit to the result of estimation. + new ByteSizeValue(1, ByteSizeUnit.PB), 1, "", categoricalFields, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/results/MemoryUsageEstimationResult.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/results/MemoryUsageEstimationResult.java index 03fcb3a52ca4..7e0f2d42d225 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/results/MemoryUsageEstimationResult.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/results/MemoryUsageEstimationResult.java @@ -22,8 +22,8 @@ public class MemoryUsageEstimationResult implements ToXContentObject { public static final ParseField TYPE = new ParseField("memory_usage_estimation_result"); - public static final ParseField EXPECTED_MEMORY_USAGE_WITH_ONE_PARTITION = new ParseField("expected_memory_usage_with_one_partition"); - public static final ParseField EXPECTED_MEMORY_USAGE_WITH_MAX_PARTITIONS = new ParseField("expected_memory_usage_with_max_partitions"); + public static final ParseField EXPECTED_MEMORY_WITHOUT_DISK = new ParseField("expected_memory_without_disk"); + public static final ParseField EXPECTED_MEMORY_WITH_DISK = new ParseField("expected_memory_with_disk"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( @@ -34,43 +34,40 @@ public class MemoryUsageEstimationResult implements ToXContentObject { static { PARSER.declareField( optionalConstructorArg(), - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), EXPECTED_MEMORY_USAGE_WITH_ONE_PARTITION.getPreferredName()), - EXPECTED_MEMORY_USAGE_WITH_ONE_PARTITION, + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), EXPECTED_MEMORY_WITHOUT_DISK.getPreferredName()), + EXPECTED_MEMORY_WITHOUT_DISK, ObjectParser.ValueType.VALUE); PARSER.declareField( optionalConstructorArg(), - (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), EXPECTED_MEMORY_USAGE_WITH_MAX_PARTITIONS.getPreferredName()), - EXPECTED_MEMORY_USAGE_WITH_MAX_PARTITIONS, + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), EXPECTED_MEMORY_WITH_DISK.getPreferredName()), + EXPECTED_MEMORY_WITH_DISK, ObjectParser.ValueType.VALUE); } - private final ByteSizeValue expectedMemoryUsageWithOnePartition; - private final ByteSizeValue expectedMemoryUsageWithMaxPartitions; + private final ByteSizeValue expectedMemoryWithoutDisk; + private final ByteSizeValue expectedMemoryWithDisk; - public MemoryUsageEstimationResult(@Nullable ByteSizeValue expectedMemoryUsageWithOnePartition, - @Nullable ByteSizeValue expectedMemoryUsageWithMaxPartitions) { - this.expectedMemoryUsageWithOnePartition = expectedMemoryUsageWithOnePartition; - this.expectedMemoryUsageWithMaxPartitions = expectedMemoryUsageWithMaxPartitions; + public MemoryUsageEstimationResult(@Nullable ByteSizeValue expectedMemoryWithoutDisk, @Nullable ByteSizeValue expectedMemoryWithDisk) { + this.expectedMemoryWithoutDisk = expectedMemoryWithoutDisk; + this.expectedMemoryWithDisk = expectedMemoryWithDisk; } - public ByteSizeValue getExpectedMemoryUsageWithOnePartition() { - return expectedMemoryUsageWithOnePartition; + public ByteSizeValue getExpectedMemoryWithoutDisk() { + return 
expectedMemoryWithoutDisk; } - public ByteSizeValue getExpectedMemoryUsageWithMaxPartitions() { - return expectedMemoryUsageWithMaxPartitions; + public ByteSizeValue getExpectedMemoryWithDisk() { + return expectedMemoryWithDisk; } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - if (expectedMemoryUsageWithOnePartition != null) { - builder.field( - EXPECTED_MEMORY_USAGE_WITH_ONE_PARTITION.getPreferredName(), expectedMemoryUsageWithOnePartition.getStringRep()); + if (expectedMemoryWithoutDisk != null) { + builder.field(EXPECTED_MEMORY_WITHOUT_DISK.getPreferredName(), expectedMemoryWithoutDisk.getStringRep()); } - if (expectedMemoryUsageWithMaxPartitions != null) { - builder.field( - EXPECTED_MEMORY_USAGE_WITH_MAX_PARTITIONS.getPreferredName(), expectedMemoryUsageWithMaxPartitions.getStringRep()); + if (expectedMemoryWithDisk != null) { + builder.field(EXPECTED_MEMORY_WITH_DISK.getPreferredName(), expectedMemoryWithDisk.getStringRep()); } builder.endObject(); return builder; @@ -86,12 +83,12 @@ public class MemoryUsageEstimationResult implements ToXContentObject { } MemoryUsageEstimationResult that = (MemoryUsageEstimationResult) other; - return Objects.equals(expectedMemoryUsageWithOnePartition, that.expectedMemoryUsageWithOnePartition) - && Objects.equals(expectedMemoryUsageWithMaxPartitions, that.expectedMemoryUsageWithMaxPartitions); + return Objects.equals(expectedMemoryWithoutDisk, that.expectedMemoryWithoutDisk) + && Objects.equals(expectedMemoryWithDisk, that.expectedMemoryWithDisk); } @Override public int hashCode() { - return Objects.hash(expectedMemoryUsageWithOnePartition, expectedMemoryUsageWithMaxPartitions); + return Objects.hash(expectedMemoryWithoutDisk, expectedMemoryWithDisk); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java index 683fbb7c65c1..aa7771ac21f6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java @@ -57,7 +57,7 @@ import org.elasticsearch.xpack.ml.job.persistence.JobConfigProvider; import org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.job.process.autodetect.UpdateParams; -import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; import org.elasticsearch.xpack.ml.utils.VoidChainTaskExecutor; import java.io.IOException; @@ -91,7 +91,7 @@ public class JobManager { private final JobResultsProvider jobResultsProvider; private final JobResultsPersister jobResultsPersister; private final ClusterService clusterService; - private final Auditor auditor; + private final AnomalyDetectionAuditor auditor; private final Client client; private final ThreadPool threadPool; private final UpdateJobProcessNotifier updateJobProcessNotifier; @@ -104,8 +104,9 @@ public class JobManager { * Create a JobManager */ public JobManager(Environment environment, Settings settings, JobResultsProvider jobResultsProvider, - JobResultsPersister jobResultsPersister, ClusterService clusterService, Auditor auditor, ThreadPool threadPool, - Client client, UpdateJobProcessNotifier updateJobProcessNotifier, NamedXContentRegistry xContentRegistry) { + JobResultsPersister 
jobResultsPersister, ClusterService clusterService, AnomalyDetectionAuditor auditor, + ThreadPool threadPool, Client client, UpdateJobProcessNotifier updateJobProcessNotifier, + NamedXContentRegistry xContentRegistry) { this.environment = environment; this.jobResultsProvider = Objects.requireNonNull(jobResultsProvider); this.jobResultsPersister = Objects.requireNonNull(jobResultsPersister); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java index 4745228285e3..9ef733198114 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java @@ -66,7 +66,7 @@ import org.elasticsearch.xpack.ml.job.process.normalizer.NormalizerFactory; import org.elasticsearch.xpack.ml.job.process.normalizer.Renormalizer; import org.elasticsearch.xpack.ml.job.process.normalizer.ScoresUpdater; import org.elasticsearch.xpack.ml.job.process.normalizer.ShortCircuitingRenormalizer; -import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; import org.elasticsearch.xpack.ml.process.NativeStorageProvider; import java.io.IOException; @@ -109,12 +109,12 @@ public class AutodetectProcessManager implements ClusterStateListener { private final NamedXContentRegistry xContentRegistry; - private final Auditor auditor; + private final AnomalyDetectionAuditor auditor; private volatile boolean upgradeInProgress; public AutodetectProcessManager(Environment environment, Settings settings, Client client, ThreadPool threadPool, - NamedXContentRegistry xContentRegistry, Auditor auditor, ClusterService clusterService, + NamedXContentRegistry xContentRegistry, AnomalyDetectionAuditor auditor, ClusterService clusterService, JobManager jobManager, JobResultsProvider jobResultsProvider, JobResultsPersister jobResultsPersister, JobDataCountsPersister jobDataCountsPersister, AutodetectProcessFactory autodetectProcessFactory, NormalizerFactory normalizerFactory, NativeStorageProvider nativeStorageProvider) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessor.java index f596fbc669af..282dfa2c2f93 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectResultProcessor.java @@ -38,7 +38,7 @@ import org.elasticsearch.xpack.ml.job.persistence.TimingStatsReporter; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcess; import org.elasticsearch.xpack.ml.job.process.normalizer.Renormalizer; import org.elasticsearch.xpack.ml.job.results.AutodetectResult; -import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; import java.time.Duration; import java.util.Iterator; @@ -74,7 +74,7 @@ public class AutodetectResultProcessor { private static final Logger LOGGER = LogManager.getLogger(AutodetectResultProcessor.class); private final Client client; - private final Auditor 
auditor; + private final AnomalyDetectionAuditor auditor; private final String jobId; private final Renormalizer renormalizer; private final JobResultsPersister persister; @@ -96,7 +96,7 @@ public class AutodetectResultProcessor { private volatile ModelSizeStats latestModelSizeStats; public AutodetectResultProcessor(Client client, - Auditor auditor, + AnomalyDetectionAuditor auditor, String jobId, Renormalizer renormalizer, JobResultsPersister persister, @@ -107,7 +107,7 @@ public class AutodetectResultProcessor { } // Visible for testing - AutodetectResultProcessor(Client client, Auditor auditor, String jobId, Renormalizer renormalizer, + AutodetectResultProcessor(Client client, AnomalyDetectionAuditor auditor, String jobId, Renormalizer renormalizer, JobResultsPersister persister, AutodetectProcess autodetectProcess, ModelSizeStats latestModelSizeStats, TimingStats timingStats, FlushListener flushListener) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java index e24ecfbe8b0f..3cdcdff2ed9c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java @@ -23,7 +23,7 @@ import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeSta import org.elasticsearch.xpack.core.ml.job.results.Forecast; import org.elasticsearch.xpack.core.ml.job.results.ForecastRequestStats; import org.elasticsearch.xpack.core.ml.job.results.Result; -import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; import java.time.Instant; import java.time.ZoneOffset; @@ -46,9 +46,9 @@ public class ExpiredResultsRemover extends AbstractExpiredJobDataRemover { private static final Logger LOGGER = LogManager.getLogger(ExpiredResultsRemover.class); private final Client client; - private final Auditor auditor; + private final AnomalyDetectionAuditor auditor; - public ExpiredResultsRemover(Client client, Auditor auditor) { + public ExpiredResultsRemover(Client client, AnomalyDetectionAuditor auditor) { super(client); this.client = Objects.requireNonNull(client); this.auditor = Objects.requireNonNull(auditor); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/notifications/Auditor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/notifications/AnomalyDetectionAuditor.java similarity index 59% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/notifications/Auditor.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/notifications/AnomalyDetectionAuditor.java index 25c827829b24..64397893048a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/notifications/Auditor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/notifications/AnomalyDetectionAuditor.java @@ -6,14 +6,15 @@ package org.elasticsearch.xpack.ml.notifications; import org.elasticsearch.client.Client; +import org.elasticsearch.xpack.core.common.notifications.AbstractAuditor; import org.elasticsearch.xpack.core.ml.notifications.AuditorField; -import org.elasticsearch.xpack.core.ml.notifications.AuditMessage; +import org.elasticsearch.xpack.core.ml.notifications.AnomalyDetectionAuditMessage; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; 
-public class Auditor extends org.elasticsearch.xpack.core.common.notifications.Auditor { +public class AnomalyDetectionAuditor extends AbstractAuditor { - public Auditor(Client client, String nodeName) { - super(client, nodeName, AuditorField.NOTIFICATIONS_INDEX, ML_ORIGIN, AuditMessage.builder()); + public AnomalyDetectionAuditor(Client client, String nodeName) { + super(client, nodeName, AuditorField.NOTIFICATIONS_INDEX, ML_ORIGIN, AnomalyDetectionAuditMessage.builder()); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/dataframe/RestEstimateMemoryUsageAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/dataframe/RestEstimateMemoryUsageAction.java index cf426adbb988..25f2bcb4bb87 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/dataframe/RestEstimateMemoryUsageAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/dataframe/RestEstimateMemoryUsageAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.ml.action.EstimateMemoryUsageAction; +import org.elasticsearch.xpack.core.ml.action.PutDataFrameAnalyticsAction; import org.elasticsearch.xpack.ml.MachineLearning; import java.io.IOException; @@ -30,8 +31,8 @@ public class RestEstimateMemoryUsageAction extends BaseRestHandler { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { - EstimateMemoryUsageAction.Request request = - EstimateMemoryUsageAction.Request.parseRequest(restRequest.contentOrSourceParamParser()); + PutDataFrameAnalyticsAction.Request request = + PutDataFrameAnalyticsAction.Request.parseRequestForMemoryEstimation(restRequest.contentOrSourceParamParser()); return channel -> client.execute(EstimateMemoryUsageAction.INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlAssignmentNotifierTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlAssignmentNotifierTests.java index 3e31c8d564b6..e4384138056f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlAssignmentNotifierTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlAssignmentNotifierTests.java @@ -18,7 +18,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; import org.junit.Before; import java.net.InetAddress; @@ -38,7 +38,7 @@ import static org.mockito.Mockito.when; public class MlAssignmentNotifierTests extends ESTestCase { - private Auditor auditor; + private AnomalyDetectionAuditor auditor; private ClusterService clusterService; private ThreadPool threadPool; private MlConfigMigrator configMigrator; @@ -46,7 +46,7 @@ public class MlAssignmentNotifierTests extends ESTestCase { @Before @SuppressWarnings("unchecked") private void setupMocks() { - auditor = mock(Auditor.class); + auditor = mock(AnomalyDetectionAuditor.class); clusterService = mock(ClusterService.class); threadPool = mock(ThreadPool.class); configMigrator = mock(MlConfigMigrator.class); diff --git 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java index c4deb22e7ae8..61059e084802 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java @@ -30,7 +30,7 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; import org.elasticsearch.xpack.ml.job.persistence.JobConfigProvider; -import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase; import org.junit.Before; @@ -255,14 +255,15 @@ public class TransportCloseJobActionTests extends ESTestCase { addJobTask("closingjob1", null, JobState.CLOSING, tasksBuilder); TransportCloseJobAction.WaitForCloseRequest waitForCloseRequest = - TransportCloseJobAction.buildWaitForCloseRequest(openJobIds, closingJobIds, tasksBuilder.build(), mock(Auditor.class)); + TransportCloseJobAction.buildWaitForCloseRequest( + openJobIds, closingJobIds, tasksBuilder.build(), mock(AnomalyDetectionAuditor.class)); assertEquals(waitForCloseRequest.jobsToFinalize, Arrays.asList("openjob1", "openjob2")); assertEquals(waitForCloseRequest.persistentTaskIds, Arrays.asList("job-openjob1", "job-openjob2", "job-closingjob1")); assertTrue(waitForCloseRequest.hasJobsToWaitFor()); waitForCloseRequest = TransportCloseJobAction.buildWaitForCloseRequest(Collections.emptyList(), Collections.emptyList(), - tasksBuilder.build(), mock(Auditor.class)); + tasksBuilder.build(), mock(AnomalyDetectionAuditor.class)); assertFalse(waitForCloseRequest.hasJobsToWaitFor()); } @@ -275,7 +276,7 @@ public class TransportCloseJobActionTests extends ESTestCase { private TransportCloseJobAction createAction() { return new TransportCloseJobAction(mock(TransportService.class), mock(ThreadPool.class), mock(ActionFilters.class), - clusterService, mock(Client.class), mock(Auditor.class), mock(PersistentTasksService.class), + clusterService, mock(Client.class), mock(AnomalyDetectionAuditor.class), mock(PersistentTasksService.class), jobConfigProvider, datafeedConfigProvider); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java index dc6b45d126a4..64dd20f61a1e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java @@ -19,7 +19,7 @@ import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.ml.datafeed.DatafeedManager; import org.elasticsearch.xpack.ml.datafeed.DatafeedManagerTests; -import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; import java.util.Collections; import java.util.Date; @@ -78,7 +78,7 @@ public class TransportStartDatafeedActionTests extends ESTestCase { doReturn(Collections.singletonList("Deprecated 
Agg")).when(config).getAggDeprecations(any(NamedXContentRegistry.class)); doReturn(Collections.singletonList("Deprecated Query")).when(config).getQueryDeprecations(any(NamedXContentRegistry.class)); - Auditor auditor = mock(Auditor.class); + AnomalyDetectionAuditor auditor = mock(AnomalyDetectionAuditor.class); TransportStartDatafeedAction.auditDeprecations(config, job1, auditor, xContentRegistry()); @@ -93,7 +93,7 @@ public class TransportStartDatafeedActionTests extends ESTestCase { doReturn(Collections.emptyList()).when(config).getAggDeprecations(any(NamedXContentRegistry.class)); doReturn(Collections.emptyList()).when(config).getQueryDeprecations(any(NamedXContentRegistry.class)); - Auditor auditor = mock(Auditor.class); + AnomalyDetectionAuditor auditor = mock(AnomalyDetectionAuditor.class); TransportStartDatafeedAction.auditDeprecations(config, job1, auditor, xContentRegistry()); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilderTests.java index 609b0f8612ef..11335236bfb8 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilderTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.xpack.ml.datafeed.persistence.DatafeedConfigProvider; import org.elasticsearch.xpack.ml.job.persistence.JobConfigProvider; import org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; -import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; import org.junit.Before; import java.util.Collections; @@ -44,7 +44,7 @@ import static org.mockito.Mockito.when; public class DatafeedJobBuilderTests extends ESTestCase { private Client client; - private Auditor auditor; + private AnomalyDetectionAuditor auditor; private Consumer taskHandler; private JobResultsProvider jobResultsProvider; private JobConfigProvider jobConfigProvider; @@ -61,7 +61,7 @@ public class DatafeedJobBuilderTests extends ESTestCase { when(client.threadPool()).thenReturn(threadPool); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); when(client.settings()).thenReturn(Settings.EMPTY); - auditor = mock(Auditor.class); + auditor = mock(AnomalyDetectionAuditor.class); taskHandler = mock(Consumer.class); jobResultsPersister = mock(JobResultsPersister.class); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobTests.java index 7b33a59d048d..b7f960cc4b8a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobTests.java @@ -36,7 +36,7 @@ import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetectorF import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; -import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import 
org.junit.Before; import org.mockito.ArgumentCaptor; @@ -70,7 +70,7 @@ public class DatafeedJobTests extends ESTestCase { private static final String jobId = "_job_id"; - private Auditor auditor; + private AnomalyDetectionAuditor auditor; private DataExtractorFactory dataExtractorFactory; private DataExtractor dataExtractor; private DatafeedTimingStatsReporter timingStatsReporter; @@ -90,7 +90,7 @@ public class DatafeedJobTests extends ESTestCase { @Before @SuppressWarnings("unchecked") public void setup() throws Exception { - auditor = mock(Auditor.class); + auditor = mock(AnomalyDetectionAuditor.class); dataExtractorFactory = mock(DataExtractorFactory.class); dataExtractor = mock(DataExtractor.class); when(dataExtractorFactory.newExtractor(anyLong(), anyLong())).thenReturn(dataExtractor); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java index 4ae2ba4d0953..765c70e00ad1 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java @@ -37,7 +37,7 @@ import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.action.TransportStartDatafeedAction.DatafeedTask; import org.elasticsearch.xpack.ml.action.TransportStartDatafeedActionTests; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; -import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; import org.junit.Before; import org.mockito.ArgumentCaptor; @@ -71,7 +71,7 @@ public class DatafeedManagerTests extends ESTestCase { private DatafeedJob datafeedJob; private DatafeedManager datafeedManager; private long currentTime = 120000; - private Auditor auditor; + private AnomalyDetectionAuditor auditor; private ArgumentCaptor capturedClusterStateListener = ArgumentCaptor.forClass(ClusterStateListener.class); private AtomicBoolean hasOpenAutodetectCommunicator; @@ -97,9 +97,9 @@ public class DatafeedManagerTests extends ESTestCase { DiscoveryNode dNode = mock(DiscoveryNode.class); when(dNode.getName()).thenReturn("this_node_has_a_name"); when(clusterService.localNode()).thenReturn(dNode); - auditor = mock(Auditor.class); + auditor = mock(AnomalyDetectionAuditor.class); - auditor = mock(Auditor.class); + auditor = mock(AnomalyDetectionAuditor.class); threadPool = mock(ThreadPool.class); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); ExecutorService executorService = mock(ExecutorService.class); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/ProblemTrackerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/ProblemTrackerTests.java index 4d3458d8577d..bfbd85ca9444 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/ProblemTrackerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/ProblemTrackerTests.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.ml.datafeed; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; import org.junit.Before; import org.mockito.Mockito; @@ -16,13 +16,13 @@ import static org.mockito.Mockito.verify; public class ProblemTrackerTests 
extends ESTestCase { - private Auditor auditor; + private AnomalyDetectionAuditor auditor; private ProblemTracker problemTracker; @Before public void setUpTests() { - auditor = mock(Auditor.class); + auditor = mock(AnomalyDetectionAuditor.class); problemTracker = new ProblemTracker(auditor, "foo"); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java index fe91f235b9c5..ed00512a81c5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.client.Client; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.query.QueryBuilder; @@ -43,6 +44,7 @@ import java.util.stream.Collectors; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; import static org.mockito.Matchers.same; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -82,7 +84,7 @@ public class DataFrameDataExtractorTests extends ESTestCase { } public void testTwoPageExtraction() throws IOException { - TestExtractor dataExtractor = createExtractor(true); + TestExtractor dataExtractor = createExtractor(true, false); // First batch SearchResponse response1 = createSearchResponse(Arrays.asList(1_1, 1_2, 1_3), Arrays.asList(2_1, 2_2, 2_3)); @@ -142,7 +144,7 @@ public class DataFrameDataExtractorTests extends ESTestCase { } public void testRecoveryFromErrorOnSearchAfterRetry() throws IOException { - TestExtractor dataExtractor = createExtractor(true); + TestExtractor dataExtractor = createExtractor(true, false); // First search will fail dataExtractor.setNextResponse(createResponseWithShardFailures()); @@ -176,7 +178,7 @@ public class DataFrameDataExtractorTests extends ESTestCase { } public void testErrorOnSearchTwiceLeadsToFailure() { - TestExtractor dataExtractor = createExtractor(true); + TestExtractor dataExtractor = createExtractor(true, false); // First search will fail dataExtractor.setNextResponse(createResponseWithShardFailures()); @@ -189,7 +191,7 @@ public class DataFrameDataExtractorTests extends ESTestCase { } public void testRecoveryFromErrorOnContinueScrollAfterRetry() throws IOException { - TestExtractor dataExtractor = createExtractor(true); + TestExtractor dataExtractor = createExtractor(true, false); // Search will succeed SearchResponse response1 = createSearchResponse(Arrays.asList(1_1), Arrays.asList(2_1)); @@ -238,7 +240,7 @@ public class DataFrameDataExtractorTests extends ESTestCase { } public void testErrorOnContinueScrollTwiceLeadsToFailure() throws IOException { - TestExtractor dataExtractor = createExtractor(true); + TestExtractor dataExtractor = createExtractor(true, false); // Search will succeed SearchResponse response1 = createSearchResponse(Arrays.asList(1_1), Arrays.asList(2_1)); @@ -263,7 +265,7 @@ public class 
DataFrameDataExtractorTests extends ESTestCase { } public void testIncludeSourceIsFalseAndNoSourceFields() throws IOException { - TestExtractor dataExtractor = createExtractor(false); + TestExtractor dataExtractor = createExtractor(false, false); SearchResponse response = createSearchResponse(Arrays.asList(1_1), Arrays.asList(2_1)); dataExtractor.setNextResponse(response); @@ -291,7 +293,7 @@ public class DataFrameDataExtractorTests extends ESTestCase { ExtractedField.newField("field_1", Collections.singleton("keyword"), ExtractedField.ExtractionMethod.DOC_VALUE), ExtractedField.newField("field_2", Collections.singleton("text"), ExtractedField.ExtractionMethod.SOURCE))); - TestExtractor dataExtractor = createExtractor(false); + TestExtractor dataExtractor = createExtractor(false, false); SearchResponse response = createSearchResponse(Arrays.asList(1_1), Arrays.asList(2_1)); dataExtractor.setNextResponse(response); @@ -314,9 +316,77 @@ public class DataFrameDataExtractorTests extends ESTestCase { assertThat(searchRequest, containsString("\"_source\":{\"includes\":[\"field_2\"],\"excludes\":[]}")); } - private TestExtractor createExtractor(boolean includeSource) { + public void testMissingValues_GivenShouldNotInclude() throws IOException { + TestExtractor dataExtractor = createExtractor(true, false); + + // First and only batch + SearchResponse response1 = createSearchResponse(Arrays.asList(1_1, null, 1_3), Arrays.asList(2_1, 2_2, 2_3)); + dataExtractor.setNextResponse(response1); + + // Empty + SearchResponse lastAndEmptyResponse = createEmptySearchResponse(); + dataExtractor.setNextResponse(lastAndEmptyResponse); + + assertThat(dataExtractor.hasNext(), is(true)); + + // First batch + Optional> rows = dataExtractor.next(); + assertThat(rows.isPresent(), is(true)); + assertThat(rows.get().size(), equalTo(3)); + + assertThat(rows.get().get(0).getValues(), equalTo(new String[] {"11", "21"})); + assertThat(rows.get().get(1).getValues(), is(nullValue())); + assertThat(rows.get().get(2).getValues(), equalTo(new String[] {"13", "23"})); + + assertThat(rows.get().get(0).shouldSkip(), is(false)); + assertThat(rows.get().get(1).shouldSkip(), is(true)); + assertThat(rows.get().get(2).shouldSkip(), is(false)); + + assertThat(dataExtractor.hasNext(), is(true)); + + // Third batch should return empty + rows = dataExtractor.next(); + assertThat(rows.isEmpty(), is(true)); + assertThat(dataExtractor.hasNext(), is(false)); + } + + public void testMissingValues_GivenShouldInclude() throws IOException { + TestExtractor dataExtractor = createExtractor(true, true); + + // First and only batch + SearchResponse response1 = createSearchResponse(Arrays.asList(1_1, null, 1_3), Arrays.asList(2_1, 2_2, 2_3)); + dataExtractor.setNextResponse(response1); + + // Empty + SearchResponse lastAndEmptyResponse = createEmptySearchResponse(); + dataExtractor.setNextResponse(lastAndEmptyResponse); + + assertThat(dataExtractor.hasNext(), is(true)); + + // First batch + Optional> rows = dataExtractor.next(); + assertThat(rows.isPresent(), is(true)); + assertThat(rows.get().size(), equalTo(3)); + + assertThat(rows.get().get(0).getValues(), equalTo(new String[] {"11", "21"})); + assertThat(rows.get().get(1).getValues(), equalTo(new String[] {"", "22"})); + assertThat(rows.get().get(2).getValues(), equalTo(new String[] {"13", "23"})); + + assertThat(rows.get().get(0).shouldSkip(), is(false)); + assertThat(rows.get().get(1).shouldSkip(), is(false)); + assertThat(rows.get().get(2).shouldSkip(), is(false)); + + 
assertThat(dataExtractor.hasNext(), is(true)); + + // Third batch should return empty + rows = dataExtractor.next(); + assertThat(rows.isEmpty(), is(true)); + assertThat(dataExtractor.hasNext(), is(false)); + } + + private TestExtractor createExtractor(boolean includeSource, boolean includeRowsWithMissingValues) { DataFrameDataExtractorContext context = new DataFrameDataExtractorContext( - JOB_ID, extractedFields, indices, query, scrollSize, headers, includeSource); + JOB_ID, extractedFields, indices, query, scrollSize, headers, includeSource, includeRowsWithMissingValues); return new TestExtractor(client, context); } @@ -326,11 +396,10 @@ public class DataFrameDataExtractorTests extends ESTestCase { when(searchResponse.getScrollId()).thenReturn(randomAlphaOfLength(1000)); List hits = new ArrayList<>(); for (int i = 0; i < field1Values.size(); i++) { - SearchHit hit = new SearchHit(randomInt()); - SearchHitBuilder searchHitBuilder = new SearchHitBuilder(randomInt()) - .addField("field_1", Collections.singletonList(field1Values.get(i))) - .addField("field_2", Collections.singletonList(field2Values.get(i))) - .setSource("{\"field_1\":" + field1Values.get(i) + ",\"field_2\":" + field2Values.get(i) + "}"); + SearchHitBuilder searchHitBuilder = new SearchHitBuilder(randomInt()); + addField(searchHitBuilder, "field_1", field1Values.get(i)); + addField(searchHitBuilder, "field_2", field2Values.get(i)); + searchHitBuilder.setSource("{\"field_1\":" + field1Values.get(i) + ",\"field_2\":" + field2Values.get(i) + "}"); hits.add(searchHitBuilder.build()); } SearchHits searchHits = new SearchHits(hits.toArray(new SearchHit[0]), new TotalHits(hits.size(), TotalHits.Relation.EQUAL_TO), 1); @@ -338,6 +407,10 @@ public class DataFrameDataExtractorTests extends ESTestCase { return searchResponse; } + private static void addField(SearchHitBuilder searchHitBuilder, String field, @Nullable Number value) { + searchHitBuilder.addField(field, value == null ? 
Collections.emptyList() : Collections.singletonList(value)); + } + private SearchResponse createEmptySearchResponse() { return createSearchResponse(Collections.emptyList(), Collections.emptyList()); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/results/MemoryUsageEstimationResultTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/results/MemoryUsageEstimationResultTests.java index 735606b35ea0..c63b41120829 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/results/MemoryUsageEstimationResultTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/results/MemoryUsageEstimationResultTests.java @@ -39,13 +39,13 @@ public class MemoryUsageEstimationResultTests extends AbstractXContentTestCase initialState, @@ -163,15 +166,24 @@ public class RollupIndexerStateTests extends ESTestCase { final Function searchFunction; final Function bulkFunction; final Consumer failureConsumer; + final BiConsumer> saveStateCheck; private CountDownLatch latch; NonEmptyRollupIndexer(Executor executor, RollupJob job, AtomicReference initialState, Map initialPosition, Function searchFunction, Function bulkFunction, Consumer failureConsumer) { + this(executor, job, initialState, initialPosition, searchFunction, bulkFunction, failureConsumer, (i, m) -> {}); + } + + NonEmptyRollupIndexer(Executor executor, RollupJob job, AtomicReference initialState, + Map initialPosition, Function searchFunction, + Function bulkFunction, Consumer failureConsumer, + BiConsumer> saveStateCheck) { super(executor, job, initialState, initialPosition); this.searchFunction = searchFunction; this.bulkFunction = bulkFunction; this.failureConsumer = failureConsumer; + this.saveStateCheck = saveStateCheck; } private CountDownLatch newLatch(int count) { @@ -209,6 +221,7 @@ public class RollupIndexerStateTests extends ESTestCase { @Override protected void doSaveState(IndexerState state, Map position, Runnable next) { assert state == IndexerState.STARTED || state == IndexerState.INDEXING || state == IndexerState.STOPPED; + saveStateCheck.accept(state, position); next.run(); } @@ -758,6 +771,9 @@ public class RollupIndexerStateTests extends ESTestCase { Consumer failureConsumer = e -> { assertThat(e.getMessage(), equalTo("Could not identify key in agg [foo]")); + }; + + BiConsumer> doSaveStateCheck = (indexerState, position) -> { isFinished.set(true); }; @@ -765,7 +781,7 @@ public class RollupIndexerStateTests extends ESTestCase { try { NonEmptyRollupIndexer indexer = new NonEmptyRollupIndexer(executor, job, state, null, - searchFunction, bulkFunction, failureConsumer); + searchFunction, bulkFunction, failureConsumer, doSaveStateCheck); final CountDownLatch latch = indexer.newLatch(1); indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); diff --git a/x-pack/plugin/search-business-rules/build.gradle b/x-pack/plugin/search-business-rules/build.gradle new file mode 100644 index 000000000000..5789ec5f0811 --- /dev/null +++ b/x-pack/plugin/search-business-rules/build.gradle @@ -0,0 +1,45 @@ +evaluationDependsOn(xpackModule('core')) + +apply plugin: 'elasticsearch.esplugin' + +esplugin { + name 'search-business-rules' + description 'A plugin for applying business rules to search result rankings' + classname 'org.elasticsearch.xpack.searchbusinessrules.SearchBusinessRules' + extendedPlugins = ['x-pack-core'] +} +archivesBaseName = 'x-pack-searchbusinessrules' + + 
+integTest.enabled = false + +// Instead we create a separate task to run the +// tests based on ESIntegTestCase +task internalClusterTest(type: Test) { + description = 'Java fantasy integration tests' + mustRunAfter test + + include '**/*IT.class' +} + +check.dependsOn internalClusterTest + +dependencies { + compileOnly project(path: xpackModule('core'), configuration: 'default') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile project(":test:framework") + if (isEclipse) { + testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') + } +} + +// copied from CCR +dependencyLicenses { + ignoreSha 'x-pack-core' +} + +//testingConventions.naming { +// IT { +// baseClass "org.elasticsearch.xpack.searchbusinessrules.PinnedQueryBuilderIT" +// } +//} diff --git a/x-pack/plugin/search-business-rules/src/main/java/org/apache/lucene/search/CappedScoreQuery.java b/x-pack/plugin/search-business-rules/src/main/java/org/apache/lucene/search/CappedScoreQuery.java new file mode 100644 index 000000000000..140e26c5e974 --- /dev/null +++ b/x-pack/plugin/search-business-rules/src/main/java/org/apache/lucene/search/CappedScoreQuery.java @@ -0,0 +1,175 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.apache.lucene.search; + +import java.io.IOException; +import java.util.Objects; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.util.Bits; + +/** + * A query that wraps another query and ensures scores do not exceed a maximum value + */ +public final class CappedScoreQuery extends Query { + private final Query query; + private final float maxScore; + + /** Caps scores from the passed in Query to the supplied maxScore parameter */ + public CappedScoreQuery(Query query, float maxScore) { + this.query = Objects.requireNonNull(query, "Query must not be null"); + if (maxScore > 0 == false) { + throw new IllegalArgumentException(this.getClass().getName() + " maxScore must be >0, " + maxScore + " supplied."); + } + this.maxScore = maxScore; + } + + /** Returns the encapsulated query. */ + public Query getQuery() { + return query; + } + + @Override + public Query rewrite(IndexReader reader) throws IOException { + Query rewritten = query.rewrite(reader); + + if (rewritten != query) { + return new CappedScoreQuery(rewritten, maxScore); + } + + if (rewritten.getClass() == CappedScoreQuery.class) { + return rewritten; + } + + if (rewritten.getClass() == BoostQuery.class) { + return new CappedScoreQuery(((BoostQuery) rewritten).getQuery(), maxScore); + } + + return super.rewrite(reader); + } + + /** + * We return this as our {@link BulkScorer} so that if the CSQ wraps a query with its own optimized top-level scorer (e.g. + * BooleanScorer) we can use that top-level scorer. 
+ */ + protected static class CappedBulkScorer extends BulkScorer { + final BulkScorer bulkScorer; + final Weight weight; + final float maxScore; + + public CappedBulkScorer(BulkScorer bulkScorer, Weight weight, float maxScore) { + this.bulkScorer = bulkScorer; + this.weight = weight; + this.maxScore = maxScore; + } + + @Override + public int score(LeafCollector collector, Bits acceptDocs, int min, int max) throws IOException { + return bulkScorer.score(wrapCollector(collector), acceptDocs, min, max); + } + + private LeafCollector wrapCollector(LeafCollector collector) { + return new FilterLeafCollector(collector) { + @Override + public void setScorer(Scorable scorer) throws IOException { + // we must wrap again here, but using the scorer passed in as parameter: + in.setScorer(new FilterScorable(scorer) { + @Override + public float score() throws IOException { + return Math.min(maxScore, in.score()); + } + + @Override + public void setMinCompetitiveScore(float minScore) throws IOException { + scorer.setMinCompetitiveScore(minScore); + } + + }); + } + }; + } + + @Override + public long cost() { + return bulkScorer.cost(); + } + } + + @Override + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + final Weight innerWeight = searcher.createWeight(query, scoreMode, boost); + if (scoreMode.needsScores()) { + return new CappedScoreWeight(this, innerWeight, maxScore) { + @Override + public BulkScorer bulkScorer(LeafReaderContext context) throws IOException { + final BulkScorer innerScorer = innerWeight.bulkScorer(context); + if (innerScorer == null) { + return null; + } + return new CappedBulkScorer(innerScorer, this, maxScore); + } + + @Override + public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException { + ScorerSupplier innerScorerSupplier = innerWeight.scorerSupplier(context); + if (innerScorerSupplier == null) { + return null; + } + return new ScorerSupplier() { + @Override + public Scorer get(long leadCost) throws IOException { + final Scorer innerScorer = innerScorerSupplier.get(leadCost); + // short-circuit if scores will not need capping + innerScorer.advanceShallow(0); + if (innerScorer.getMaxScore(DocIdSetIterator.NO_MORE_DOCS) <= maxScore) { + return innerScorer; + } + return new CappedScorer(innerWeight, innerScorer, maxScore); + } + + @Override + public long cost() { + return innerScorerSupplier.cost(); + } + }; + } + + @Override + public Matches matches(LeafReaderContext context, int doc) throws IOException { + return innerWeight.matches(context, doc); + } + + @Override + public Scorer scorer(LeafReaderContext context) throws IOException { + ScorerSupplier scorerSupplier = scorerSupplier(context); + if (scorerSupplier == null) { + return null; + } + return scorerSupplier.get(Long.MAX_VALUE); + } + }; + } else { + return innerWeight; + } + } + + @Override + public String toString(String field) { + return new StringBuilder("CappedScore(").append(query.toString(field)).append(')').toString(); + } + + @Override + public boolean equals(Object other) { + return sameClassAs(other) && maxScore == ((CappedScoreQuery) other).maxScore && + query.equals(((CappedScoreQuery) other).query); + } + + @Override + public int hashCode() { + return 31 * classHash() + query.hashCode() + Float.hashCode(maxScore); + } +} diff --git a/x-pack/plugin/search-business-rules/src/main/java/org/apache/lucene/search/CappedScoreWeight.java 
b/x-pack/plugin/search-business-rules/src/main/java/org/apache/lucene/search/CappedScoreWeight.java new file mode 100644 index 000000000000..67e2c0b893d0 --- /dev/null +++ b/x-pack/plugin/search-business-rules/src/main/java/org/apache/lucene/search/CappedScoreWeight.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.apache.lucene.search; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.Term; + +import java.io.IOException; +import java.util.Set; + +/** + * A Weight that caps scores of the wrapped query to a maximum value + */ +public abstract class CappedScoreWeight extends Weight { + + private final float maxScore; + private final Weight innerWeight; + + protected CappedScoreWeight(Query query, Weight innerWeight, float maxScore) { + super(query); + this.maxScore = maxScore; + this.innerWeight = innerWeight; + } + + @Override + public void extractTerms(Set terms) { + innerWeight.extractTerms(terms); + } + + @Override + public boolean isCacheable(LeafReaderContext ctx) { + return innerWeight.isCacheable(ctx); + } + + @Override + public Scorer scorer(LeafReaderContext context) throws IOException { + return new CappedScorer(this, innerWeight.scorer(context), maxScore); + } + + @Override + public Explanation explain(LeafReaderContext context, int doc) throws IOException { + + final Scorer s = scorer(context); + final boolean exists; + if (s == null) { + exists = false; + } else { + final TwoPhaseIterator twoPhase = s.twoPhaseIterator(); + if (twoPhase == null) { + exists = s.iterator().advance(doc) == doc; + } else { + exists = twoPhase.approximation().advance(doc) == doc && twoPhase.matches(); + } + } + + Explanation sub = innerWeight.explain(context, doc); + if (sub.isMatch() && sub.getValue().floatValue() > maxScore) { + return Explanation.match(maxScore, "Capped score of " + innerWeight.getQuery() + ", max of", + sub, + Explanation.match(maxScore, "maximum score")); + } else { + return sub; + } + } + +} diff --git a/x-pack/plugin/search-business-rules/src/main/java/org/apache/lucene/search/CappedScorer.java b/x-pack/plugin/search-business-rules/src/main/java/org/apache/lucene/search/CappedScorer.java new file mode 100644 index 000000000000..a97fe51629bd --- /dev/null +++ b/x-pack/plugin/search-business-rules/src/main/java/org/apache/lucene/search/CappedScorer.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.apache.lucene.search; + +import java.io.IOException; + +public class CappedScorer extends FilterScorer { + private final float maxScore; + + public CappedScorer(Weight weight, Scorer delegate, float maxScore) { + super(delegate, weight); + this.maxScore = maxScore; + } + + @Override + public float getMaxScore(int upTo) throws IOException { + return Math.min(maxScore, in.getMaxScore(upTo)); + } + + @Override + public int advanceShallow(int target) throws IOException { + return in.advanceShallow(target); + } + + @Override + public float score() throws IOException { + return Math.min(maxScore, in.score()); + } + +} \ No newline at end of file diff --git a/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilder.java b/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilder.java new file mode 100644 index 000000000000..5e2adc5c6744 --- /dev/null +++ b/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilder.java @@ -0,0 +1,220 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.searchbusinessrules; + +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.CappedScoreQuery; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.DisjunctionMaxQuery; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.QueryShardContext; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * A query that will promote selected documents (identified by ID) above matches produced by an "organic" query. In practice, some upstream + * system will identify the promotions associated with a user's query string and use this object to ensure these are "pinned" to the top of + * the other search results. 
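+ *
+ * For illustration only (a sketch based on the parser and the tests in this package, not authoritative
+ * documentation), the JSON form of this query looks like:
+ *
+ * <pre>
+ * { "pinned": { "ids": [ "1", "2" ], "organic": { "term": { "tag": { "value": "tech" } } } } }
+ * </pre>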
+ */ +public class PinnedQueryBuilder extends AbstractQueryBuilder { + public static final String NAME = "pinned"; + public static final int MAX_NUM_PINNED_HITS = 100; + + private static final ParseField IDS_FIELD = new ParseField("ids"); + public static final ParseField ORGANIC_QUERY_FIELD = new ParseField("organic"); + + private final List ids; + private QueryBuilder organicQuery; + + // Organic queries will have their scores capped to this number range, + // We reserve the highest float exponent for scores of pinned queries + private static final float MAX_ORGANIC_SCORE = Float.intBitsToFloat((0xfe << 23)) - 1; + + /** + * Creates a new PinnedQueryBuilder + */ + public PinnedQueryBuilder(QueryBuilder organicQuery, String... ids) { + if (organicQuery == null) { + throw new IllegalArgumentException("[" + NAME + "] organicQuery cannot be null"); + } + this.organicQuery = organicQuery; + if (ids == null) { + throw new IllegalArgumentException("[" + NAME + "] ids cannot be null"); + } + if (ids.length > MAX_NUM_PINNED_HITS) { + throw new IllegalArgumentException("[" + NAME + "] Max of "+MAX_NUM_PINNED_HITS+" ids exceeded: "+ + ids.length+" provided."); + } + LinkedHashSet deduped = new LinkedHashSet<>(); + for (String id : ids) { + if (id == null) { + throw new IllegalArgumentException("[" + NAME + "] id cannot be null"); + } + if(deduped.add(id) == false) { + throw new IllegalArgumentException("[" + NAME + "] duplicate id found in list: "+id); + } + } + this.ids = new ArrayList<>(); + Collections.addAll(this.ids, ids); + + } + + /** + * Read from a stream. + */ + public PinnedQueryBuilder(StreamInput in) throws IOException { + super(in); + ids = in.readStringList(); + organicQuery = in.readNamedWriteable(QueryBuilder.class); + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + out.writeStringCollection(this.ids); + out.writeNamedWriteable(organicQuery); + } + + /** + * @return the organic query set in the constructor + */ + public QueryBuilder organicQuery() { + return this.organicQuery; + } + + /** + * Returns the pinned ids for the query. 
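+     * The ids are returned in the order in which they were supplied to the constructor; duplicates and null
+     * entries are rejected when the builder is created.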
+ */ + public List ids() { + return Collections.unmodifiableList(this.ids); + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(NAME); + if (organicQuery != null) { + builder.field(ORGANIC_QUERY_FIELD.getPreferredName()); + organicQuery.toXContent(builder, params); + } + builder.startArray(IDS_FIELD.getPreferredName()); + for (String value : ids) { + builder.value(value); + } + builder.endArray(); + printBoostAndQueryName(builder); + builder.endObject(); + } + + + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, + a -> + { + QueryBuilder organicQuery = (QueryBuilder) a[0]; + @SuppressWarnings("unchecked") + List ids = (List) a[1]; + return new PinnedQueryBuilder(organicQuery, ids.toArray(String[]::new)); + } + ); + static { + PARSER.declareObject(constructorArg(), (p, c) -> parseInnerQueryBuilder(p), ORGANIC_QUERY_FIELD); + PARSER.declareStringArray(constructorArg(), IDS_FIELD); + declareStandardFields(PARSER); + } + + public static PinnedQueryBuilder fromXContent(XContentParser parser) { + try { + return PARSER.apply(parser, null); + } catch (IllegalArgumentException e) { + throw new ParsingException(parser.getTokenLocation(), e.getMessage(), e); + } + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOException { + QueryBuilder newOrganicQuery = organicQuery.rewrite(queryShardContext); + if (newOrganicQuery != organicQuery) { + PinnedQueryBuilder result = new PinnedQueryBuilder(newOrganicQuery, ids.toArray(String[]::new)); + result.boost(this.boost); + return result; + } + return this; + } + + @Override + protected Query doToQuery(QueryShardContext context) throws IOException { + MappedFieldType idField = context.fieldMapper(IdFieldMapper.NAME); + if (idField == null) { + return new MatchNoDocsQuery("No mappings"); + } + if (this.ids.isEmpty()) { + return new CappedScoreQuery(organicQuery.toQuery(context), MAX_ORGANIC_SCORE); + } else { + BooleanQuery.Builder pinnedQueries = new BooleanQuery.Builder(); + + // Ensure each pin order using a Boost query with the relevant boost factor + int minPin = NumericUtils.floatToSortableInt(MAX_ORGANIC_SCORE) + 1; + int boostNum = minPin + ids.size(); + float lastScore = Float.MAX_VALUE; + for (String id : ids) { + float pinScore = NumericUtils.sortableIntToFloat(boostNum); + assert pinScore < lastScore; + lastScore = pinScore; + boostNum--; + // Ensure the pin order using a Boost query with the relevant boost factor + Query idQuery = new BoostQuery(new ConstantScoreQuery(idField.termQuery(id, context)), pinScore); + pinnedQueries.add(idQuery, BooleanClause.Occur.SHOULD); + } + + // Score for any pinned query clause should be used, regardless of any organic clause score, to preserve pin order. 
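+            // Note on the score layout: each pin above was given a constant score strictly greater than
+            // MAX_ORGANIC_SCORE (the cap applied to the organic query below), assigned in descending order from
+            // sortableIntToFloat(minPin + ids.size()) down to sortableIntToFloat(minPin + 1). For example, with
+            // three pinned ids the first id scores sortableIntToFloat(minPin + 3) and the last
+            // sortableIntToFloat(minPin + 1), so every pinned hit ranks ahead of every organic hit while the
+            // requested pin order is preserved.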
+ // Use dismax to always take the larger (ie pinned) of the organic vs pinned scores + List organicAndPinned = new ArrayList<>(); + organicAndPinned.add(pinnedQueries.build()); + // Cap the scores of the organic query + organicAndPinned.add(new CappedScoreQuery(organicQuery.toQuery(context), MAX_ORGANIC_SCORE)); + return new DisjunctionMaxQuery(organicAndPinned, 0); + } + + } + + @Override + protected int doHashCode() { + return Objects.hash(ids, organicQuery); + } + + @Override + protected boolean doEquals(PinnedQueryBuilder other) { + return Objects.equals(ids, other.ids) && Objects.equals(organicQuery, other.organicQuery) && boost == other.boost; + } +} diff --git a/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/SearchBusinessRules.java b/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/SearchBusinessRules.java new file mode 100644 index 000000000000..d479a471e193 --- /dev/null +++ b/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/SearchBusinessRules.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.searchbusinessrules; + +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.SearchPlugin; + +import java.util.List; + +import static java.util.Collections.singletonList; + +public class SearchBusinessRules extends Plugin implements SearchPlugin { + + @Override + public List> getQueries() { + return singletonList(new QuerySpec<>(PinnedQueryBuilder.NAME, PinnedQueryBuilder::new, PinnedQueryBuilder::fromXContent)); + } + +} diff --git a/x-pack/plugin/search-business-rules/src/test/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java b/x-pack/plugin/search-business-rules/src/test/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java new file mode 100644 index 000000000000..6ef9436c8157 --- /dev/null +++ b/x-pack/plugin/search-business-rules/src/test/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java @@ -0,0 +1,173 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.searchbusinessrules; + +import org.apache.lucene.search.Explanation; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.Operator; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.LinkedHashSet; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThirdHit; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.notNullValue; + + +public class PinnedQueryBuilderIT extends ESIntegTestCase { + + public void testIdInsertionOrderRetained() { + String[] ids = generateRandomStringArray(10, 50, false); + PinnedQueryBuilder pqb = new PinnedQueryBuilder(new MatchAllQueryBuilder(), ids); + List addedIds = pqb.ids(); + int pos = 0; + for (String key : addedIds) { + assertEquals(ids[pos++], key); + } + } + + @Override + protected Collection> nodePlugins() { + List> plugins = new ArrayList<>(); + plugins.add(SearchBusinessRules.class); + return plugins; + } + + public void testPinnedPromotions() throws Exception { + assertAcked(prepareCreate("test") + .addMapping("type1", + jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("field1") + .field("analyzer", "whitespace").field("type", "text").endObject().endObject().endObject().endObject()) + .setSettings(Settings.builder().put(indexSettings()).put("index.number_of_shards", 2))); + + int numRelevantDocs = randomIntBetween(1, 100); + for (int i = 0; i < numRelevantDocs; i++) { + if (i % 2 == 0) { + // add lower-scoring text + client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field1", "the quick brown fox").get(); + } else { + // add higher-scoring text + client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field1", "red fox").get(); + } + } + // Add docs with no relevance + int numIrrelevantDocs = randomIntBetween(1, 10); + for (int i = numRelevantDocs; i <= numRelevantDocs + numIrrelevantDocs; i++) { + client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field1", "irrelevant").get(); + } + refresh(); + + // Test doc pinning + int totalDocs = numRelevantDocs + numIrrelevantDocs; + for (int i = 0; i < 100; i++) { + int numPromotions = randomIntBetween(0, totalDocs); + + LinkedHashSet pins = new LinkedHashSet<>(); + for (int j = 0; j < numPromotions; j++) { + pins.add(Integer.toString(randomIntBetween(0, totalDocs))); + } + QueryBuilder 
organicQuery = null; + if (i % 5 == 0) { + // Occasionally try a query with no matches to check all pins still show + organicQuery = QueryBuilders.matchQuery("field1", "matchNoDocs"); + } else { + organicQuery = QueryBuilders.matchQuery("field1", "red fox"); + } + PinnedQueryBuilder pqb = new PinnedQueryBuilder(organicQuery, pins.toArray(new String[0])); + + int from = randomIntBetween(0, numRelevantDocs); + int size = randomIntBetween(10, 100); + SearchResponse searchResponse = client().prepareSearch().setQuery(pqb).setTrackTotalHits(true).setSize(size).setFrom(from) + .get(); + + long numHits = searchResponse.getHits().getTotalHits().value; + assertThat(numHits, lessThanOrEqualTo((long) numRelevantDocs + pins.size())); + + // Check pins are sorted by increasing score, (unlike organic, there are no duplicate scores) + float lastScore = Float.MAX_VALUE; + SearchHit[] hits = searchResponse.getHits().getHits(); + for (int hitNumber = 0; hitNumber < Math.min(hits.length, pins.size() - from); hitNumber++) { + assertThat("Hit " + hitNumber + " in iter " + i + " wrong" + pins, hits[hitNumber].getScore(), lessThan(lastScore)); + lastScore = hits[hitNumber].getScore(); + } + // Check that the pins appear in the requested order (globalHitNumber is cursor independent of from and size window used) + int globalHitNumber = 0; + for (String id : pins) { + if (globalHitNumber < size && globalHitNumber >= from) { + assertThat("Hit " + globalHitNumber + " in iter " + i + " wrong" + pins, hits[globalHitNumber - from].getId(), + equalTo(id)); + } + globalHitNumber++; + } + // Test the organic hits are sorted by text relevance + boolean highScoresExhausted = false; + for (; globalHitNumber < hits.length + from; globalHitNumber++) { + if (globalHitNumber >= from) { + int id = Integer.parseInt(hits[globalHitNumber - from].getId()); + if (id % 2 == 0) { + highScoresExhausted = true; + } else { + assertFalse("All odd IDs should have scored higher than even IDs in organic results", highScoresExhausted); + } + } + + } + + } + + } + + public void testExplain() throws Exception { + assertAcked(prepareCreate("test").addMapping("type1", + jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("field1") + .field("analyzer", "whitespace").field("type", "text").endObject().endObject().endObject().endObject())); + ensureGreen(); + client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox").get(); + client().prepareIndex("test", "type1", "2").setSource("field1", "pinned").get(); + client().prepareIndex("test", "type1", "3").setSource("field1", "irrelevant").get(); + client().prepareIndex("test", "type1", "4").setSource("field1", "slow brown cat").get(); + refresh(); + + PinnedQueryBuilder pqb = new PinnedQueryBuilder(QueryBuilders.matchQuery("field1", "the quick brown").operator(Operator.OR), "2"); + + SearchResponse searchResponse = client().prepareSearch().setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setQuery(pqb) + .setExplain(true).get(); + assertHitCount(searchResponse, 3); + assertFirstHit(searchResponse, hasId("2")); + assertSecondHit(searchResponse, hasId("1")); + assertThirdHit(searchResponse, hasId("4")); + + Explanation pinnedExplanation = searchResponse.getHits().getAt(0).getExplanation(); + assertThat(pinnedExplanation, notNullValue()); + assertThat(pinnedExplanation.isMatch(), equalTo(true)); + assertThat(pinnedExplanation.getDetails().length, equalTo(1)); + assertThat(pinnedExplanation.getDetails()[0].isMatch(), equalTo(true)); + 
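+        // The top hit ("2") matched through the pinned-ids clause, which is built as a BoostQuery over a
+        // ConstantScoreQuery on the _id field, so its explanation detail is expected to mention "ConstantScore"
+        // rather than the capped organic score.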
assertThat(pinnedExplanation.getDetails()[0].getDescription(), containsString("ConstantScore")); + + + } + +} diff --git a/x-pack/plugin/search-business-rules/src/test/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderTests.java b/x-pack/plugin/search-business-rules/src/test/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderTests.java new file mode 100644 index 000000000000..57db7972655e --- /dev/null +++ b/x-pack/plugin/search-business-rules/src/test/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderTests.java @@ -0,0 +1,173 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.searchbusinessrules; + +import com.fasterxml.jackson.core.io.JsonStringEncoder; + +import org.apache.lucene.search.DisjunctionMaxQuery; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.test.AbstractQueryTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import static org.hamcrest.CoreMatchers.instanceOf; + +public class PinnedQueryBuilderTests extends AbstractQueryTestCase { + @Override + protected PinnedQueryBuilder doCreateTestQueryBuilder() { + return new PinnedQueryBuilder(createRandomQuery(), generateRandomStringArray(100, 256, false, true)); + } + + private QueryBuilder createRandomQuery() { + if (randomBoolean()) { + return new MatchAllQueryBuilder(); + } else { + return createTestTermQueryBuilder(); + } + } + + private QueryBuilder createTestTermQueryBuilder() { + String fieldName = null; + Object value; + switch (randomIntBetween(0, 3)) { + case 0: + if (randomBoolean()) { + fieldName = BOOLEAN_FIELD_NAME; + } + value = randomBoolean(); + break; + case 1: + if (randomBoolean()) { + fieldName = randomFrom(STRING_FIELD_NAME, STRING_ALIAS_FIELD_NAME); + } + if (frequently()) { + value = randomAlphaOfLengthBetween(1, 10); + } else { + // generate unicode string in 10% of cases + JsonStringEncoder encoder = JsonStringEncoder.getInstance(); + value = new String(encoder.quoteAsString(randomUnicodeOfLength(10))); + } + break; + case 2: + if (randomBoolean()) { + fieldName = INT_FIELD_NAME; + } + value = randomInt(10000); + break; + case 3: + if (randomBoolean()) { + fieldName = DOUBLE_FIELD_NAME; + } + value = randomDouble(); + break; + default: + throw new UnsupportedOperationException(); + } + + if (fieldName == null) { + fieldName = randomAlphaOfLengthBetween(1, 10); + } + return new TermQueryBuilder(fieldName, value); + } + + @Override + protected void doAssertLuceneQuery(PinnedQueryBuilder queryBuilder, Query query, SearchContext searchContext) throws IOException { + if (queryBuilder.ids().size() == 0 && queryBuilder.organicQuery() == null) { + 
assertThat(query, instanceOf(MatchNoDocsQuery.class)); + } else { + if (queryBuilder.ids().size() > 0) { + // Have IDs and an organic query - uses DisMax + assertThat(query, instanceOf(DisjunctionMaxQuery.class)); + } + } + } + + @Override + protected Collection> getPlugins() { + List> classpathPlugins = new ArrayList<>(); + classpathPlugins.add(SearchBusinessRules.class); + return classpathPlugins; + } + + public void testIllegalArguments() { + expectThrows(IllegalArgumentException.class, () -> new PinnedQueryBuilder(new MatchAllQueryBuilder(), (String)null)); + expectThrows(IllegalArgumentException.class, () -> new PinnedQueryBuilder(null, "1")); + expectThrows(IllegalArgumentException.class, () -> new PinnedQueryBuilder(new MatchAllQueryBuilder(), "1", null, "2")); + String[] bigList = new String[PinnedQueryBuilder.MAX_NUM_PINNED_HITS + 1]; + for (int i = 0; i < bigList.length; i++) { + bigList[i] = String.valueOf(i); + } + expectThrows(IllegalArgumentException.class, () -> new PinnedQueryBuilder(new MatchAllQueryBuilder(), bigList)); + + } + + public void testEmptyPinnedQuery() throws Exception { + XContentBuilder contentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + contentBuilder.startObject().startObject("pinned").endObject().endObject(); + try (XContentParser xParser = createParser(contentBuilder)) { + expectThrows(ParsingException.class, () -> parseQuery(xParser).toQuery(createShardContext())); + } + } + + public void testFromJson() throws IOException { + String query = + "{" + + "\"pinned\" : {" + + " \"organic\" : {" + + " \"term\" : {" + + " \"tag\" : {" + + " \"value\" : \"tech\"," + + " \"boost\" : 1.0" + + " }" + + " }" + + " }, "+ + " \"ids\" : [ \"1\",\"2\" ]," + + " \"boost\":1.0 "+ + "}" + + "}"; + + PinnedQueryBuilder queryBuilder = (PinnedQueryBuilder) parseQuery(query); + checkGeneratedJson(query, queryBuilder); + + assertEquals(query, 2, queryBuilder.ids().size()); + assertThat(queryBuilder.organicQuery(), instanceOf(TermQueryBuilder.class)); + } + + /** + * test that unknown query names in the clauses throw an error + */ + public void testUnknownQueryName() throws IOException { + String query = "{\"pinned\" : {\"organic\" : { \"unknown_query\" : { } } } }"; + + ParsingException ex = expectThrows(ParsingException.class, () -> parseQuery(query)); + // BoolQueryBuilder test has this test for a more detailed error message: + // assertEquals("no [query] registered for [unknown_query]", ex.getMessage()); + // But ObjectParser used in PinnedQueryBuilder tends to hide the above message and give this below: + assertEquals("[1:46] [pinned] failed to parse field [organic]", ex.getMessage()); + } + + public void testRewrite() throws IOException { + PinnedQueryBuilder pinnedQueryBuilder = new PinnedQueryBuilder(new TermQueryBuilder("foo", 1), "1"); + QueryBuilder rewritten = pinnedQueryBuilder.rewrite(createShardContext()); + assertThat(rewritten, instanceOf(PinnedQueryBuilder.class)); + } + +} diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/AbstractSqlQueryRequest.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/AbstractSqlQueryRequest.java index 50a1ac239711..776f64f2962a 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/AbstractSqlQueryRequest.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/AbstractSqlQueryRequest.java @@ -194,7 +194,7 @@ public abstract class AbstractSqlQueryRequest 
extends AbstractSqlRequest impleme super(in); query = in.readString(); params = in.readList(AbstractSqlQueryRequest::readSqlTypedParamValue); - zoneId = ZoneId.of(in.readString()); + zoneId = in.readZoneId(); fetchSize = in.readVInt(); requestTimeout = in.readTimeValue(); pageTimeout = in.readTimeValue(); @@ -218,7 +218,7 @@ public abstract class AbstractSqlQueryRequest extends AbstractSqlRequest impleme for (SqlTypedParamValue param: params) { writeSqlTypedParamValue(out, param); } - out.writeString(zoneId.getId()); + out.writeZoneId(zoneId); out.writeVInt(fetchSize); out.writeTimeValue(requestTimeout); out.writeTimeValue(pageTimeout); diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java index 5553b4444605..63395611e4d4 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java @@ -86,6 +86,10 @@ public class SqlQueryResponse extends ActionResponse implements ToXContentObject return cursor; } + public boolean hasCursor() { + return StringUtils.EMPTY.equals(cursor) == false; + } + public long size() { return rows.size(); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/common/io/SqlStreamInput.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/common/io/SqlStreamInput.java new file mode 100644 index 000000000000..cc80061fc090 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/common/io/SqlStreamInput.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.common.io; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; + +import java.io.IOException; +import java.time.ZoneId; +import java.util.Base64; + +/** + * SQL-specific stream extension for {@link StreamInput} used for deserializing + * SQL components, especially on the client-side. 
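+ *
+ * The Base64 payload read by the constructors below starts with the {@link Version} of the node that wrote it,
+ * followed by the session {@link java.time.ZoneId}; the rest is the cursor-specific payload. A minimal round-trip
+ * sketch (assuming a {@code registry} and a matching {@code version} are available):
+ *
+ * <pre>
+ * SqlStreamOutput out = new SqlStreamOutput(version, zoneId);
+ * // ... a cursor writes its payload to "out" ...
+ * out.close();
+ * SqlStreamInput in = new SqlStreamInput(out.streamAsString(), registry, version);
+ * ZoneId restored = in.zoneId();
+ * </pre>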
+ */ +public class SqlStreamInput extends NamedWriteableAwareStreamInput { + + private final ZoneId zoneId; + + public SqlStreamInput(String base64encoded, NamedWriteableRegistry namedWriteableRegistry, Version version) throws IOException { + this(Base64.getDecoder().decode(base64encoded), namedWriteableRegistry, version); + } + + public SqlStreamInput(byte[] input, NamedWriteableRegistry namedWriteableRegistry, Version version) throws IOException { + super(StreamInput.wrap(input), namedWriteableRegistry); + + // version check first + Version ver = Version.readVersion(delegate); + if (version.compareTo(ver) != 0) { + throw new SqlIllegalArgumentException("Unsupported cursor version [{}], expected [{}]", ver, version); + } + delegate.setVersion(version); + // configuration settings + zoneId = delegate.readZoneId(); + } + + public ZoneId zoneId() { + return zoneId; + } + + public static SqlStreamInput asSqlStream(StreamInput in) { + if (in instanceof SqlStreamInput) { + return (SqlStreamInput) in; + } + throw new SqlIllegalArgumentException("Expected SQL cursor stream, received [{}]", in.getClass()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/common/io/SqlStreamOutput.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/common/io/SqlStreamOutput.java new file mode 100644 index 000000000000..e5e14e82c26c --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/common/io/SqlStreamOutput.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.common.io; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.time.ZoneId; +import java.util.Base64; + +public class SqlStreamOutput extends OutputStreamStreamOutput { + + private final ByteArrayOutputStream bytes; + + public SqlStreamOutput(Version version, ZoneId zoneId) throws IOException { + this(new ByteArrayOutputStream(), version, zoneId); + } + + private SqlStreamOutput(ByteArrayOutputStream bytes, Version version, ZoneId zoneId) throws IOException { + super(Base64.getEncoder().wrap(new OutputStreamStreamOutput(bytes))); + this.bytes = bytes; + + Version.writeVersion(version, this); + writeZoneId(zoneId); + } + + /** + * Should be called _after_ closing the stream - there are no guarantees otherwise. 
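+     * The returned value is the Base64 text produced by the wrapping encoder, read back as ISO-8859-1 (Base64
+     * output is plain ASCII, so no information is lost).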
+ */ + public String streamAsString() { + // Base64 uses this encoding instead of UTF-8 + return bytes.toString(StandardCharsets.ISO_8859_1); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/PlanExecutor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/PlanExecutor.java index 815c85b7fed8..ca394bf11d88 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/PlanExecutor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/PlanExecutor.java @@ -23,8 +23,7 @@ import org.elasticsearch.xpack.sql.planner.PlanningException; import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.session.Configuration; import org.elasticsearch.xpack.sql.session.Cursor; -import org.elasticsearch.xpack.sql.session.RowSet; -import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.Cursor.Page; import org.elasticsearch.xpack.sql.session.SqlSession; import org.elasticsearch.xpack.sql.stats.Metrics; import org.elasticsearch.xpack.sql.stats.QueryMetric; @@ -91,7 +90,7 @@ public class PlanExecutor { }, listener::onFailure)); } - public void sql(Configuration cfg, String sql, List params, ActionListener listener) { + public void sql(Configuration cfg, String sql, List params, ActionListener listener) { QueryMetric metric = QueryMetric.from(cfg.mode(), cfg.clientId()); metrics.total(metric); @@ -101,7 +100,7 @@ public class PlanExecutor { })); } - public void nextPage(Configuration cfg, Cursor cursor, ActionListener listener) { + public void nextPage(Configuration cfg, Cursor cursor, ActionListener listener) { QueryMetric metric = QueryMetric.from(cfg.mode(), cfg.clientId()); metrics.total(metric); metrics.paging(metric); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursor.java index 26c1a690a400..76c05c4f6a1b 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursor.java @@ -27,7 +27,8 @@ import org.elasticsearch.xpack.sql.execution.search.extractor.BucketExtractor; import org.elasticsearch.xpack.sql.querydsl.agg.Aggs; import org.elasticsearch.xpack.sql.session.Configuration; import org.elasticsearch.xpack.sql.session.Cursor; -import org.elasticsearch.xpack.sql.session.RowSet; +import org.elasticsearch.xpack.sql.session.Rows; +import org.elasticsearch.xpack.sql.type.Schema; import org.elasticsearch.xpack.sql.util.StringUtils; import java.io.IOException; @@ -36,6 +37,8 @@ import java.util.BitSet; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.Consumer; +import java.util.function.Function; /** * Cursor for composite aggregation (GROUP BY). 
@@ -116,7 +119,7 @@ public class CompositeAggregationCursor implements Cursor { } @Override - public void nextPage(Configuration cfg, Client client, NamedWriteableRegistry registry, ActionListener listener) { + public void nextPage(Configuration cfg, Client client, NamedWriteableRegistry registry, ActionListener listener) { SearchSourceBuilder q; try { q = deserializeQuery(registry, nextQuery); @@ -135,21 +138,11 @@ public class CompositeAggregationCursor implements Cursor { client.search(search, new ActionListener<>() { @Override public void onResponse(SearchResponse r) { - try { - // retry - if (shouldRetryDueToEmptyPage(r)) { - CompositeAggregationCursor.updateCompositeAfterKey(r, search.source()); - client.search(search, this); - return; - } - - boolean hasAfterKey = updateCompositeAfterKey(r, query); - CompositeAggsRowSet rowSet = new CompositeAggsRowSet(extractors, mask, r, limit, - hasAfterKey ? serializeQuery(query) : null, includeFrozen, indices); - listener.onResponse(rowSet); - } catch (Exception ex) { - listener.onFailure(ex); - } + handle(r, search.source(), ba -> new CompositeAggsRowSet(extractors, mask, r, limit, ba), + () -> client.search(search, this), + p -> listener.onResponse(p), + e -> listener.onFailure(e), + Schema.EMPTY, includeFrozen, indices); } @Override @@ -159,6 +152,39 @@ public class CompositeAggregationCursor implements Cursor { }); } + static void handle(SearchResponse response, SearchSourceBuilder source, Function makeRowSet, + Runnable retry, Consumer onPage, Consumer onFailure, + Schema schema, boolean includeFrozen, String[] indices) { + + // there are some results + if (response.getAggregations().asList().isEmpty() == false) { + // retry + if (CompositeAggregationCursor.shouldRetryDueToEmptyPage(response)) { + CompositeAggregationCursor.updateCompositeAfterKey(response, source); + retry.run(); + return; + } + + try { + boolean hasAfterKey = updateCompositeAfterKey(response, source); + byte[] queryAsBytes = hasAfterKey ? serializeQuery(source) : null; + CompositeAggsRowSet rowSet = makeRowSet.apply(queryAsBytes); + + Cursor next = rowSet.remainingData() == 0 + ? 
Cursor.EMPTY + : new CompositeAggregationCursor(queryAsBytes, rowSet.extractors(), rowSet.mask(), + rowSet.remainingData(), includeFrozen, indices); + onPage.accept(new Page(rowSet, next)); + } catch (Exception ex) { + onFailure.accept(ex); + } + } + // no results + else { + onPage.accept(Page.last(Rows.empty(schema))); + } + } + static boolean shouldRetryDueToEmptyPage(SearchResponse response) { CompositeAggregation composite = getComposite(response); // if there are no buckets but a next page, go fetch it instead of sending an empty response to the client diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggsRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggsRowSet.java index 88b93359d0f7..dd6b85279cb2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggsRowSet.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggsRowSet.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.sql.execution.search; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; import org.elasticsearch.xpack.sql.execution.search.extractor.BucketExtractor; -import org.elasticsearch.xpack.sql.session.Cursor; import org.elasticsearch.xpack.sql.session.RowSet; import java.util.BitSet; @@ -22,14 +21,11 @@ import static java.util.Collections.emptyList; class CompositeAggsRowSet extends ResultRowSet { private final List buckets; - - private final Cursor cursor; - + private final int remainingData; private final int size; private int row = 0; - CompositeAggsRowSet(List exts, BitSet mask, SearchResponse response, - int limit, byte[] next, boolean includeFrozen, String... indices) { + CompositeAggsRowSet(List exts, BitSet mask, SearchResponse response, int limit, byte[] next) { super(exts, mask); CompositeAggregation composite = CompositeAggregationCursor.getComposite(response); @@ -43,7 +39,7 @@ class CompositeAggsRowSet extends ResultRowSet { size = limit == -1 ? buckets.size() : Math.min(buckets.size(), limit); if (next == null) { - cursor = Cursor.EMPTY; + remainingData = 0; } else { // Compute remaining limit @@ -56,9 +52,9 @@ class CompositeAggsRowSet extends ResultRowSet { // however the Querier takes care of that and keeps making requests until either the query is invalid or at least one response // is returned. if (size == 0 || remainingLimit == 0) { - cursor = Cursor.EMPTY; + remainingData = 0; } else { - cursor = new CompositeAggregationCursor(next, exts, mask, remainingLimit, includeFrozen, indices); + remainingData = remainingLimit; } } } @@ -92,8 +88,7 @@ class CompositeAggsRowSet extends ResultRowSet { return size; } - @Override - public Cursor nextPageCursor() { - return cursor; + int remainingData() { + return remainingData; } -} +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/PagingListRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/PagingListRowSet.java deleted file mode 100644 index 73da15255f0f..000000000000 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/PagingListRowSet.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -package org.elasticsearch.xpack.sql.execution.search; - -import org.elasticsearch.xpack.sql.session.Cursor; -import org.elasticsearch.xpack.sql.session.ListRowSet; -import org.elasticsearch.xpack.sql.type.Schema; - -import java.util.List; - -class PagingListRowSet extends ListRowSet { - - private final int pageSize; - private final int columnCount; - private final Cursor cursor; - - PagingListRowSet(List> list, int columnCount, int pageSize) { - this(Schema.EMPTY, list, columnCount, pageSize); - } - - PagingListRowSet(Schema schema, List> list, int columnCount, int pageSize) { - super(schema, list); - this.columnCount = columnCount; - this.pageSize = Math.min(pageSize, list.size()); - this.cursor = list.size() > pageSize ? new PagingListCursor(list, columnCount, pageSize) : Cursor.EMPTY; - } - - @Override - public int size() { - return pageSize; - } - - @Override - public int columnCount() { - return columnCount; - } - - @Override - public Cursor nextPageCursor() { - return cursor; - } -} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java index 7949505d162f..10b4d8663ef6 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java @@ -20,7 +20,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.MultiBucketConsumerService; @@ -57,6 +56,8 @@ import org.elasticsearch.xpack.sql.querydsl.container.SearchHitFieldRef; import org.elasticsearch.xpack.sql.querydsl.container.TopHitsAggRef; import org.elasticsearch.xpack.sql.session.Configuration; import org.elasticsearch.xpack.sql.session.Cursor; +import org.elasticsearch.xpack.sql.session.Cursor.Page; +import org.elasticsearch.xpack.sql.session.ListCursor; import org.elasticsearch.xpack.sql.session.RowSet; import org.elasticsearch.xpack.sql.session.Rows; import org.elasticsearch.xpack.sql.session.SchemaRowSet; @@ -75,6 +76,7 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import static java.util.Collections.singletonList; +import static org.elasticsearch.action.ActionListener.wrap; // TODO: add retry/back-off public class Querier { @@ -98,7 +100,7 @@ public class Querier { this.size = cfg.pageSize(); } - public void query(List output, QueryContainer query, String index, ActionListener listener) { + public void query(List output, QueryContainer query, String index, ActionListener listener) { // prepare the request SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(query, filter, size); // set query timeout @@ -152,22 +154,19 @@ public class Querier { * results back to the client. */ @SuppressWarnings("rawtypes") - class LocalAggregationSorterListener implements ActionListener { + class LocalAggregationSorterListener implements ActionListener { - private final ActionListener listener; + private final ActionListener listener; // keep the top N entries. 
private final AggSortingQueue data; private final AtomicInteger counter = new AtomicInteger(); private volatile Schema schema; - /** - * Match the default value for {@link MultiBucketConsumerService#MAX_BUCKET_SETTING} - */ - private static final int MAXIMUM_SIZE = 10_000; + private static final int MAXIMUM_SIZE = MultiBucketConsumerService.DEFAULT_MAX_BUCKETS; private final boolean noLimit; - LocalAggregationSorterListener(ActionListener listener, List> sortingColumns, int limit) { + LocalAggregationSorterListener(ActionListener listener, List> sortingColumns, int limit) { this.listener = listener; int size = MAXIMUM_SIZE; @@ -186,20 +185,26 @@ public class Querier { } @Override - public void onResponse(SchemaRowSet schemaRowSet) { - schema = schemaRowSet.schema(); - doResponse(schemaRowSet); - } + public void onResponse(Page page) { + // schema is set on the first page (as the rest don't hold the schema anymore) + if (schema == null) { + RowSet rowSet = page.rowSet(); + if (rowSet instanceof SchemaRowSet) { + schema = ((SchemaRowSet) rowSet).schema(); + } else { + onFailure(new SqlIllegalArgumentException("No schema found inside {}", rowSet.getClass())); + return; + } + } - private void doResponse(RowSet rowSet) { // 1. consume all pages received - consumeRowSet(rowSet); + consumeRowSet(page.rowSet()); - Cursor cursor = rowSet.nextPageCursor(); + Cursor cursor = page.next(); // 1a. trigger a next call if there's still data if (cursor != Cursor.EMPTY) { // trigger a next call - planExecutor.nextPage(cfg, cursor, ActionListener.wrap(this::doResponse, this::onFailure)); + planExecutor.nextPage(cfg, cursor, this); // make sure to bail out afterwards as we'll get called by a different thread return; } @@ -223,7 +228,7 @@ public class Querier { } private void sendResponse() { - listener.onResponse(new PagingListRowSet(schema, data.asList(), schema.size(), cfg.pageSize())); + listener.onResponse(ListCursor.of(schema, data.asList(), cfg.pageSize())); } @Override @@ -265,13 +270,13 @@ public class Querier { } }); - ImplicitGroupActionListener(ActionListener listener, Client client, Configuration cfg, List output, + ImplicitGroupActionListener(ActionListener listener, Client client, Configuration cfg, List output, QueryContainer query, SearchRequest request) { super(listener, client, cfg, output, query, request); } @Override - protected void handleResponse(SearchResponse response, ActionListener listener) { + protected void handleResponse(SearchResponse response, ActionListener listener) { Aggregations aggs = response.getAggregations(); if (aggs != null) { Aggregation agg = aggs.get(Aggs.ROOT_GROUP_NAME); @@ -298,10 +303,10 @@ public class Querier { for (int i = mask.nextSetBit(0); i >= 0; i = mask.nextSetBit(i + 1)) { values[index++] = extractors.get(i).extract(implicitGroup); } - listener.onResponse(Rows.singleton(schema, values)); + listener.onResponse(Page.last(Rows.singleton(schema, values))); } else if (buckets.isEmpty()) { - listener.onResponse(Rows.empty(schema)); + listener.onResponse(Page.last(Rows.empty(schema))); } else { throw new SqlIllegalArgumentException("Too many groups returned by the implicit group; expected 1, received {}", @@ -316,43 +321,21 @@ public class Querier { */ static class CompositeActionListener extends BaseAggActionListener { - CompositeActionListener(ActionListener listener, Client client, Configuration cfg, + CompositeActionListener(ActionListener listener, Client client, Configuration cfg, List output, QueryContainer query, SearchRequest request) { 
super(listener, client, cfg, output, query, request); } @Override - protected void handleResponse(SearchResponse response, ActionListener listener) { - // there are some results - if (response.getAggregations().asList().isEmpty() == false) { - - // retry - if (CompositeAggregationCursor.shouldRetryDueToEmptyPage(response)) { - CompositeAggregationCursor.updateCompositeAfterKey(response, request.source()); - client.search(request, this); - return; - } - - CompositeAggregationCursor.updateCompositeAfterKey(response, request.source()); - byte[] nextSearch; - try { - nextSearch = CompositeAggregationCursor.serializeQuery(request.source()); - } catch (Exception ex) { - listener.onFailure(ex); - return; - } - - listener.onResponse( - new SchemaCompositeAggsRowSet(schema, initBucketExtractors(response), mask, response, - query.sortingColumns().isEmpty() ? query.limit() : -1, - nextSearch, - query.shouldIncludeFrozen(), - request.indices())); - } - // no results - else { - listener.onResponse(Rows.empty(schema)); - } + protected void handleResponse(SearchResponse response, ActionListener listener) { + + CompositeAggregationCursor.handle(response, request.source(), + ba -> new SchemaCompositeAggsRowSet(schema, initBucketExtractors(response), mask, response, + query.sortingColumns().isEmpty() ? query.limit() : -1, ba), + () -> client.search(request, this), + p -> listener.onResponse(p), + e -> listener.onFailure(e), + schema, query.shouldIncludeFrozen(), request.indices()); } } @@ -361,7 +344,7 @@ public class Querier { final SearchRequest request; final BitSet mask; - BaseAggActionListener(ActionListener listener, Client client, Configuration cfg, List output, + BaseAggActionListener(ActionListener listener, Client client, Configuration cfg, List output, QueryContainer query, SearchRequest request) { super(listener, client, cfg, output); @@ -426,7 +409,7 @@ public class Querier { private final BitSet mask; private final boolean multiValueFieldLeniency; - ScrollActionListener(ActionListener listener, Client client, Configuration cfg, + ScrollActionListener(ActionListener listener, Client client, Configuration cfg, List output, QueryContainer query) { super(listener, client, cfg, output); this.query = query; @@ -435,9 +418,7 @@ public class Querier { } @Override - protected void handleResponse(SearchResponse response, ActionListener listener) { - SearchHit[] hits = response.getHits().getHits(); - + protected void handleResponse(SearchResponse response, ActionListener listener) { // create response extractors for the first time List> refs = query.fields(); @@ -446,30 +427,10 @@ public class Querier { exts.add(createExtractor(ref.v1())); } - // there are some results - if (hits.length > 0) { - String scrollId = response.getScrollId(); - SchemaSearchHitRowSet hitRowSet = new SchemaSearchHitRowSet(schema, exts, mask, hits, query.limit(), scrollId); - - // if there's an id, try to setup next scroll - if (scrollId != null && - // is all the content already retrieved? 
- (Boolean.TRUE.equals(response.isTerminatedEarly()) - || response.getHits().getTotalHits().value == hits.length - || hitRowSet.isLimitReached())) { - // if so, clear the scroll - clear(response.getScrollId(), ActionListener.wrap( - succeeded -> listener.onResponse(new SchemaSearchHitRowSet(schema, exts, mask, hits, query.limit(), null)), - listener::onFailure)); - } else { - listener.onResponse(hitRowSet); - } - } - // no hits - else { - clear(response.getScrollId(), ActionListener.wrap(succeeded -> listener.onResponse(Rows.empty(schema)), - listener::onFailure)); - } + ScrollCursor.handle(response, () -> new SchemaSearchHitRowSet(schema, exts, mask, query.limit(), response), + p -> listener.onResponse(p), + p -> clear(response.getScrollId(), wrap(success -> listener.onResponse(p), listener::onFailure)), + schema); } private HitExtractor createExtractor(FieldExtraction ref) { @@ -515,14 +476,14 @@ public class Querier { */ abstract static class BaseActionListener implements ActionListener { - final ActionListener listener; + final ActionListener listener; final Client client; final Configuration cfg; final TimeValue keepAlive; final Schema schema; - BaseActionListener(ActionListener listener, Client client, Configuration cfg, List output) { + BaseActionListener(ActionListener listener, Client client, Configuration cfg, List output) { this.listener = listener; this.client = client; @@ -546,7 +507,7 @@ public class Querier { } } - protected abstract void handleResponse(SearchResponse response, ActionListener listener); + protected abstract void handleResponse(SearchResponse response, ActionListener listener); // clean-up the scroll in case of exception protected final void cleanup(SearchResponse response, Exception ex) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaCompositeAggsRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaCompositeAggsRowSet.java index 3ec4ff6b1142..7eeb8b28f154 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaCompositeAggsRowSet.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaCompositeAggsRowSet.java @@ -22,11 +22,8 @@ class SchemaCompositeAggsRowSet extends CompositeAggsRowSet implements SchemaRow private final Schema schema; - SchemaCompositeAggsRowSet(Schema schema, List exts, BitSet mask, SearchResponse response, int limitAggs, - byte[] next, - boolean includeFrozen, - String... 
indices) { - super(exts, mask, response, limitAggs, next, includeFrozen, indices); + SchemaCompositeAggsRowSet(Schema schema, List exts, BitSet mask, SearchResponse r, int limitAggs, byte[] next) { + super(exts, mask, r, limitAggs, next); this.schema = schema; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaSearchHitRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaSearchHitRowSet.java index aa5c57aab609..7ba7a06fd8a3 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaSearchHitRowSet.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaSearchHitRowSet.java @@ -5,7 +5,7 @@ */ package org.elasticsearch.xpack.sql.execution.search; -import org.elasticsearch.search.SearchHit; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractor; import org.elasticsearch.xpack.sql.session.SchemaRowSet; import org.elasticsearch.xpack.sql.type.Schema; @@ -21,8 +21,8 @@ import java.util.List; class SchemaSearchHitRowSet extends SearchHitRowSet implements SchemaRowSet { private final Schema schema; - SchemaSearchHitRowSet(Schema schema, List exts, BitSet mask, SearchHit[] hits, int limitHits, String scrollId) { - super(exts, mask, hits, limitHits, scrollId); + SchemaSearchHitRowSet(Schema schema, List exts, BitSet mask, int limitHits, SearchResponse response) { + super(exts, mask, limitHits, response); this.schema = schema; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursor.java index af57126cc561..55f78db40739 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursor.java @@ -14,18 +14,25 @@ import org.elasticsearch.action.search.ClearScrollResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.client.Client; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.SearchHit; import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractor; import org.elasticsearch.xpack.sql.session.Configuration; import org.elasticsearch.xpack.sql.session.Cursor; -import org.elasticsearch.xpack.sql.session.RowSet; +import org.elasticsearch.xpack.sql.session.Rows; +import org.elasticsearch.xpack.sql.type.Schema; import java.io.IOException; import java.util.BitSet; import java.util.List; import java.util.Objects; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import static org.elasticsearch.action.ActionListener.wrap; public class ScrollCursor implements Cursor { @@ -83,29 +90,48 @@ public class ScrollCursor implements Cursor { return limit; } @Override - public void nextPage(Configuration cfg, Client client, NamedWriteableRegistry registry, ActionListener listener) { - log.trace("About to execute scroll query {}", scrollId); + public void nextPage(Configuration cfg, Client client, NamedWriteableRegistry registry, ActionListener listener) { + if 
(log.isTraceEnabled()) { + log.trace("About to execute scroll query {}", scrollId); + } SearchScrollRequest request = new SearchScrollRequest(scrollId).scroll(cfg.pageTimeout()); - client.searchScroll(request, ActionListener.wrap((SearchResponse response) -> { - SearchHitRowSet rowSet = new SearchHitRowSet(extractors, mask, response.getHits().getHits(), - limit, response.getScrollId()); - if (rowSet.nextPageCursor() == Cursor.EMPTY ) { - // we are finished with this cursor, let's clean it before continuing - clear(cfg, client, ActionListener.wrap(success -> listener.onResponse(rowSet), listener::onFailure)); - } else { - listener.onResponse(rowSet); - } + client.searchScroll(request, wrap(response -> { + handle(response, () -> new SearchHitRowSet(extractors, mask, limit, response), + p -> listener.onResponse(p), + p -> clear(cfg, client, wrap(success -> listener.onResponse(p), listener::onFailure)), + Schema.EMPTY); }, listener::onFailure)); } @Override public void clear(Configuration cfg, Client client, ActionListener listener) { - cleanCursor(client, scrollId, - ActionListener.wrap( + cleanCursor(client, scrollId, wrap( clearScrollResponse -> listener.onResponse(clearScrollResponse.isSucceeded()), listener::onFailure)); } + + static void handle(SearchResponse response, Supplier makeRowHit, Consumer onPage, Consumer clearScroll, + Schema schema) { + SearchHit[] hits = response.getHits().getHits(); + // clean-up + if (hits.length > 0) { + SearchHitRowSet rowSet = makeRowHit.get(); + Tuple nextScrollData = rowSet.nextScrollData(); + + if (nextScrollData == null) { + // no more data, let's clean the scroll before continuing + clearScroll.accept(Page.last(rowSet)); + } else { + Cursor next = new ScrollCursor(nextScrollData.v1(), rowSet.extractors(), rowSet.mask(), nextScrollData.v2()); + onPage.accept(new Page(rowSet, next)); + } + } + // no-hits + else { + clearScroll.accept(Page.last(Rows.empty(schema))); + } + } @Override public boolean equals(Object obj) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitRowSet.java index 97b369a7a12e..5a11bfde6456 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitRowSet.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitRowSet.java @@ -5,11 +5,13 @@ */ package org.elasticsearch.xpack.sql.execution.search; +import org.apache.lucene.search.TotalHits; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractor; -import org.elasticsearch.xpack.sql.session.Cursor; import java.util.ArrayList; import java.util.Arrays; @@ -29,18 +31,19 @@ import java.util.Set; class SearchHitRowSet extends ResultRowSet { private final SearchHit[] hits; private final Map> flatInnerHits = new HashMap<>(); - private final Cursor cursor; private final Set innerHits = new LinkedHashSet<>(); private final String innerHit; private final int size; private final int[] indexPerLevel; + private final Tuple nextScrollData; + private int row = 0; - SearchHitRowSet(List exts, BitSet mask, SearchHit[] hits, int limit, String scrollId) { + SearchHitRowSet(List exts, BitSet mask, int limit, 
SearchResponse response) { super(exts, mask); - this.hits = hits; + this.hits = response.getHits().getHits(); // Since the results might contain nested docs, the iteration is similar to that of Aggregation // namely it discovers the nested docs and then, for iteration, increments the deepest level first @@ -81,24 +84,30 @@ class SearchHitRowSet extends ResultRowSet { indexPerLevel = new int[maxDepth + 1]; this.innerHit = innerHit; + String scrollId = response.getScrollId(); + if (scrollId == null) { /* SearchResponse can contain a null scroll when you start a * scroll but all results fit in the first page. */ - cursor = Cursor.EMPTY; + nextScrollData = null; } else { + TotalHits totalHits = response.getHits().getTotalHits(); + // compute remaining limit (only if the limit is specified - that is, positive). int remainingLimit = limit < 0 ? limit : limit - size; // if the computed limit is zero, or the size is zero it means either there's nothing left or the limit has been reached - if (size == 0 || remainingLimit == 0) { - cursor = Cursor.EMPTY; + if (size == 0 || remainingLimit == 0 + // or the scroll has ended + || totalHits != null && totalHits.value == hits.length) { + nextScrollData = null; } else { - cursor = new ScrollCursor(scrollId, extractors(), mask, remainingLimit); + nextScrollData = new Tuple<>(scrollId, remainingLimit); } } } protected boolean isLimitReached() { - return cursor == Cursor.EMPTY; + return nextScrollData == null; } @Override @@ -131,8 +140,8 @@ class SearchHitRowSet extends ResultRowSet { int endOfPath = entry.getKey().lastIndexOf('_'); if (endOfPath >= 0 && entry.getKey().substring(0, endOfPath).equals(path)) { SearchHit[] h = entry.getValue().getHits(); - for (int i = 0; i < h.length; i++) { - lhm.put(h[i].getNestedIdentity().getOffset(), h[i]); + for (SearchHit element : h) { + lhm.put(element.getNestedIdentity().getOffset(), element); } } } @@ -146,7 +155,7 @@ class SearchHitRowSet extends ResultRowSet { } private class NestedHitOffsetComparator implements Comparator { - @Override + @Override public int compare(SearchHit sh1, SearchHit sh2) { if (sh1 == null && sh2 == null) { return 0; @@ -210,8 +219,7 @@ class SearchHitRowSet extends ResultRowSet { return size; } - @Override - public Cursor nextPageCursor() { - return cursor; + Tuple nextScrollData() { + return nextScrollData; } } \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractor.java index 1b2e8a3abc09..c4584eb80dc5 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractor.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.common.io.SqlStreamInput; import org.elasticsearch.xpack.sql.querydsl.container.GroupByRef.Property; import org.elasticsearch.xpack.sql.util.DateUtils; @@ -42,15 +43,15 @@ public class CompositeKeyExtractor implements BucketExtractor { CompositeKeyExtractor(StreamInput in) throws IOException { key = in.readString(); property = 
in.readEnum(Property.class); - zoneId = ZoneId.of(in.readString()); isDateTimeBased = in.readBoolean(); + + zoneId = SqlStreamInput.asSqlStream(in).zoneId(); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(key); out.writeEnum(property); - out.writeString(zoneId.getId()); out.writeBoolean(isDateTimeBased); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java index f21a6ee96d1e..d7609ebc8f9f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.mapper.IgnoredFieldMapper; import org.elasticsearch.search.SearchHit; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.common.io.SqlStreamInput; import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape; import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.util.DateUtils; @@ -95,11 +96,12 @@ public class FieldHitExtractor implements HitExtractor { } String esType = in.readOptionalString(); dataType = esType != null ? DataType.fromTypeName(esType) : null; - zoneId = ZoneId.of(in.readString()); useDocValue = in.readBoolean(); hitName = in.readOptionalString(); arrayLeniency = in.readBoolean(); path = sourcePath(fieldName, useDocValue, hitName); + + zoneId = SqlStreamInput.asSqlStream(in).zoneId(); } @Override @@ -114,7 +116,6 @@ public class FieldHitExtractor implements HitExtractor { out.writeOptionalString(fullFieldName); } out.writeOptionalString(dataType == null ? 
null : dataType.typeName); - out.writeString(zoneId.getId()); out.writeBoolean(useDocValue); out.writeOptionalString(hitName); out.writeBoolean(arrayLeniency); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractor.java index ca9e8e1434a7..07309c358220 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractor.java @@ -21,6 +21,7 @@ import org.elasticsearch.search.aggregations.metrics.InternalSum; import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentileRanks; import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentiles; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.common.io.SqlStreamInput; import org.elasticsearch.xpack.sql.querydsl.agg.Aggs; import org.elasticsearch.xpack.sql.util.DateUtils; @@ -55,7 +56,8 @@ public class MetricAggExtractor implements BucketExtractor { property = in.readString(); innerKey = in.readOptionalString(); isDateTimeBased = in.readBoolean(); - zoneId = ZoneId.of(in.readString()); + + zoneId = SqlStreamInput.asSqlStream(in).zoneId(); } @Override @@ -64,7 +66,6 @@ public class MetricAggExtractor implements BucketExtractor { out.writeString(property); out.writeOptionalString(innerKey); out.writeBoolean(isDateTimeBased); - out.writeString(zoneId.getId()); } String name() { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractor.java index 14d50f7c9a09..b315b4ce16b5 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractor.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; import org.elasticsearch.search.aggregations.metrics.InternalTopHits; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.common.io.SqlStreamInput; import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.util.DateUtils; @@ -34,14 +35,13 @@ public class TopHitsAggExtractor implements BucketExtractor { TopHitsAggExtractor(StreamInput in) throws IOException { name = in.readString(); fieldDataType = in.readEnum(DataType.class); - zoneId = ZoneId.of(in.readString()); + zoneId = SqlStreamInput.asSqlStream(in).zoneId(); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeEnum(fieldDataType); - out.writeString(zoneId.getId()); } String name() { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeProcessor.java index ddab74aa927a..3a4a5709e4f5 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeProcessor.java +++ 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/BaseDateTimeProcessor.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.common.io.SqlStreamInput; import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; import java.io.IOException; @@ -24,12 +24,7 @@ public abstract class BaseDateTimeProcessor implements Processor { } BaseDateTimeProcessor(StreamInput in) throws IOException { - zoneId = ZoneId.of(in.readString()); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(zoneId.getId()); + zoneId = SqlStreamInput.asSqlStream(in).zoneId(); } ZoneId zoneId() { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java index 4a3991472995..d0f7b5d9afc3 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java @@ -63,7 +63,6 @@ public class DateTimeProcessor extends BaseDateTimeProcessor { @Override public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); out.writeEnum(extractor); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessor.java index 7a23b40be788..47ade08aa596 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessor.java @@ -57,7 +57,6 @@ public class NamedDateTimeProcessor extends BaseDateTimeProcessor { @Override public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); out.writeEnum(extractor); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NonIsoDateTimeProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NonIsoDateTimeProcessor.java index 714c7c86927e..7f916be31bdb 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NonIsoDateTimeProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NonIsoDateTimeProcessor.java @@ -72,7 +72,6 @@ public class NonIsoDateTimeProcessor extends BaseDateTimeProcessor { @Override public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); out.writeEnum(extractor); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessor.java index 7d09093d35fb..5630860cc489 100644 --- 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/QuarterProcessor.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; import java.time.ZoneId; @@ -16,6 +17,10 @@ import java.util.Locale; import java.util.Objects; public class QuarterProcessor extends BaseDateTimeProcessor { + + public static final String NAME = "q"; + private static final DateTimeFormatter QUARTER_FORMAT = DateTimeFormatter.ofPattern("q", Locale.ROOT); + public QuarterProcessor(ZoneId zoneId) { super(zoneId); @@ -25,8 +30,8 @@ public class QuarterProcessor extends BaseDateTimeProcessor { super(in); } - public static final String NAME = "q"; - private static final DateTimeFormatter QUARTER_FORMAT = DateTimeFormatter.ofPattern("q", Locale.ROOT); + @Override + public void writeTo(StreamOutput out) throws IOException {} @Override public String getWriteableName() { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/LocalRelation.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/LocalRelation.java index 53a485a3b054..9abe6fef3d4b 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/LocalRelation.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/LocalRelation.java @@ -7,11 +7,11 @@ package org.elasticsearch.xpack.sql.plan.logical; import org.elasticsearch.action.ActionListener; import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.session.Cursor.Page; import org.elasticsearch.xpack.sql.session.Executable; -import org.elasticsearch.xpack.sql.session.SchemaRowSet; import org.elasticsearch.xpack.sql.session.SqlSession; -import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; import java.util.List; import java.util.Objects; @@ -52,7 +52,7 @@ public class LocalRelation extends LogicalPlan implements Executable { } @Override - public void execute(SqlSession session, ActionListener listener) { + public void execute(SqlSession session, ActionListener listener) { executable.execute(session, listener); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Command.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Command.java index 72ae456a33b7..0b9766345869 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Command.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Command.java @@ -7,7 +7,11 @@ package org.elasticsearch.xpack.sql.plan.logical.command; import org.elasticsearch.xpack.sql.expression.FieldAttribute; import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.session.Cursor.Page; import org.elasticsearch.xpack.sql.session.Executable; +import org.elasticsearch.xpack.sql.session.ListCursor; +import org.elasticsearch.xpack.sql.session.Rows; +import org.elasticsearch.xpack.sql.session.SqlSession; import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; import 
org.elasticsearch.xpack.sql.type.EsField; @@ -51,4 +55,8 @@ public abstract class Command extends LogicalPlan implements Executable { private FieldAttribute field(String name, EsField field) { return new FieldAttribute(source(), name, field); } + + protected Page of(SqlSession session, List> values) { + return ListCursor.of(Rows.schema(output()), values, session.configuration().pageSize()); + } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Debug.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Debug.java index f4aa378bbce7..eda730e8adbb 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Debug.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Debug.java @@ -12,13 +12,13 @@ import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.sql.rule.RuleExecutor.Batch; import org.elasticsearch.xpack.sql.rule.RuleExecutor.ExecutionInfo; import org.elasticsearch.xpack.sql.rule.RuleExecutor.Transformation; +import org.elasticsearch.xpack.sql.session.Cursor.Page; import org.elasticsearch.xpack.sql.session.Rows; -import org.elasticsearch.xpack.sql.session.SchemaRowSet; import org.elasticsearch.xpack.sql.session.SqlSession; -import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.tree.Node; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.tree.NodeUtils; +import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.KeywordEsField; import org.elasticsearch.xpack.sql.util.Graphviz; @@ -75,7 +75,7 @@ public class Debug extends Command { } @Override - public void execute(SqlSession session, ActionListener listener) { + public void execute(SqlSession session, ActionListener listener) { switch (type) { case ANALYZED: session.debugAnalyzedPlan(plan, wrap(i -> handleInfo(i, listener), listener::onFailure)); @@ -90,7 +90,7 @@ public class Debug extends Command { } @SuppressWarnings({ "rawtypes", "unchecked" }) - private void handleInfo(ExecutionInfo info, ActionListener listener) { + private void handleInfo(ExecutionInfo info, ActionListener listener) { String planString = null; if (format == Format.TEXT) { @@ -135,7 +135,7 @@ public class Debug extends Command { } } - listener.onResponse(Rows.singleton(output(), planString)); + listener.onResponse(Page.last(Rows.singleton(output(), planString))); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Explain.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Explain.java index c6904a87f3f4..d3eac1cd6bbb 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Explain.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Explain.java @@ -13,11 +13,11 @@ import org.elasticsearch.xpack.sql.plan.QueryPlan; import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.sql.planner.Planner; +import org.elasticsearch.xpack.sql.session.Cursor.Page; import org.elasticsearch.xpack.sql.session.Rows; -import org.elasticsearch.xpack.sql.session.SchemaRowSet; import org.elasticsearch.xpack.sql.session.SqlSession; -import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.tree.NodeInfo; +import 
org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.KeywordEsField; import org.elasticsearch.xpack.sql.util.Graphviz; @@ -85,10 +85,10 @@ public class Explain extends Command { } @Override - public void execute(SqlSession session, ActionListener listener) { + public void execute(SqlSession session, ActionListener listener) { if (type == Type.PARSED) { - listener.onResponse(Rows.singleton(output(), formatPlan(format, plan))); + listener.onResponse(Page.last(Rows.singleton(output(), formatPlan(format, plan)))); return; } @@ -96,7 +96,7 @@ public class Explain extends Command { session.analyzedPlan(plan, verify, wrap(analyzedPlan -> { if (type == Type.ANALYZED) { - listener.onResponse(Rows.singleton(output(), formatPlan(format, analyzedPlan))); + listener.onResponse(Page.last(Rows.singleton(output(), formatPlan(format, analyzedPlan)))); return; } @@ -105,25 +105,25 @@ public class Explain extends Command { if (verify) { session.optimizedPlan(analyzedPlan, wrap(optimizedPlan -> { if (type == Type.OPTIMIZED) { - listener.onResponse(Rows.singleton(output(), formatPlan(format, optimizedPlan))); + listener.onResponse(Page.last(Rows.singleton(output(), formatPlan(format, optimizedPlan)))); return; } PhysicalPlan mappedPlan = planner.mapPlan(optimizedPlan, verify); if (type == Type.MAPPED) { - listener.onResponse(Rows.singleton(output(), formatPlan(format, mappedPlan))); + listener.onResponse(Page.last(Rows.singleton(output(), formatPlan(format, mappedPlan)))); return; } PhysicalPlan executablePlan = planner.foldPlan(mappedPlan, verify); if (type == Type.EXECUTABLE) { - listener.onResponse(Rows.singleton(output(), formatPlan(format, executablePlan))); + listener.onResponse(Page.last(Rows.singleton(output(), formatPlan(format, executablePlan)))); return; } // Type.All - listener.onResponse(Rows.singleton(output(), printPlans(format, plan, analyzedPlan, optimizedPlan, - mappedPlan, executablePlan))); + listener.onResponse(Page.last( + Rows.singleton(output(), printPlans(format, plan, analyzedPlan, optimizedPlan, mappedPlan, executablePlan)))); }, listener::onFailure)); } @@ -133,14 +133,14 @@ public class Explain extends Command { if (session.verifier().verifyFailures(analyzedPlan).isEmpty()) { session.optimizedPlan(analyzedPlan, wrap(optimizedPlan -> { if (type == Type.OPTIMIZED) { - listener.onResponse(Rows.singleton(output(), formatPlan(format, optimizedPlan))); + listener.onResponse(Page.last(Rows.singleton(output(), formatPlan(format, optimizedPlan)))); return; } PhysicalPlan mappedPlan = planner.mapPlan(optimizedPlan, verify); if (type == Type.MAPPED) { - listener.onResponse(Rows.singleton(output(), formatPlan(format, mappedPlan))); + listener.onResponse(Page.last(Rows.singleton(output(), formatPlan(format, mappedPlan)))); return; } @@ -148,30 +148,30 @@ public class Explain extends Command { PhysicalPlan executablePlan = planner.foldPlan(mappedPlan, verify); if (type == Type.EXECUTABLE) { - listener.onResponse(Rows.singleton(output(), formatPlan(format, executablePlan))); + listener.onResponse(Page.last(Rows.singleton(output(), formatPlan(format, executablePlan)))); return; } - listener.onResponse(Rows.singleton(output(), printPlans(format, plan, analyzedPlan, optimizedPlan, - mappedPlan, executablePlan))); + listener.onResponse(Page.last(Rows.singleton(output(), + printPlans(format, plan, analyzedPlan, optimizedPlan, mappedPlan, executablePlan)))); return; } // mapped failed if (type != Type.ALL) { - listener.onResponse(Rows.singleton(output(), formatPlan(format, 
mappedPlan))); + listener.onResponse(Page.last(Rows.singleton(output(), formatPlan(format, mappedPlan)))); return; } - listener.onResponse(Rows.singleton(output(), printPlans(format, plan, analyzedPlan, optimizedPlan, - mappedPlan, null))); + listener.onResponse(Page + .last(Rows.singleton(output(), printPlans(format, plan, analyzedPlan, optimizedPlan, mappedPlan, null)))); }, listener::onFailure)); // cannot continue } else { if (type != Type.ALL) { - listener.onResponse(Rows.singleton(output(), formatPlan(format, analyzedPlan))); + listener.onResponse(Page.last(Rows.singleton(output(), formatPlan(format, analyzedPlan)))); } else { - listener.onResponse(Rows.singleton(output(), printPlans(format, plan, analyzedPlan, null, null, null))); + listener.onResponse(Page.last(Rows.singleton(output(), printPlans(format, plan, analyzedPlan, null, null, null)))); } } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowColumns.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowColumns.java index 7cddc3fc0a7e..33643fa0f9ff 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowColumns.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowColumns.java @@ -9,8 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.xpack.sql.expression.Attribute; import org.elasticsearch.xpack.sql.expression.FieldAttribute; import org.elasticsearch.xpack.sql.expression.predicate.regex.LikePattern; -import org.elasticsearch.xpack.sql.session.Rows; -import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.Cursor.Page; import org.elasticsearch.xpack.sql.session.SqlSession; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.tree.Source; @@ -61,7 +60,7 @@ public class ShowColumns extends Command { } @Override - public void execute(SqlSession session, ActionListener listener) { + public void execute(SqlSession session, ActionListener listener) { String idx = index != null ? index : (pattern != null ? pattern.asIndexNameWildcard() : "*"); String regex = pattern != null ? 
pattern.asJavaRegex() : null; @@ -73,7 +72,7 @@ public class ShowColumns extends Command { rows = new ArrayList<>(); fillInRows(indexResult.get().mapping(), null, rows); } - listener.onResponse(Rows.of(output(), rows)); + listener.onResponse(of(session, rows)); }, listener::onFailure)); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowFunctions.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowFunctions.java index f0e5d3fced0a..e30c252fe324 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowFunctions.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowFunctions.java @@ -11,11 +11,10 @@ import org.elasticsearch.xpack.sql.expression.FieldAttribute; import org.elasticsearch.xpack.sql.expression.function.FunctionDefinition; import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; import org.elasticsearch.xpack.sql.expression.predicate.regex.LikePattern; -import org.elasticsearch.xpack.sql.session.Rows; -import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.Cursor.Page; import org.elasticsearch.xpack.sql.session.SqlSession; -import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.KeywordEsField; import java.util.Collection; @@ -50,11 +49,11 @@ public class ShowFunctions extends Command { } @Override - public void execute(SqlSession session, ActionListener listener) { + public void execute(SqlSession session, ActionListener listener) { FunctionRegistry registry = session.functionRegistry(); Collection functions = registry.listFunctions(pattern != null ? 
pattern.asJavaRegex() : null); - listener.onResponse(Rows.of(output(), functions.stream() + listener.onResponse(of(session, functions.stream() .map(f -> asList(f.name(), f.type().name())) .collect(toList()))); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowSchemas.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowSchemas.java index 9d684b3ca456..6ebcfb2b16b2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowSchemas.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowSchemas.java @@ -8,11 +8,11 @@ package org.elasticsearch.xpack.sql.plan.logical.command; import org.elasticsearch.action.ActionListener; import org.elasticsearch.xpack.sql.expression.Attribute; import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.session.Cursor.Page; import org.elasticsearch.xpack.sql.session.Rows; -import org.elasticsearch.xpack.sql.session.SchemaRowSet; import org.elasticsearch.xpack.sql.session.SqlSession; -import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.KeywordEsField; import java.util.List; @@ -36,8 +36,8 @@ public class ShowSchemas extends Command { } @Override - public void execute(SqlSession session, ActionListener listener) { - listener.onResponse(Rows.empty(output())); + public void execute(SqlSession session, ActionListener listener) { + listener.onResponse(Page.last(Rows.empty(output()))); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowTables.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowTables.java index 8efc7d84377f..4cdeae3ef501 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowTables.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowTables.java @@ -9,8 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.xpack.sql.analysis.index.IndexResolver.IndexType; import org.elasticsearch.xpack.sql.expression.Attribute; import org.elasticsearch.xpack.sql.expression.predicate.regex.LikePattern; -import org.elasticsearch.xpack.sql.session.Rows; -import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.Cursor.Page; import org.elasticsearch.xpack.sql.session.SqlSession; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.tree.Source; @@ -54,7 +53,7 @@ public class ShowTables extends Command { } @Override - public final void execute(SqlSession session, ActionListener listener) { + public final void execute(SqlSession session, ActionListener listener) { String idx = index != null ? index : (pattern != null ? pattern.asIndexNameWildcard() : "*"); String regex = pattern != null ? 
pattern.asJavaRegex() : null; @@ -63,7 +62,7 @@ public class ShowTables extends Command { IndexType.VALID_INCLUDE_FROZEN : IndexType.VALID_REGULAR; session.indexResolver().resolveNames(idx, regex, withFrozen, ActionListener.wrap(result -> { - listener.onResponse(Rows.of(output(), result.stream() + listener.onResponse(of(session, result.stream() .map(t -> asList(t.name(), t.type().toSql(), t.type().toNative())) .collect(toList()))); }, listener::onFailure)); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java index 16d6eae924e3..a1bb62b00215 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java @@ -13,8 +13,8 @@ import org.elasticsearch.xpack.sql.expression.Attribute; import org.elasticsearch.xpack.sql.expression.predicate.regex.LikePattern; import org.elasticsearch.xpack.sql.plan.logical.command.Command; import org.elasticsearch.xpack.sql.proto.Mode; +import org.elasticsearch.xpack.sql.session.Cursor.Page; import org.elasticsearch.xpack.sql.session.Rows; -import org.elasticsearch.xpack.sql.session.SchemaRowSet; import org.elasticsearch.xpack.sql.session.SqlSession; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.tree.Source; @@ -97,14 +97,14 @@ public class SysColumns extends Command { } @Override - public void execute(SqlSession session, ActionListener listener) { + public void execute(SqlSession session, ActionListener listener) { Mode mode = session.configuration().mode(); List output = output(mode == Mode.ODBC); String cluster = session.indexResolver().clusterName(); // bail-out early if the catalog is present but differs if (Strings.hasText(catalog) && cluster.equals(catalog) == false) { - listener.onResponse(Rows.empty(output)); + listener.onResponse(Page.last(Rows.empty(output))); return; } @@ -125,7 +125,7 @@ public class SysColumns extends Command { fillInRows(cluster, esIndex.name(), esIndex.mapping(), null, rows, columnMatcher, mode); } - listener.onResponse(Rows.of(output, rows)); + listener.onResponse(of(session, rows)); }, listener::onFailure)); } // otherwise use a merged mapping @@ -138,7 +138,7 @@ public class SysColumns extends Command { fillInRows(cluster, indexName, esIndex.mapping(), null, rows, columnMatcher, mode); } - listener.onResponse(Rows.of(output, rows)); + listener.onResponse(of(session, rows)); }, listener::onFailure)); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java index 111b392adb6b..a3b8f1817415 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java @@ -11,8 +11,8 @@ import org.elasticsearch.xpack.sql.analysis.index.IndexResolver.IndexType; import org.elasticsearch.xpack.sql.expression.Attribute; import org.elasticsearch.xpack.sql.expression.predicate.regex.LikePattern; import org.elasticsearch.xpack.sql.plan.logical.command.Command; +import org.elasticsearch.xpack.sql.session.Cursor.Page; import org.elasticsearch.xpack.sql.session.Rows; -import 
org.elasticsearch.xpack.sql.session.SchemaRowSet; import org.elasticsearch.xpack.sql.session.SqlSession; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.tree.Source; @@ -70,7 +70,7 @@ public class SysTables extends Command { } @Override - public final void execute(SqlSession session, ActionListener listener) { + public final void execute(SqlSession session, ActionListener listener) { String cluster = session.indexResolver().clusterName(); // first check if where dealing with ODBC enumeration @@ -85,7 +85,7 @@ public class SysTables extends Command { Object[] enumeration = new Object[10]; // send only the cluster, everything else null enumeration[0] = cluster; - listener.onResponse(Rows.singleton(output(), enumeration)); + listener.onResponse(Page.last(Rows.singleton(output(), enumeration))); return; } } @@ -111,7 +111,7 @@ public class SysTables extends Command { } values.sort(Comparator.comparing(l -> l.get(3).toString())); - listener.onResponse(Rows.of(output(), values)); + listener.onResponse(of(session, values)); return; } } @@ -122,7 +122,7 @@ public class SysTables extends Command { // if the catalog doesn't match, don't return any results if (cRegex != null && !Pattern.matches(cRegex, cluster)) { - listener.onResponse(Rows.empty(output())); + listener.onResponse(Page.last(Rows.empty(output()))); return; } @@ -141,7 +141,7 @@ public class SysTables extends Command { } session.indexResolver().resolveNames(idx, regex, tableTypes, ActionListener.wrap(result -> listener.onResponse( - Rows.of(output(), result.stream() + of(session, result.stream() // sort by type (which might be legacy), then by name .sorted(Comparator. comparing(i -> legacyName(i.type())) .thenComparing(Comparator.comparing(i -> i.name()))) diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java index 2112128b41b0..95ba2346c3ef 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java @@ -8,8 +8,7 @@ package org.elasticsearch.xpack.sql.plan.logical.command.sys; import org.elasticsearch.action.ActionListener; import org.elasticsearch.xpack.sql.expression.Attribute; import org.elasticsearch.xpack.sql.plan.logical.command.Command; -import org.elasticsearch.xpack.sql.session.Rows; -import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.Cursor.Page; import org.elasticsearch.xpack.sql.session.SqlSession; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.tree.Source; @@ -72,7 +71,7 @@ public class SysTypes extends Command { } @Override - public final void execute(SqlSession session, ActionListener listener) { + public final void execute(SqlSession session, ActionListener listener) { Stream values = Stream.of(DataType.values()); if (type.intValue() != 0) { values = values.filter(t -> type.equals(t.sqlType.getVendorTypeNumber())); @@ -110,7 +109,7 @@ public class SysTypes extends Command { )) .collect(toList()); - listener.onResponse(Rows.of(output(), rows)); + listener.onResponse(of(session, rows)); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/CommandExec.java 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/CommandExec.java index f1fccb7e2c49..43a7bfac4628 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/CommandExec.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/CommandExec.java @@ -8,14 +8,16 @@ package org.elasticsearch.xpack.sql.plan.physical; import org.elasticsearch.action.ActionListener; import org.elasticsearch.xpack.sql.expression.Attribute; import org.elasticsearch.xpack.sql.plan.logical.command.Command; -import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.Cursor.Page; import org.elasticsearch.xpack.sql.session.SqlSession; -import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; import java.util.List; import java.util.Objects; +import static org.elasticsearch.action.ActionListener.wrap; + public class CommandExec extends LeafExec { private final Command command; @@ -35,8 +37,8 @@ public class CommandExec extends LeafExec { } @Override - public void execute(SqlSession session, ActionListener listener) { - command.execute(session, listener); + public void execute(SqlSession session, ActionListener listener) { + command.execute(session, wrap(listener::onResponse, listener::onFailure)); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/EsQueryExec.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/EsQueryExec.java index 6fa87ca90bbf..6e132fb68711 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/EsQueryExec.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/EsQueryExec.java @@ -9,7 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.xpack.sql.execution.search.Querier; import org.elasticsearch.xpack.sql.expression.Attribute; import org.elasticsearch.xpack.sql.querydsl.container.QueryContainer; -import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.Cursor.Page; import org.elasticsearch.xpack.sql.session.SqlSession; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.tree.Source; @@ -53,7 +53,7 @@ public class EsQueryExec extends LeafExec { } @Override - public void execute(SqlSession session, ActionListener listener) { + public void execute(SqlSession session, ActionListener listener) { Querier scroller = new Querier(session); scroller.query(output, queryContainer, index, listener); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/LocalExec.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/LocalExec.java index cce19411465e..c0adb1a98659 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/LocalExec.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/LocalExec.java @@ -7,12 +7,12 @@ package org.elasticsearch.xpack.sql.plan.physical; import org.elasticsearch.action.ActionListener; import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.session.Cursor.Page; import org.elasticsearch.xpack.sql.session.EmptyExecutable; import org.elasticsearch.xpack.sql.session.Executable; -import org.elasticsearch.xpack.sql.session.SchemaRowSet; import org.elasticsearch.xpack.sql.session.SqlSession; 
-import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; import java.util.List; import java.util.Objects; @@ -45,7 +45,7 @@ public class LocalExec extends LeafExec { } @Override - public void execute(SqlSession session, ActionListener listener) { + public void execute(SqlSession session, ActionListener listener) { executable.execute(session, listener); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/Unexecutable.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/Unexecutable.java index e1b08b3e492f..a32d2f889909 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/Unexecutable.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/Unexecutable.java @@ -7,15 +7,16 @@ package org.elasticsearch.xpack.sql.plan.physical; import org.elasticsearch.action.ActionListener; import org.elasticsearch.xpack.sql.planner.PlanningException; +import org.elasticsearch.xpack.sql.session.Cursor.Page; import org.elasticsearch.xpack.sql.session.Executable; -import org.elasticsearch.xpack.sql.session.SchemaRowSet; import org.elasticsearch.xpack.sql.session.SqlSession; // this is mainly a marker interface to validate a plan before being executed public interface Unexecutable extends Executable { - default void execute(SqlSession session, ActionListener listener) { + @Override + default void execute(SqlSession session, ActionListener listener) { throw new PlanningException("Current plan {} is not executable", this); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java index f1bd8162239c..e785c2e74cc0 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.sql.plugin; import org.apache.logging.log4j.LogManager; -import org.elasticsearch.Version; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -24,8 +23,6 @@ import org.elasticsearch.xpack.sql.action.SqlQueryAction; import org.elasticsearch.xpack.sql.action.SqlQueryRequest; import org.elasticsearch.xpack.sql.action.SqlQueryResponse; import org.elasticsearch.xpack.sql.proto.Protocol; -import org.elasticsearch.xpack.sql.session.Cursor; -import org.elasticsearch.xpack.sql.session.Cursors; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -89,22 +86,9 @@ public class RestSqlQueryAction extends BaseRestHandler { * which we turn into a 400 error. */ XContentType xContentType = accept == null ? XContentType.JSON : XContentType.fromMediaTypeOrFormat(accept); - if (xContentType != null) { - return channel -> client.execute(SqlQueryAction.INSTANCE, sqlRequest, new RestResponseListener(channel) { - @Override - public RestResponse buildResponse(SqlQueryResponse response) throws Exception { - XContentBuilder builder = channel.newBuilder(request.getXContentType(), xContentType, true); - response.toXContent(builder, request); - return new BytesRestResponse(RestStatus.OK, builder); - } - }); - } + TextFormat textFormat = xContentType == null ? 
TextFormat.fromMediaTypeOrFormat(accept) : null; - TextFormat textFormat = TextFormat.fromMediaTypeOrFormat(accept); - - // if we reached this point, the format to be used can be one of TXT, CSV or TSV - // which won't work in a columnar fashion - if (sqlRequest.columnar()) { + if (xContentType == null && sqlRequest.columnar()) { throw new IllegalArgumentException("Invalid use of [columnar] argument: cannot be used in combination with " + "txt, csv or tsv formats"); } @@ -113,19 +97,27 @@ public class RestSqlQueryAction extends BaseRestHandler { return channel -> client.execute(SqlQueryAction.INSTANCE, sqlRequest, new RestResponseListener(channel) { @Override public RestResponse buildResponse(SqlQueryResponse response) throws Exception { - Cursor cursor = Cursors.decodeFromString(sqlRequest.cursor()); - final String data = textFormat.format(cursor, request, response); + RestResponse restResponse; - RestResponse restResponse = new BytesRestResponse(RestStatus.OK, textFormat.contentType(request), + // XContent branch + if (xContentType != null) { + XContentBuilder builder = channel.newBuilder(request.getXContentType(), xContentType, true); + response.toXContent(builder, request); + restResponse = new BytesRestResponse(RestStatus.OK, builder); + } + // TextFormat + else { + final String data = textFormat.format(request, response); + + restResponse = new BytesRestResponse(RestStatus.OK, textFormat.contentType(request), data.getBytes(StandardCharsets.UTF_8)); - Cursor responseCursor = textFormat.wrapCursor(cursor, response); - - if (responseCursor != Cursor.EMPTY) { - restResponse.addHeader("Cursor", Cursors.encodeToString(Version.CURRENT, responseCursor)); + if (response.hasCursor()) { + restResponse.addHeader("Cursor", response.cursor()); + } } - restResponse.addHeader("Took-nanos", Long.toString(System.nanoTime() - startNanos)); + restResponse.addHeader("Took-nanos", Long.toString(System.nanoTime() - startNanos)); return restResponse; } }); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java index f4e3e006e70f..5a415703bb78 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java @@ -6,7 +6,9 @@ package org.elasticsearch.xpack.sql.plugin; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.action.BasicFormatter; import org.elasticsearch.xpack.sql.action.SqlQueryResponse; import org.elasticsearch.xpack.sql.proto.ColumnInfo; @@ -15,6 +17,7 @@ import org.elasticsearch.xpack.sql.session.Cursors; import org.elasticsearch.xpack.sql.util.DateUtils; import org.elasticsearch.xpack.sql.util.StringUtils; +import java.time.ZoneId; import java.time.ZonedDateTime; import java.util.List; import java.util.Locale; @@ -26,9 +29,6 @@ import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.TEX /** * Templating class for displaying SQL responses in text formats. */ - -// TODO are we sure toString is correct here? What about dates that come back as longs. 
-// Tracked by https://github.com/elastic/x-pack-elasticsearch/issues/3081
 enum TextFormat {
 
     /**
@@ -41,22 +41,41 @@
      */
     PLAIN_TEXT() {
         @Override
-        String format(Cursor cursor, RestRequest request, SqlQueryResponse response) {
-            final BasicFormatter formatter;
-            if (cursor instanceof TextFormatterCursor) {
-                formatter = ((TextFormatterCursor) cursor).getFormatter();
-                return formatter.formatWithoutHeader(response.rows());
-            } else {
+        String format(RestRequest request, SqlQueryResponse response) {
+            BasicFormatter formatter = null;
+            Cursor cursor = null;
+            ZoneId zoneId = null;
+
+            // check if the cursor is already wrapped first
+            if (response.hasCursor()) {
+                Tuple tuple = Cursors.decodeFromStringWithZone(response.cursor());
+                cursor = tuple.v1();
+                zoneId = tuple.v2();
+                if (cursor instanceof TextFormatterCursor) {
+                    formatter = ((TextFormatterCursor) cursor).getFormatter();
+                }
+            }
+
+            // if there are headers available, it means it's the first request
+            // so initialize the underlying formatter and wrap it in the cursor
+            if (response.columns() != null) {
                 formatter = new BasicFormatter(response.columns(), response.rows(), TEXT);
+                // if there's a cursor, wrap the formatter in it
+                if (cursor != null) {
+                    response.cursor(Cursors.encodeToString(new TextFormatterCursor(cursor, formatter), zoneId));
+                }
+                // format with header
                 return formatter.formatWithHeader(response.columns(), response.rows());
             }
-        }
-
-        @Override
-        Cursor wrapCursor(Cursor oldCursor, SqlQueryResponse response) {
-            BasicFormatter formatter = (oldCursor instanceof TextFormatterCursor) ?
-                    ((TextFormatterCursor) oldCursor).getFormatter() : new BasicFormatter(response.columns(), response.rows(), TEXT);
-            return TextFormatterCursor.wrap(super.wrapCursor(oldCursor, response), formatter);
+            else {
+                // should be initialized (wrapped by the cursor)
+                if (formatter != null) {
+                    // format without header
+                    return formatter.formatWithoutHeader(response.rows());
+                }
+            }
+            // if this code is reached, it means it's a next page without cursor wrapping
+            throw new SqlIllegalArgumentException("Cannot find text formatter - this is likely a bug");
         }
 
         @Override
@@ -219,12 +238,11 @@
     };
 
-    String format(Cursor cursor, RestRequest request, SqlQueryResponse response) {
+    String format(RestRequest request, SqlQueryResponse response) {
         StringBuilder sb = new StringBuilder();
 
-        boolean header = hasHeader(request);
-
-        if (header && (cursor == null || cursor == Cursor.EMPTY)) {
+        // if the header is requested (and the column info is present - namely it's the first page) return the info
+        if (hasHeader(request) && response.columns() != null) {
             row(sb, response.columns(), ColumnInfo::name);
         }
 
@@ -239,10 +257,6 @@
         return true;
     }
 
-    Cursor wrapCursor(Cursor oldCursor, SqlQueryResponse response) {
-        return Cursors.decodeFromString(response.cursor());
-    }
-
     static TextFormat fromMediaTypeOrFormat(String accept) {
         for (TextFormat text : values()) {
             String contentType = text.contentType();
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormatterCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormatterCursor.java
index 4ab1d77fe21f..76e6b895aed8 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormatterCursor.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormatterCursor.java
@@ -13,11 +13,11 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import
org.elasticsearch.xpack.sql.action.BasicFormatter; import org.elasticsearch.xpack.sql.session.Configuration; import org.elasticsearch.xpack.sql.session.Cursor; -import org.elasticsearch.xpack.sql.session.RowSet; import java.io.IOException; import java.util.Objects; +import static org.elasticsearch.action.ActionListener.wrap; /** * The cursor that wraps all necessary information for textual representation of the result table */ @@ -27,18 +27,7 @@ public class TextFormatterCursor implements Cursor { private final Cursor delegate; private final BasicFormatter formatter; - /** - * If the newCursor is empty, returns an empty cursor. Otherwise, creates a new - * TextFormatterCursor that wraps the newCursor. - */ - public static Cursor wrap(Cursor newCursor, BasicFormatter formatter) { - if (newCursor == EMPTY) { - return EMPTY; - } - return new TextFormatterCursor(newCursor, formatter); - } - - private TextFormatterCursor(Cursor delegate, BasicFormatter formatter) { + TextFormatterCursor(Cursor delegate, BasicFormatter formatter) { this.delegate = delegate; this.formatter = formatter; } @@ -59,8 +48,13 @@ public class TextFormatterCursor implements Cursor { } @Override - public void nextPage(Configuration cfg, Client client, NamedWriteableRegistry registry, ActionListener listener) { - delegate.nextPage(cfg, client, registry, listener); + public void nextPage(Configuration cfg, Client client, NamedWriteableRegistry registry, ActionListener listener) { + // keep wrapping the text formatter + delegate.nextPage(cfg, client, registry, + wrap(p -> { + Cursor next = p.next(); + listener.onResponse(next == Cursor.EMPTY ? p : new Page(p.rowSet(), new TextFormatterCursor(next, formatter))); + }, listener::onFailure)); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java index 3e9c30f49b45..97da20902a0d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.sql.plugin; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; @@ -18,6 +17,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.action.SqlQueryAction; import org.elasticsearch.xpack.sql.action.SqlQueryRequest; import org.elasticsearch.xpack.sql.action.SqlQueryResponse; @@ -25,6 +25,7 @@ import org.elasticsearch.xpack.sql.execution.PlanExecutor; import org.elasticsearch.xpack.sql.proto.ColumnInfo; import org.elasticsearch.xpack.sql.proto.Mode; import org.elasticsearch.xpack.sql.session.Configuration; +import org.elasticsearch.xpack.sql.session.Cursor.Page; import org.elasticsearch.xpack.sql.session.Cursors; import org.elasticsearch.xpack.sql.session.RowSet; import org.elasticsearch.xpack.sql.session.SchemaRowSet; @@ -71,20 +72,26 @@ public class TransportSqlQueryAction extends HandledTransportAction listener.onResponse(createResponse(request, rowSet)), listener::onFailure)); + 
wrap(p -> listener.onResponse(createResponseWithSchema(request, p)), listener::onFailure)); } else { planExecutor.nextPage(cfg, Cursors.decodeFromString(request.cursor()), - wrap(rowSet -> listener.onResponse(createResponse(request.mode(), request.columnar(), rowSet, null)), + wrap(p -> listener.onResponse(createResponse(request, null, p)), listener::onFailure)); } } - static SqlQueryResponse createResponse(SqlQueryRequest request, SchemaRowSet rowSet) { + static SqlQueryResponse createResponseWithSchema(SqlQueryRequest request, Page page) { + RowSet rset = page.rowSet(); + if ((rset instanceof SchemaRowSet) == false) { + throw new SqlIllegalArgumentException("No schema found inside {}", rset.getClass()); + } + SchemaRowSet rowSet = (SchemaRowSet) rset; + List columns = new ArrayList<>(rowSet.columnCount()); for (Schema.Entry entry : rowSet.schema()) { if (Mode.isDriver(request.mode())) { @@ -94,22 +101,22 @@ public class TransportSqlQueryAction extends HandledTransportAction columns) { + static SqlQueryResponse createResponse(SqlQueryRequest request, List header, Page page) { List> rows = new ArrayList<>(); - rowSet.forEachRow(rowView -> { + page.rowSet().forEachRow(rowView -> { List row = new ArrayList<>(rowView.columnCount()); rowView.forEachColumn(row::add); rows.add(unmodifiableList(row)); }); return new SqlQueryResponse( - Cursors.encodeToString(Version.CURRENT, rowSet.nextPageCursor()), - mode, - columnar, - columns, + Cursors.encodeToString(page.next(), request.zoneId()), + request.mode(), + request.columnar(), + header, rows); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateHistogram.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateHistogram.java index 24367fc5e1f2..f66278fc2c6d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateHistogram.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateHistogram.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.sql.querydsl.agg; import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; import org.elasticsearch.search.aggregations.bucket.composite.DateHistogramValuesSourceBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; import org.elasticsearch.xpack.sql.querydsl.container.Sort.Direction; @@ -45,7 +46,7 @@ public class GroupByDateHistogram extends GroupByKey { @Override protected CompositeValuesSourceBuilder createSourceBuilder() { return new DateHistogramValuesSourceBuilder(id()) - .interval(interval) + .fixedInterval(new DateHistogramInterval(interval + "ms")) .timeZone(zoneId); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursor.java index ccb4a7cdc40d..618b417b0d58 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursor.java @@ -14,12 +14,35 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; * Information required to access the next page of response. 
*/
public interface Cursor extends NamedWriteable {
+
+    class Page {
+        private final RowSet rowSet;
+        private final Cursor next;
+
+        public Page(RowSet rowSet, Cursor next) {
+            this.rowSet = rowSet;
+            this.next = next;
+        }
+
+        public RowSet rowSet() {
+            return rowSet;
+        }
+
+        public Cursor next() {
+            return next;
+        }
+
+        public static Page last(RowSet rowSet) {
+            return new Page(rowSet, EMPTY);
+        }
+    }
+
    Cursor EMPTY = EmptyCursor.INSTANCE;
    /**
     * Request the next page of data.
     */
-    void nextPage(Configuration cfg, Client client, NamedWriteableRegistry registry, ActionListener<RowSet> listener);
+    void nextPage(Configuration cfg, Client client, NamedWriteableRegistry registry, ActionListener<Page> listener);
    /**
     * Cleans the resources associated with the cursor
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java
index f268754e7048..8f2c37356028 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java
@@ -6,27 +6,24 @@ package org.elasticsearch.xpack.sql.session;
import org.elasticsearch.Version;
+import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.NamedWriteable;
-import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
-import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
+import org.elasticsearch.xpack.sql.common.io.SqlStreamInput;
+import org.elasticsearch.xpack.sql.common.io.SqlStreamOutput;
import org.elasticsearch.xpack.sql.execution.search.CompositeAggregationCursor;
-import org.elasticsearch.xpack.sql.execution.search.PagingListCursor;
import org.elasticsearch.xpack.sql.execution.search.ScrollCursor;
import org.elasticsearch.xpack.sql.execution.search.extractor.BucketExtractors;
import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractors;
import org.elasticsearch.xpack.sql.expression.function.scalar.Processors;
import org.elasticsearch.xpack.sql.expression.literal.Literals;
import org.elasticsearch.xpack.sql.plugin.TextFormatterCursor;
+import org.elasticsearch.xpack.sql.util.StringUtils;
-import java.io.ByteArrayOutputStream;
-import java.io.OutputStream;
-import java.nio.charset.StandardCharsets;
+import java.io.IOException;
+import java.time.ZoneId;
import java.util.ArrayList;
-import java.util.Base64;
import java.util.List;
/**
@@ -35,6 +32,7 @@ import java.util.List;
public final class Cursors {
    private static final NamedWriteableRegistry WRITEABLE_REGISTRY = new NamedWriteableRegistry(getNamedWriteables());
+    private static final Version VERSION = Version.CURRENT;
    private Cursors() {}
@@ -49,7 +47,7 @@ public final class Cursors {
        entries.add(new NamedWriteableRegistry.Entry(Cursor.class, ScrollCursor.NAME, ScrollCursor::new));
        entries.add(new NamedWriteableRegistry.Entry(Cursor.class, CompositeAggregationCursor.NAME, CompositeAggregationCursor::new));
        entries.add(new NamedWriteableRegistry.Entry(Cursor.class, TextFormatterCursor.NAME, TextFormatterCursor::new));
-        entries.add(new NamedWriteableRegistry.Entry(Cursor.class, PagingListCursor.NAME, PagingListCursor::new));
+        entries.add(new
NamedWriteableRegistry.Entry(Cursor.class, ListCursor.NAME, ListCursor::new)); // plus all their dependencies entries.addAll(Processors.getNamedWriteables()); @@ -65,17 +63,20 @@ public final class Cursors { /** * Write a {@linkplain Cursor} to a string for serialization across xcontent. */ - public static String encodeToString(Version version, Cursor info) { + public static String encodeToString(Cursor info, ZoneId zoneId) { + return encodeToString(info, VERSION, zoneId); + } + + static String encodeToString(Cursor info, Version version, ZoneId zoneId) { if (info == Cursor.EMPTY) { - return ""; + return StringUtils.EMPTY; } - try (ByteArrayOutputStream os = new ByteArrayOutputStream()) { - try (OutputStream base64 = Base64.getEncoder().wrap(os); StreamOutput out = new OutputStreamStreamOutput(base64)) { - Version.writeVersion(version, out); - out.writeNamedWriteable(info); - } - return os.toString(StandardCharsets.UTF_8.name()); - } catch (Exception ex) { + try (SqlStreamOutput output = new SqlStreamOutput(version, zoneId)) { + output.writeNamedWriteable(info); + output.close(); + // return the string only after closing the resource + return output.streamAsString(); + } catch (IOException ex) { throw new SqlIllegalArgumentException("Unexpected failure retrieving next page", ex); } } @@ -84,22 +85,23 @@ public final class Cursors { /** * Read a {@linkplain Cursor} from a string. */ - public static Cursor decodeFromString(String info) { - if (info.isEmpty()) { - return Cursor.EMPTY; + public static Cursor decodeFromString(String base64) { + return decodeFromStringWithZone(base64).v1(); + } + + /** + * Read a {@linkplain Cursor} from a string. + */ + public static Tuple decodeFromStringWithZone(String base64) { + if (base64.isEmpty()) { + return new Tuple<>(Cursor.EMPTY, null); } - byte[] bytes = info.getBytes(StandardCharsets.UTF_8); - try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(Base64.getDecoder().decode(bytes)), WRITEABLE_REGISTRY)) { - Version version = Version.readVersion(in); - if (version.after(Version.CURRENT)) { - throw new SqlIllegalArgumentException("Unsupported cursor version " + version); - } - in.setVersion(version); - return in.readNamedWriteable(Cursor.class); - } catch (SqlIllegalArgumentException ex) { - throw ex; - } catch (Exception ex) { + try (SqlStreamInput in = new SqlStreamInput(base64, WRITEABLE_REGISTRY, VERSION)) { + Cursor cursor = in.readNamedWriteable(Cursor.class); + return new Tuple<>(cursor, in.zoneId()); + } catch (IOException ex) { throw new SqlIllegalArgumentException("Unexpected failure decoding cursor", ex); } } + } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/EmptyCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/EmptyCursor.java index fd9c63438a87..9f95b9049400 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/EmptyCursor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/EmptyCursor.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import java.io.IOException; @@ -31,8 +32,8 @@ class EmptyCursor implements Cursor { } @Override - public void nextPage(Configuration cfg, Client client, NamedWriteableRegistry registry, ActionListener listener) { - throw new 
IllegalArgumentException("there is no next page"); + public void nextPage(Configuration cfg, Client client, NamedWriteableRegistry registry, ActionListener listener) { + throw new SqlIllegalArgumentException("there is no next page"); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/EmptyExecutable.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/EmptyExecutable.java index 09e0d3ac2a30..93a7e51c39b7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/EmptyExecutable.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/EmptyExecutable.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.sql.session; import org.elasticsearch.action.ActionListener; import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.session.Cursor.Page; import java.util.List; import java.util.Objects; @@ -25,8 +26,8 @@ public class EmptyExecutable implements Executable { } @Override - public void execute(SqlSession session, ActionListener listener) { - listener.onResponse(Rows.empty(output)); + public void execute(SqlSession session, ActionListener listener) { + listener.onResponse(Page.last(Rows.empty(output))); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/EmptyRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/EmptyRowSet.java index 9237850998a5..9b6fa2aa3a06 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/EmptyRowSet.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/EmptyRowSet.java @@ -39,11 +39,6 @@ class EmptyRowSet extends AbstractRowSet implements SchemaRowSet { return 0; } - @Override - public Cursor nextPageCursor() { - return Cursor.EMPTY; - } - @Override public Schema schema() { return schema; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Executable.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Executable.java index dbc163170291..d1d78194ebe6 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Executable.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Executable.java @@ -5,14 +5,15 @@ */ package org.elasticsearch.xpack.sql.session; -import java.util.List; - import org.elasticsearch.action.ActionListener; import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.session.Cursor.Page; + +import java.util.List; public interface Executable { List output(); - void execute(SqlSession session, ActionListener listener); + void execute(SqlSession session, ActionListener listener); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/PagingListCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/ListCursor.java similarity index 64% rename from x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/PagingListCursor.java rename to x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/ListCursor.java index 5a318eaa31f4..7e20abc31de9 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/PagingListCursor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/ListCursor.java @@ -4,16 +4,14 @@ * you may not use this file except in compliance with the Elastic License. 
*/
-package org.elasticsearch.xpack.sql.execution.search;
+package org.elasticsearch.xpack.sql.session;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.xpack.sql.session.Configuration;
-import org.elasticsearch.xpack.sql.session.Cursor;
-import org.elasticsearch.xpack.sql.session.RowSet;
+import org.elasticsearch.xpack.sql.type.Schema;
import java.io.IOException;
import java.util.List;
@@ -21,7 +19,7 @@ import java.util.Objects;
import static java.util.Collections.emptyList;
-public class PagingListCursor implements Cursor {
+public class ListCursor implements Cursor {
    public static final String NAME = "p";
@@ -29,14 +27,14 @@ public class PagingListCursor implements Cursor {
    private final int columnCount;
    private final int pageSize;
-    PagingListCursor(List<List<?>> data, int columnCount, int pageSize) {
+    public ListCursor(List<List<?>> data, int pageSize, int columnCount) {
        this.data = data;
        this.columnCount = columnCount;
        this.pageSize = pageSize;
    }
    @SuppressWarnings("unchecked")
-    public PagingListCursor(StreamInput in) throws IOException {
+    public ListCursor(StreamInput in) throws IOException {
        data = (List<List<?>>) in.readGenericValue();
        columnCount = in.readVInt();
        pageSize = in.readVInt();
@@ -66,11 +64,27 @@ public class PagingListCursor implements Cursor {
        return pageSize;
    }
-    @Override
-    public void nextPage(Configuration cfg, Client client, NamedWriteableRegistry registry, ActionListener<RowSet> listener) {
-        // the check is really a safety measure since the page initialization handles it already (by returning an empty cursor)
+    public static Page of(Schema schema, List<List<?>> data, int pageSize) {
+        return of(schema, data, pageSize, schema.size());
+    }
+
+    // NB: private because, for the public variant, the columnCount is inferred from the schema;
+    // only on the next page does the schema become null, but that is an internal detail, hence
+    // why this method is not exposed
+    private static Page of(Schema schema, List<List<?>> data, int pageSize, int columnCount) {
        List<List<?>> nextData = data.size() > pageSize ? data.subList(pageSize, data.size()) : emptyList();
-        listener.onResponse(new PagingListRowSet(nextData, columnCount, pageSize));
+        Cursor next = nextData.isEmpty()
+            ? Cursor.EMPTY
+            : new ListCursor(nextData, pageSize, columnCount);
+        List<List<?>> currData = data.isEmpty() || pageSize == 0
+            ? emptyList()
+            : data.size() == pageSize ?
data : data.subList(0, Math.min(pageSize, data.size())); + return new Page(new ListRowSet(schema, currData, columnCount), next); + } + + @Override + public void nextPage(Configuration cfg, Client client, NamedWriteableRegistry registry, ActionListener listener) { + listener.onResponse(of(Schema.EMPTY, data, pageSize, columnCount)); } @Override @@ -93,7 +107,7 @@ public class PagingListCursor implements Cursor { return false; } - PagingListCursor other = (PagingListCursor) obj; + ListCursor other = (ListCursor) obj; return Objects.equals(pageSize, other.pageSize) && Objects.equals(columnCount, other.columnCount) && Objects.equals(data, other.data); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/ListRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/ListRowSet.java index 0122d333f7e9..6bbbbaa462d0 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/ListRowSet.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/ListRowSet.java @@ -13,13 +13,20 @@ public class ListRowSet extends AbstractRowSet implements SchemaRowSet { private final Schema schema; private final List> list; + private final int columnCount; private int pos = 0; - protected ListRowSet(Schema schema, List> list) { + ListRowSet(Schema schema, List> list) { + this(schema, list, schema.size()); + } + + ListRowSet(Schema schema, List> list, int columnCount) { this.schema = schema; + this.columnCount = columnCount; this.list = list; } + @Override protected boolean doHasCurrent() { return pos < size(); @@ -49,13 +56,13 @@ public class ListRowSet extends AbstractRowSet implements SchemaRowSet { return list.size(); } - @Override - public Cursor nextPageCursor() { - return Cursor.EMPTY; - } - @Override public Schema schema() { return schema; } + + @Override + public int columnCount() { + return columnCount; + } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/RowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/RowSet.java index 38a22ff73f14..93d6745dc659 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/RowSet.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/RowSet.java @@ -22,11 +22,6 @@ public interface RowSet extends RowView { void reset(); - /** - * The key used by PlanExecutor#nextPage to fetch the next page. 
- */ - Cursor nextPageCursor(); - default void forEachRow(Consumer action) { for (boolean hasRows = hasCurrentRow(); hasRows; hasRows = advanceRow()) { action.accept(this); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SingletonExecutable.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SingletonExecutable.java index 8a526fac6dfe..d86ac5fe0081 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SingletonExecutable.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SingletonExecutable.java @@ -7,18 +7,20 @@ package org.elasticsearch.xpack.sql.session; import org.elasticsearch.action.ActionListener; import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.session.Cursor.Page; import org.elasticsearch.xpack.sql.util.Check; -import java.util.Collections; import java.util.List; +import static java.util.Collections.emptyList; + public class SingletonExecutable implements Executable { private final List output; private final Object[] values; public SingletonExecutable() { - this(Collections.emptyList()); + this(emptyList()); } public SingletonExecutable(List output, Object... values) { @@ -33,8 +35,8 @@ public class SingletonExecutable implements Executable { } @Override - public void execute(SqlSession session, ActionListener listener) { - listener.onResponse(Rows.singleton(output, values)); + public void execute(SqlSession session, ActionListener listener) { + listener.onResponse(Page.last(Rows.singleton(output, values))); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SingletonRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SingletonRowSet.java index c8a4e5eddfb6..649caafc9be2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SingletonRowSet.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SingletonRowSet.java @@ -43,11 +43,6 @@ class SingletonRowSet extends AbstractRowSet implements SchemaRowSet { return 1; } - @Override - public Cursor nextPageCursor() { - return Cursor.EMPTY; - } - @Override public Schema schema() { return schema; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java index 6a5b5bd2ae5f..023b443850c1 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java @@ -26,6 +26,7 @@ import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.sql.planner.Planner; import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.rule.RuleExecutor; +import org.elasticsearch.xpack.sql.session.Cursor.Page; import java.util.List; import java.util.function.Function; @@ -159,7 +160,7 @@ public class SqlSession { optimizedPlan(optimized, wrap(o -> listener.onResponse(planner.plan(o, verify)), listener::onFailure)); } - public void sql(String sql, List params, ActionListener listener) { + public void sql(String sql, List params, ActionListener listener) { sqlExecutable(sql, params, wrap(e -> e.execute(this, listener), listener::onFailure)); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/AbstractSqlWireSerializingTestCase.java 
b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/AbstractSqlWireSerializingTestCase.java new file mode 100644 index 000000000000..057cc76ee022 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/AbstractSqlWireSerializingTestCase.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireTestCase; +import org.elasticsearch.xpack.sql.common.io.SqlStreamInput; +import org.elasticsearch.xpack.sql.common.io.SqlStreamOutput; +import org.elasticsearch.xpack.sql.session.Cursors; + +import java.io.IOException; +import java.time.ZoneId; + +public abstract class AbstractSqlWireSerializingTestCase extends AbstractWireTestCase { + + @Override + protected T copyInstance(T instance, Version version) throws IOException { + try (BytesStreamOutput output = new BytesStreamOutput()) { + ZoneId zoneId = instanceZoneId(instance); + SqlStreamOutput out = new SqlStreamOutput(version, zoneId); + instance.writeTo(out); + out.close(); + try (SqlStreamInput in = new SqlStreamInput(out.streamAsString(), getNamedWriteableRegistry(), version)) { + return instanceReader().read(in); + } + } + } + + protected ZoneId instanceZoneId(T instance) { + return randomSafeZone(); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(Cursors.getNamedWriteables()); + } + + + /** + * We need to exclude SystemV/* time zones because they cannot be converted + * back to DateTimeZone which we currently still need to do internally, + * e.g. 
in bwc serialization and in the extract() method + */ + protected static ZoneId randomSafeZone() { + return randomValueOtherThanMany(zi -> zi.getId().startsWith("SystemV"), () -> randomZone()); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursorTests.java index 0baf470e4b36..4216db7cb70d 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursorTests.java @@ -5,39 +5,38 @@ */ package org.elasticsearch.xpack.sql.execution.search; -import org.elasticsearch.Version; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.AbstractSqlWireSerializingTestCase; import org.elasticsearch.xpack.sql.execution.search.extractor.BucketExtractor; import org.elasticsearch.xpack.sql.execution.search.extractor.CompositeKeyExtractorTests; import org.elasticsearch.xpack.sql.execution.search.extractor.ConstantExtractorTests; import org.elasticsearch.xpack.sql.execution.search.extractor.MetricAggExtractorTests; -import org.elasticsearch.xpack.sql.session.Cursors; import java.io.IOException; +import java.time.ZoneId; import java.util.ArrayList; import java.util.BitSet; import java.util.List; import java.util.function.Supplier; -public class CompositeAggregationCursorTests extends AbstractWireSerializingTestCase { +public class CompositeAggregationCursorTests extends AbstractSqlWireSerializingTestCase { public static CompositeAggregationCursor randomCompositeCursor() { int extractorsSize = between(1, 20); + ZoneId id = randomSafeZone(); List extractors = new ArrayList<>(extractorsSize); for (int i = 0; i < extractorsSize; i++) { - extractors.add(randomBucketExtractor()); + extractors.add(randomBucketExtractor(id)); } return new CompositeAggregationCursor(new byte[randomInt(256)], extractors, randomBitSet(extractorsSize), randomIntBetween(10, 1024), randomBoolean(), randomAlphaOfLength(5)); } - static BucketExtractor randomBucketExtractor() { + static BucketExtractor randomBucketExtractor(ZoneId zoneId) { List> options = new ArrayList<>(); options.add(ConstantExtractorTests::randomConstantExtractor); - options.add(MetricAggExtractorTests::randomMetricAggExtractor); - options.add(CompositeKeyExtractorTests::randomCompositeKeyExtractor); + options.add(() -> MetricAggExtractorTests.randomMetricAggExtractor(zoneId)); + options.add(() -> CompositeKeyExtractorTests.randomCompositeKeyExtractor(zoneId)); return randomFrom(options).get(); } @@ -50,11 +49,6 @@ public class CompositeAggregationCursorTests extends AbstractWireSerializingTest instance.indices()); } - @Override - protected NamedWriteableRegistry getNamedWriteableRegistry() { - return new NamedWriteableRegistry(Cursors.getNamedWriteables()); - } - @Override protected CompositeAggregationCursor createTestInstance() { return randomCompositeCursor(); @@ -66,13 +60,17 @@ public class CompositeAggregationCursorTests extends AbstractWireSerializingTest } @Override - protected CompositeAggregationCursor copyInstance(CompositeAggregationCursor instance, Version version) throws IOException { - /* Randomly choose between internal protocol round 
trip and String based - * round trips used to toXContent. */ - if (randomBoolean()) { - return super.copyInstance(instance, version); + protected ZoneId instanceZoneId(CompositeAggregationCursor instance) { + List extractors = instance.extractors(); + for (BucketExtractor bucketExtractor : extractors) { + ZoneId zoneId = MetricAggExtractorTests.extractZoneId(bucketExtractor); + zoneId = zoneId == null ? CompositeKeyExtractorTests.extractZoneId(bucketExtractor) : zoneId; + + if (zoneId != null) { + return zoneId; + } } - return (CompositeAggregationCursor) Cursors.decodeFromString(Cursors.encodeToString(version, instance)); + return randomSafeZone(); } static BitSet randomBitSet(int size) { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursorTests.java index d34107e6519d..ca135d5170fd 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursorTests.java @@ -8,10 +8,9 @@ package org.elasticsearch.xpack.sql.execution.search; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.AbstractSqlWireSerializingTestCase; import org.elasticsearch.xpack.sql.execution.search.extractor.ComputingExtractorTests; import org.elasticsearch.xpack.sql.execution.search.extractor.ConstantExtractorTests; -import org.elasticsearch.xpack.sql.execution.search.extractor.FieldHitExtractorTests; import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractor; import org.elasticsearch.xpack.sql.session.Cursors; @@ -20,7 +19,7 @@ import java.util.ArrayList; import java.util.List; import java.util.function.Supplier; -public class ScrollCursorTests extends AbstractWireSerializingTestCase { +public class ScrollCursorTests extends AbstractSqlWireSerializingTestCase { public static ScrollCursor randomScrollCursor() { int extractorsSize = between(1, 20); List extractors = new ArrayList<>(extractorsSize); @@ -37,7 +36,6 @@ public class ScrollCursorTests extends AbstractWireSerializingTestCase ComputingExtractorTests.randomComputingExtractor()); } options.add(ConstantExtractorTests::randomConstantExtractor); - options.add(FieldHitExtractorTests::randomFieldHitExtractor); return randomFrom(options).get(); } @@ -70,6 +68,6 @@ public class ScrollCursorTests extends AbstractWireSerializingTestCase { +public class CompositeKeyExtractorTests extends AbstractSqlWireSerializingTestCase { public static CompositeKeyExtractor randomCompositeKeyExtractor() { return new CompositeKeyExtractor(randomAlphaOfLength(16), randomFrom(asList(Property.values())), randomSafeZone(), randomBoolean()); } + public static CompositeKeyExtractor randomCompositeKeyExtractor(ZoneId zoneId) { + return new CompositeKeyExtractor(randomAlphaOfLength(16), randomFrom(asList(Property.values())), zoneId, randomBoolean()); + } + @Override protected CompositeKeyExtractor createTestInstance() { return randomCompositeKeyExtractor(); @@ -38,6 +42,11 @@ public class CompositeKeyExtractorTests extends AbstractWireSerializingTestCase< return CompositeKeyExtractor::new; } + @Override + protected ZoneId instanceZoneId(CompositeKeyExtractor instance) { + return 
instance.zoneId(); + } + @Override protected CompositeKeyExtractor mutateInstance(CompositeKeyExtractor instance) { return new CompositeKeyExtractor( @@ -79,12 +88,7 @@ public class CompositeKeyExtractorTests extends AbstractWireSerializingTestCase< assertEquals("Invalid date key returned: " + value, exception.getMessage()); } - /** - * We need to exclude SystemV/* time zones because they cannot be converted - * back to DateTimeZone which we currently still need to do internally, - * e.g. in bwc serialization and in the extract() method - */ - private static ZoneId randomSafeZone() { - return randomValueOtherThanMany(zi -> zi.getId().startsWith("SystemV"), () -> randomZone()); + public static ZoneId extractZoneId(BucketExtractor extractor) { + return extractor instanceof CompositeKeyExtractor ? ((CompositeKeyExtractor) extractor).zoneId() : null; } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java index 96713befd945..909fdf831e13 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java @@ -9,10 +9,10 @@ import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.search.SearchHit; -import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.AbstractSqlWireSerializingTestCase; import org.elasticsearch.xpack.sql.expression.function.scalar.CastProcessorTests; import org.elasticsearch.xpack.sql.expression.function.scalar.Processors; -import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessorTests; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessorTests; import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathFunctionProcessorTests; import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; @@ -32,7 +32,7 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.xpack.sql.util.CollectionUtils.combine; import static org.elasticsearch.xpack.sql.util.DateUtils.UTC; -public class ComputingExtractorTests extends AbstractWireSerializingTestCase { +public class ComputingExtractorTests extends AbstractSqlWireSerializingTestCase { public static ComputingExtractor randomComputingExtractor() { return new ComputingExtractor(randomProcessor(), randomAlphaOfLength(10)); } @@ -41,8 +41,8 @@ public class ComputingExtractorTests extends AbstractWireSerializingTestCase> options = new ArrayList<>(); options.add(() -> ChainingProcessorTests.randomComposeProcessor()); options.add(CastProcessorTests::randomCastProcessor); - options.add(DateTimeProcessorTests::randomDateTimeProcessor); options.add(MathFunctionProcessorTests::randomMathFunctionProcessor); + options.add(BinaryMathProcessorTests::randomProcessor); return randomFrom(options).get(); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java 
b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java index 008494fa1a5e..b7404b8412a4 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java @@ -11,8 +11,8 @@ import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.search.SearchHit; -import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.AbstractSqlWireSerializingTestCase; import org.elasticsearch.xpack.sql.SqlException; import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape; import org.elasticsearch.xpack.sql.type.DataType; @@ -37,7 +37,7 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.xpack.sql.util.DateUtils.UTC; import static org.hamcrest.Matchers.is; -public class FieldHitExtractorTests extends AbstractWireSerializingTestCase { +public class FieldHitExtractorTests extends AbstractSqlWireSerializingTestCase { public static FieldHitExtractor randomFieldHitExtractor() { String hitName = randomAlphaOfLength(5); @@ -55,6 +55,11 @@ public class FieldHitExtractorTests extends AbstractWireSerializingTestCase { +public class MetricAggExtractorTests extends AbstractSqlWireSerializingTestCase { public static MetricAggExtractor randomMetricAggExtractor() { return new MetricAggExtractor(randomAlphaOfLength(16), randomAlphaOfLength(16), randomAlphaOfLength(16), randomZone(), randomBoolean()); } + public static MetricAggExtractor randomMetricAggExtractor(ZoneId zoneId) { + return new MetricAggExtractor(randomAlphaOfLength(16), randomAlphaOfLength(16), randomAlphaOfLength(16), zoneId, randomBoolean()); + } + @Override protected MetricAggExtractor createTestInstance() { return randomMetricAggExtractor(); @@ -39,6 +43,11 @@ public class MetricAggExtractorTests extends AbstractWireSerializingTestCase { +public class TopHitsAggExtractorTests extends AbstractSqlWireSerializingTestCase { public static TopHitsAggExtractor randomTopHitsAggExtractor() { return new TopHitsAggExtractor(randomAlphaOfLength(16), randomFrom(DataType.values()), randomZone()); @@ -44,6 +44,11 @@ public class TopHitsAggExtractorTests extends AbstractWireSerializingTestCase { +public class DateTimeProcessorTests extends AbstractSqlWireSerializingTestCase { public static DateTimeProcessor randomDateTimeProcessor() { return new DateTimeProcessor(randomFrom(DateTimeExtractor.values()), randomZone()); @@ -33,6 +33,11 @@ public class DateTimeProcessorTests extends AbstractWireSerializingTestCase randomFrom(DateTimeExtractor.values())); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessorTests.java index 3531152c69b8..37aec8b48eff 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeProcessorTests.java @@ -8,7 +8,7 @@ package 
org.elasticsearch.xpack.sql.expression.function.scalar.datetime; import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.AbstractSqlWireSerializingTestCase; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor.NameExtractor; import org.junit.Assume; @@ -18,7 +18,7 @@ import java.time.ZoneId; import static org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeTestUtils.dateTime; import static org.elasticsearch.xpack.sql.util.DateUtils.UTC; -public class NamedDateTimeProcessorTests extends AbstractWireSerializingTestCase { +public class NamedDateTimeProcessorTests extends AbstractSqlWireSerializingTestCase { public static NamedDateTimeProcessor randomNamedDateTimeProcessor() { return new NamedDateTimeProcessor(randomFrom(NameExtractor.values()), UTC); @@ -40,6 +40,11 @@ public class NamedDateTimeProcessorTests extends AbstractWireSerializingTestCase return new NamedDateTimeProcessor(replaced, UTC); } + @Override + protected ZoneId instanceZoneId(NamedDateTimeProcessor instance) { + return instance.zoneId(); + } + public void testValidDayNamesInUTC() { assumeJava9PlusAndCompatLocaleProviderSetting(); NamedDateTimeProcessor proc = new NamedDateTimeProcessor(NameExtractor.DAY_NAME, UTC); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NonIsoDateTimeProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NonIsoDateTimeProcessorTests.java index 6fb007e43213..a2ee6796f668 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NonIsoDateTimeProcessorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NonIsoDateTimeProcessorTests.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.AbstractSqlWireSerializingTestCase; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NonIsoDateTimeProcessor.NonIsoDateTimeExtractor; import java.io.IOException; @@ -15,7 +15,7 @@ import java.time.ZoneId; import static org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeTestUtils.dateTime; import static org.elasticsearch.xpack.sql.util.DateUtils.UTC; -public class NonIsoDateTimeProcessorTests extends AbstractWireSerializingTestCase { +public class NonIsoDateTimeProcessorTests extends AbstractSqlWireSerializingTestCase { public static NonIsoDateTimeProcessor randomNonISODateTimeProcessor() { @@ -38,6 +38,11 @@ public class NonIsoDateTimeProcessorTests extends AbstractWireSerializingTestCas return new NonIsoDateTimeProcessor(replaced, UTC); } + @Override + protected ZoneId instanceZoneId(NonIsoDateTimeProcessor instance) { + return instance.zoneId(); + } + public void testNonISOWeekOfYearInUTC() { NonIsoDateTimeProcessor proc = new NonIsoDateTimeProcessor(NonIsoDateTimeExtractor.WEEK_OF_YEAR, UTC); assertEquals(2, proc.process(dateTime(568372930000L))); //1988-01-05T09:22:10Z[UTC] diff --git 
a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/TimeProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/TimeProcessorTests.java index 65b2cde2d0a6..cab3acdfeb3d 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/TimeProcessorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/TimeProcessorTests.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.AbstractSqlWireSerializingTestCase; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor; import java.time.ZoneId; @@ -14,7 +14,7 @@ import java.time.ZoneId; import static org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeTestUtils.time; import static org.elasticsearch.xpack.sql.util.DateUtils.UTC; -public class TimeProcessorTests extends AbstractWireSerializingTestCase { +public class TimeProcessorTests extends AbstractSqlWireSerializingTestCase { public static TimeProcessor randomTimeProcessor() { return new TimeProcessor(randomFrom(DateTimeExtractor.values()), randomZone()); @@ -30,6 +30,11 @@ public class TimeProcessorTests extends AbstractWireSerializingTestCase randomFrom(DateTimeExtractor.values())); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/gen/processor/ChainingProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/gen/processor/ChainingProcessorTests.java index 98ad0daa8239..fc7c59836d27 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/gen/processor/ChainingProcessorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/gen/processor/ChainingProcessorTests.java @@ -7,16 +7,15 @@ package org.elasticsearch.xpack.sql.expression.gen.processor; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.AbstractSqlWireSerializingTestCase; import org.elasticsearch.xpack.sql.expression.function.scalar.Processors; -import org.elasticsearch.xpack.sql.expression.gen.processor.ChainingProcessor; import java.io.IOException; import java.util.function.Supplier; import static org.elasticsearch.xpack.sql.execution.search.extractor.ComputingExtractorTests.randomProcessor; -public class ChainingProcessorTests extends AbstractWireSerializingTestCase { +public class ChainingProcessorTests extends AbstractSqlWireSerializingTestCase { public static ChainingProcessor randomComposeProcessor() { return new ChainingProcessor(randomProcessor(), randomProcessor()); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java index 9c8c32689b70..509e8e954f35 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java @@ 
-514,7 +514,7 @@ public class SysColumnsTests extends ESTestCase { return Void.TYPE; }).when(resolver).resolveAsMergedMapping(any(), any(), anyBoolean(), any()); - tuple.v1().execute(tuple.v2(), wrap(consumer::accept, ex -> fail(ex.getMessage()))); + tuple.v1().execute(tuple.v2(), wrap(p -> consumer.accept((SchemaRowSet) p.rowSet()), ex -> fail(ex.getMessage()))); } private Tuple sql(String sql, List params, Map mapping) { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java index d4db97aba09c..834c5808b707 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java @@ -386,6 +386,6 @@ public class SysTablesTests extends ESTestCase { return Void.TYPE; }).when(resolver).resolveNames(any(), any(), any(), any()); - tuple.v1().execute(tuple.v2(), wrap(consumer::accept, ex -> fail(ex.getMessage()))); + tuple.v1().execute(tuple.v2(), wrap(p -> consumer.accept((SchemaRowSet) p.rowSet()), ex -> fail(ex.getMessage()))); } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java index 805268dd5b68..6b7500cab661 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.xpack.sql.analysis.index.IndexResolver; import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; import org.elasticsearch.xpack.sql.parser.SqlParser; import org.elasticsearch.xpack.sql.plan.logical.command.Command; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; import org.elasticsearch.xpack.sql.session.SqlSession; import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.type.TypesTests; @@ -50,7 +51,8 @@ public class SysTypesTests extends ESTestCase { "INTERVAL_HOUR_TO_MINUTE", "INTERVAL_HOUR_TO_SECOND", "INTERVAL_MINUTE_TO_SECOND", "GEO_SHAPE", "GEO_POINT", "UNSUPPORTED", "OBJECT", "NESTED"); - cmd.execute(null, wrap(r -> { + cmd.execute(session(), wrap(p -> { + SchemaRowSet r = (SchemaRowSet) p.rowSet(); assertEquals(19, r.columnCount()); assertEquals(DataType.values().length, r.size()); assertFalse(r.schema().types().contains(DataType.NULL)); @@ -72,7 +74,8 @@ public class SysTypesTests extends ESTestCase { public void testSysTypesDefaultFiltering() { Command cmd = sql("SYS TYPES 0").v1(); - cmd.execute(null, wrap(r -> { + cmd.execute(session(), wrap(p -> { + SchemaRowSet r = (SchemaRowSet) p.rowSet(); assertEquals(DataType.values().length, r.size()); }, ex -> fail(ex.getMessage()))); } @@ -81,7 +84,8 @@ public class SysTypesTests extends ESTestCase { // boolean = 16 Command cmd = sql("SYS TYPES " + JDBCType.BOOLEAN.getVendorTypeNumber()).v1(); - cmd.execute(null, wrap(r -> { + cmd.execute(session(), wrap(p -> { + SchemaRowSet r = (SchemaRowSet) p.rowSet(); assertEquals(1, r.size()); assertEquals("BOOLEAN", r.column(0)); }, ex -> fail(ex.getMessage()))); @@ -90,7 +94,8 @@ public class SysTypesTests extends ESTestCase { public void testSysTypesNegativeFiltering() { 
Command cmd = sql("SYS TYPES " + JDBCType.TINYINT.getVendorTypeNumber()).v1(); - cmd.execute(null, wrap(r -> { + cmd.execute(session(), wrap(p -> { + SchemaRowSet r = (SchemaRowSet) p.rowSet(); assertEquals(1, r.size()); assertEquals("BYTE", r.column(0)); }, ex -> fail(ex.getMessage()))); @@ -99,7 +104,8 @@ public class SysTypesTests extends ESTestCase { public void testSysTypesMultipleMatches() { Command cmd = sql("SYS TYPES " + JDBCType.VARCHAR.getVendorTypeNumber()).v1(); - cmd.execute(null, wrap(r -> { + cmd.execute(session(), wrap(p -> { + SchemaRowSet r = (SchemaRowSet) p.rowSet(); assertEquals(3, r.size()); assertEquals("KEYWORD", r.column(0)); assertTrue(r.advanceRow()); @@ -108,4 +114,8 @@ public class SysTypesTests extends ESTestCase { assertEquals("IP", r.column(0)); }, ex -> fail(ex.getMessage()))); } + + private static SqlSession session() { + return new SqlSession(TestUtils.TEST_CFG, null, null, null, null, null, null, null, null); + } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java index 71d0e9f0f2b8..adc8fd60af28 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java @@ -861,6 +861,30 @@ public class QueryTranslatorTests extends ESTestCase { assertEquals(DataType.DATETIME, field.dataType()); } + public void testGroupByHistogramQueryTranslator() { + PhysicalPlan p = optimizeAndPlan("SELECT MAX(int) FROM test GROUP BY HISTOGRAM(date, INTERVAL 2 YEARS)"); + assertEquals(EsQueryExec.class, p.getClass()); + EsQueryExec eqe = (EsQueryExec) p; + assertEquals(1, eqe.output().size()); + assertEquals("MAX(int)", eqe.output().get(0).qualifiedName()); + assertEquals(DataType.INTEGER, eqe.output().get(0).dataType()); + assertThat(eqe.queryContainer().aggs().asAggBuilder().toString().replaceAll("\\s+", ""), + containsString("\"date_histogram\":{\"field\":\"date\",\"missing_bucket\":true,\"value_type\":\"date\",\"order\":\"asc\"," + + "\"fixed_interval\":\"62208000000ms\",\"time_zone\":\"Z\"}}}]}")); + } + + public void testGroupByYearQueryTranslator() { + PhysicalPlan p = optimizeAndPlan("SELECT YEAR(date) FROM test GROUP BY YEAR(date)"); + assertEquals(EsQueryExec.class, p.getClass()); + EsQueryExec eqe = (EsQueryExec) p; + assertEquals(1, eqe.output().size()); + assertEquals("YEAR(date)", eqe.output().get(0).qualifiedName()); + assertEquals(DataType.INTEGER, eqe.output().get(0).dataType()); + assertThat(eqe.queryContainer().aggs().asAggBuilder().toString().replaceAll("\\s+", ""), + endsWith("\"date_histogram\":{\"field\":\"date\",\"missing_bucket\":true,\"value_type\":\"date\",\"order\":\"asc\"," + + "\"fixed_interval\":\"31536000000ms\",\"time_zone\":\"Z\"}}}]}}}")); + } + public void testGroupByHistogramWithDate() { LogicalPlan p = plan("SELECT MAX(int) FROM test GROUP BY HISTOGRAM(CAST(date AS DATE), INTERVAL 2 MONTHS)"); assertTrue(p instanceof Aggregate); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/CursorTests.java similarity index 82% rename from x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java rename to x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/CursorTests.java index 
f39e9bc2e2bb..28d9c278ed16 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/CursorTests.java @@ -3,23 +3,26 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.sql.execution.search; +package org.elasticsearch.xpack.sql.plugin; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Client; +import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.SqlException; import org.elasticsearch.xpack.sql.TestUtils; import org.elasticsearch.xpack.sql.action.BasicFormatter; import org.elasticsearch.xpack.sql.action.SqlQueryResponse; -import org.elasticsearch.xpack.sql.plugin.TextFormatterCursor; +import org.elasticsearch.xpack.sql.execution.search.ScrollCursor; +import org.elasticsearch.xpack.sql.execution.search.ScrollCursorTests; import org.elasticsearch.xpack.sql.proto.ColumnInfo; import org.elasticsearch.xpack.sql.proto.Mode; import org.elasticsearch.xpack.sql.session.Cursor; import org.elasticsearch.xpack.sql.session.Cursors; +import org.elasticsearch.xpack.sql.session.CursorsTestUtil; import org.mockito.ArgumentCaptor; import java.util.ArrayList; @@ -29,8 +32,6 @@ import java.util.List; import java.util.function.Supplier; import static org.elasticsearch.action.support.PlainActionFuture.newFuture; -import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.CLI; -import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.TEXT; import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; @@ -82,8 +83,8 @@ public class CursorTests extends ESTestCase { () -> { SqlQueryResponse response = createRandomSqlResponse(); if (response.columns() != null && response.rows() != null) { - return TextFormatterCursor.wrap(ScrollCursorTests.randomScrollCursor(), - new BasicFormatter(response.columns(), response.rows(), CLI)); + return new TextFormatterCursor(ScrollCursorTests.randomScrollCursor(), + new BasicFormatter(response.columns(), response.rows(), BasicFormatter.FormatOption.CLI)); } else { return ScrollCursorTests.randomScrollCursor(); } @@ -91,8 +92,8 @@ public class CursorTests extends ESTestCase { () -> { SqlQueryResponse response = createRandomSqlResponse(); if (response.columns() != null && response.rows() != null) { - return TextFormatterCursor.wrap(ScrollCursorTests.randomScrollCursor(), - new BasicFormatter(response.columns(), response.rows(), TEXT)); + return new TextFormatterCursor(ScrollCursorTests.randomScrollCursor(), + new BasicFormatter(response.columns(), response.rows(), BasicFormatter.FormatOption.TEXT)); } else { return ScrollCursorTests.randomScrollCursor(); } @@ -103,15 +104,14 @@ public class CursorTests extends ESTestCase { public void testVersionHandling() { Cursor cursor = randomNonEmptyCursor(); - assertEquals(cursor, Cursors.decodeFromString(Cursors.encodeToString(Version.CURRENT, cursor))); + assertEquals(cursor, Cursors.decodeFromString(Cursors.encodeToString(cursor, randomZone()))); Version nextMinorVersion = Version.fromId(Version.CURRENT.id + 10000); - String 
encodedWithWrongVersion = Cursors.encodeToString(nextMinorVersion, cursor); + String encodedWithWrongVersion = CursorsTestUtil.encodeToString(cursor, nextMinorVersion, randomZone()); SqlException exception = expectThrows(SqlException.class, () -> Cursors.decodeFromString(encodedWithWrongVersion)); - assertEquals("Unsupported cursor version " + nextMinorVersion, exception.getMessage()); + assertEquals(LoggerMessageFormat.format("Unsupported cursor version [{}], expected [{}]", nextMinorVersion, Version.CURRENT), + exception.getMessage()); } - - -} +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/TextFormatTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/TextFormatTests.java index b1c2526b606d..a709133b6014 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/TextFormatTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/TextFormatTests.java @@ -76,17 +76,17 @@ public class TextFormatTests extends ESTestCase { } public void testCsvFormatWithEmptyData() { - String text = CSV.format(null, req(), emptyData()); + String text = CSV.format(req(), emptyData()); assertEquals("name\r\n", text); } public void testTsvFormatWithEmptyData() { - String text = TSV.format(null, req(), emptyData()); + String text = TSV.format(req(), emptyData()); assertEquals("name\n", text); } public void testCsvFormatWithRegularData() { - String text = CSV.format(null, req(), regularData()); + String text = CSV.format(req(), regularData()); assertEquals("string,number\r\n" + "Along The River Bank,708\r\n" + "Mind Train,280\r\n", @@ -94,7 +94,7 @@ public class TextFormatTests extends ESTestCase { } public void testTsvFormatWithRegularData() { - String text = TSV.format(null, req(), regularData()); + String text = TSV.format(req(), regularData()); assertEquals("string\tnumber\n" + "Along The River Bank\t708\n" + "Mind Train\t280\n", @@ -102,7 +102,7 @@ public class TextFormatTests extends ESTestCase { } public void testCsvFormatWithEscapedData() { - String text = CSV.format(null, req(), escapedData()); + String text = CSV.format(req(), escapedData()); assertEquals("first,\"\"\"special\"\"\"\r\n" + "normal,\"\"\"quo\"\"ted\"\",\n\"\r\n" + "commas,\"a,b,c,\n,d,e,\t\n\"\r\n" @@ -110,7 +110,7 @@ public class TextFormatTests extends ESTestCase { } public void testTsvFormatWithEscapedData() { - String text = TSV.format(null, req(), escapedData()); + String text = TSV.format(req(), escapedData()); assertEquals("first\t\"special\"\n" + "normal\t\"quo\"ted\",\\n\n" + "commas\ta,b,c,\\n,d,e,\\t\\n\n" diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/session/CursorsTestUtil.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/session/CursorsTestUtil.java new file mode 100644 index 000000000000..83102b461163 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/session/CursorsTestUtil.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.Version; + +import java.time.ZoneId; + +public class CursorsTestUtil { + + public static String encodeToString(Cursor info, Version version, ZoneId zoneId) { + return Cursors.encodeToString(info, version, zoneId); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/PagingListCursorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/session/ListCursorTests.java similarity index 61% rename from x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/PagingListCursorTests.java rename to x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/session/ListCursorTests.java index c042b99ddf33..ac14465451eb 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/PagingListCursorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/session/ListCursorTests.java @@ -3,21 +3,20 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.sql.execution.search; +package org.elasticsearch.xpack.sql.session; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.xpack.sql.session.Cursors; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; -public class PagingListCursorTests extends AbstractWireSerializingTestCase { - public static PagingListCursor randomPagingListCursor() { +public class ListCursorTests extends AbstractWireSerializingTestCase { + public static ListCursor randomPagingListCursor() { int size = between(1, 20); int depth = between(1, 20); @@ -26,14 +25,14 @@ public class PagingListCursorTests extends AbstractWireSerializingTestCase new Object[depth], () -> randomByte()))); } - return new PagingListCursor(values, depth, between(1, 20)); + return new ListCursor(values, between(1, 20), depth); } @Override - protected PagingListCursor mutateInstance(PagingListCursor instance) throws IOException { - return new PagingListCursor(instance.data(), - instance.columnCount(), - randomValueOtherThan(instance.pageSize(), () -> between(1, 20))); + protected ListCursor mutateInstance(ListCursor instance) throws IOException { + return new ListCursor(instance.data(), + randomValueOtherThan(instance.pageSize(), () -> between(1, 20)), + instance.columnCount()); } @Override @@ -42,22 +41,22 @@ public class PagingListCursorTests extends AbstractWireSerializingTestCase instanceReader() { - return PagingListCursor::new; + protected Reader instanceReader() { + return ListCursor::new; } @Override - protected PagingListCursor copyInstance(PagingListCursor instance, Version version) throws IOException { + protected ListCursor copyInstance(ListCursor instance, Version version) throws IOException { /* Randomly choose between internal protocol round trip and String based * round trips used to toXContent. 
*/ if (randomBoolean()) { return super.copyInstance(instance, version); } - return (PagingListCursor) Cursors.decodeFromString(Cursors.encodeToString(version, instance)); + return (ListCursor) Cursors.decodeFromString(Cursors.encodeToString(instance, randomZone())); } } \ No newline at end of file diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/indices.freeze/20_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/indices.freeze/20_stats.yml index 1962fa232c8e..f995f0ed67b8 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/indices.freeze/20_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/indices.freeze/20_stats.yml @@ -10,8 +10,8 @@ setup: --- "Translog stats on frozen indices": - skip: - version: " - 7.2.99" - reason: "frozen indices have translog stats starting version 7.3.0" + version: " - 7.9.99" + reason: "start ignoring translog retention policy with soft-deletes enabled in 8.0" - do: index: @@ -46,7 +46,7 @@ setup: - do: indices.stats: metric: [ translog ] - - match: { indices.test.primaries.translog.operations: 3 } + - match: { indices.test.primaries.translog.operations: 0 } - match: { indices.test.primaries.translog.uncommitted_operations: 0 } # unfreeze index @@ -58,5 +58,5 @@ setup: - do: indices.stats: metric: [ translog ] - - match: { indices.test.primaries.translog.operations: 3 } + - match: { indices.test.primaries.translog.operations: 0 } - match: { indices.test.primaries.translog.uncommitted_operations: 0 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_memory_usage_estimation.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_memory_usage_estimation.yml index 0b84f31cfb22..a58ea36aaacf 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_memory_usage_estimation.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/data_frame_analytics_memory_usage_estimation.yml @@ -13,19 +13,16 @@ setup: --- "Test memory usage estimation for empty data frame": - - do: ml.estimate_memory_usage: body: - data_frame_analytics_config: - source: { index: "index-source" } - analysis: { outlier_detection: {} } - - match: { expected_memory_usage_with_one_partition: "0" } - - match: { expected_memory_usage_with_max_partitions: "0" } + source: { index: "index-source" } + analysis: { outlier_detection: {} } + - match: { expected_memory_without_disk: "0" } + - match: { expected_memory_with_disk: "0" } --- "Test memory usage estimation for non-empty data frame": - - do: index: index: index-source @@ -36,11 +33,10 @@ setup: - do: ml.estimate_memory_usage: body: - data_frame_analytics_config: - source: { index: "index-source" } - analysis: { outlier_detection: {} } - - match: { expected_memory_usage_with_one_partition: "3kb" } - - match: { expected_memory_usage_with_max_partitions: "3kb" } + source: { index: "index-source" } + analysis: { outlier_detection: {} } + - match: { expected_memory_without_disk: "3kb" } + - match: { expected_memory_with_disk: "3kb" } - do: index: @@ -52,11 +48,10 @@ setup: - do: ml.estimate_memory_usage: body: - data_frame_analytics_config: - source: { index: "index-source" } - analysis: { outlier_detection: {} } - - match: { expected_memory_usage_with_one_partition: "4kb" } - - match: { expected_memory_usage_with_max_partitions: "4kb" } + source: { index: "index-source" } + analysis: { outlier_detection: {} } + - match: { expected_memory_without_disk: "4kb" } + - match: { 
expected_memory_with_disk: "4kb" } - do: index: @@ -68,8 +63,7 @@ setup: - do: ml.estimate_memory_usage: body: - data_frame_analytics_config: - source: { index: "index-source" } - analysis: { outlier_detection: {} } - - match: { expected_memory_usage_with_one_partition: "6kb" } - - match: { expected_memory_usage_with_max_partitions: "5kb" } + source: { index: "index-source" } + analysis: { outlier_detection: {} } + - match: { expected_memory_without_disk: "6kb" } + - match: { expected_memory_with_disk: "5kb" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/evaluate_data_frame.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/evaluate_data_frame.yml index 46d903977eb2..a4d3c1f1979c 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/evaluate_data_frame.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/evaluate_data_frame.yml @@ -602,3 +602,34 @@ setup: - match: { regression.mean_squared_error.error: 28.67749840974834 } - match: { regression.r_squared.value: 0.8551031778603486 } +--- +"Test regression given missing actual_field": + - do: + catch: /No documents found containing both \[missing, regression_field_pred\] fields/ + ml.evaluate_data_frame: + body: > + { + "index": "utopia", + "evaluation": { + "regression": { + "actual_field": "missing", + "predicted_field": "regression_field_pred" + } + } + } + +--- +"Test regression given missing predicted_field": + - do: + catch: /No documents found containing both \[regression_field_act, missing\] fields/ + ml.evaluate_data_frame: + body: > + { + "index": "utopia", + "evaluation": { + "regression": { + "actual_field": "regression_field_act", + "predicted_field": "missing" + } + } + } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java index fca29821bfaa..f1dd7be19656 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.watcher; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; @@ -25,6 +27,7 @@ import org.elasticsearch.xpack.watcher.watch.WatchStoreUtils; import java.util.Collections; import java.util.Comparator; +import java.util.EnumSet; import java.util.List; import java.util.Set; import java.util.concurrent.atomic.AtomicReference; @@ -35,10 +38,12 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; public class WatcherLifeCycleService implements ClusterStateListener { + private static final Logger logger = LogManager.getLogger(WatcherLifeCycleService.class); private final AtomicReference state = new AtomicReference<>(WatcherState.STARTED); private final AtomicReference> previousShardRoutings = new AtomicReference<>(Collections.emptyList()); private volatile boolean shutDown = false; // indicates that the node has been shutdown and we should never start watcher after this. 
private volatile WatcherService watcherService; + private final EnumSet stopStates = EnumSet.of(WatcherState.STOPPED, WatcherState.STOPPING); WatcherLifeCycleService(ClusterService clusterService, WatcherService watcherService) { this.watcherService = watcherService; @@ -57,8 +62,10 @@ public class WatcherLifeCycleService implements ClusterStateListener { this.state.set(WatcherState.STOPPING); shutDown = true; clearAllocationIds(); - watcherService.shutDown(); - this.state.set(WatcherState.STOPPED); + watcherService.shutDown(() -> { + this.state.set(WatcherState.STOPPED); + logger.info("watcher has stopped and shutdown"); + }); } /** @@ -88,9 +95,10 @@ public class WatcherLifeCycleService implements ClusterStateListener { } boolean isWatcherStoppedManually = isWatcherStoppedManually(event.state()); + boolean isStoppedOrStopping = stopStates.contains(this.state.get()); // if this is not a data node, we need to start it ourselves possibly if (event.state().nodes().getLocalNode().isDataNode() == false && - isWatcherStoppedManually == false && this.state.get() == WatcherState.STOPPED) { + isWatcherStoppedManually == false && isStoppedOrStopping) { this.state.set(WatcherState.STARTING); watcherService.start(event.state(), () -> this.state.set(WatcherState.STARTED)); return; @@ -99,8 +107,20 @@ public class WatcherLifeCycleService implements ClusterStateListener { if (isWatcherStoppedManually) { if (this.state.get() == WatcherState.STARTED) { clearAllocationIds(); - watcherService.stop("watcher manually marked to shutdown by cluster state update"); - this.state.set(WatcherState.STOPPED); + boolean stopping = this.state.compareAndSet(WatcherState.STARTED, WatcherState.STOPPING); + if (stopping) { + //waiting to set state to stopped until after all currently running watches are finished + watcherService.stop("watcher manually marked to shutdown by cluster state update", () -> { + //only transition from stopping -> stopped (which may not be the case if restarted quickly) + boolean stopped = state.compareAndSet(WatcherState.STOPPING, WatcherState.STOPPED); + if (stopped) { + logger.info("watcher has stopped"); + } else { + logger.info("watcher has not been stopped. 
not currently in a stopping state, current state [{}]", state.get()); + } + + }); + } } return; } @@ -142,7 +162,7 @@ public class WatcherLifeCycleService implements ClusterStateListener { previousShardRoutings.set(localAffectedShardRoutings); if (state.get() == WatcherState.STARTED) { watcherService.reload(event.state(), "new local watcher shard allocation ids"); - } else if (state.get() == WatcherState.STOPPED) { + } else if (isStoppedOrStopping) { this.state.set(WatcherState.STARTING); watcherService.start(event.state(), () -> this.state.set(WatcherState.STARTED)); } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java index c96203bd6422..32031e78f5e4 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java @@ -35,6 +35,7 @@ import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.upgrade.UpgradeField; +import org.elasticsearch.xpack.core.watcher.WatcherState; import org.elasticsearch.xpack.core.watcher.execution.TriggeredWatchStoreField; import org.elasticsearch.xpack.core.watcher.watch.Watch; import org.elasticsearch.xpack.watcher.execution.ExecutionService; @@ -144,24 +145,29 @@ public class WatcherService { } /** - * Stops the watcher service and marks its services as paused + * Stops the watcher service and marks its services as paused. Callers should set the Watcher state to {@link WatcherState#STOPPING} + * prior to calling this method. + * + * @param stoppedListener The listener that will set Watcher state to: {@link WatcherState#STOPPED}, may not be {@code null} */ - public void stop(String reason) { + public void stop(String reason, Runnable stoppedListener) { + assert stoppedListener != null; logger.info("stopping watch service, reason [{}]", reason); - executionService.pause(); + executionService.pause(stoppedListener); triggerService.pauseExecution(); } /** * shuts down the trigger service as well to make sure there are no lingering threads - * also no need to check anything, as this is final, we just can go to status STOPPED + * + * @param stoppedListener The listener that will set Watcher state to: {@link WatcherState#STOPPED}, may not be {@code null} */ - void shutDown() { + void shutDown(Runnable stoppedListener) { + assert stoppedListener != null; logger.info("stopping watch service, reason [shutdown initiated]"); - executionService.pause(); + executionService.pause(stoppedListener); triggerService.stop(); stopExecutor(); - logger.debug("watch service has stopped"); } void stopExecutor() { @@ -185,7 +191,7 @@ public class WatcherService { processedClusterStateVersion.set(state.getVersion()); triggerService.pauseExecution(); - int cancelledTaskCount = executionService.clearExecutionsAndQueue(); + int cancelledTaskCount = executionService.clearExecutionsAndQueue(() -> {}); logger.info("reloading watcher, reason [{}], cancelled [{}] queued tasks", reason, cancelledTaskCount); executor.execute(wrapWatcherService(() -> reloadInner(state, reason, false), @@ -256,7 +262,7 @@ public class WatcherService { */ public void pauseExecution(String reason) { triggerService.pauseExecution(); - int cancelledTaskCount = executionService.pause(); + int cancelledTaskCount = 
executionService.pause(() -> {}); logger.info("paused watch execution, reason [{}], cancelled [{}] queued tasks", reason, cancelledTaskCount); } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/CurrentExecutions.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/CurrentExecutions.java index 95ac80300368..9e76cbcffca6 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/CurrentExecutions.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/CurrentExecutions.java @@ -5,8 +5,11 @@ */ package org.elasticsearch.xpack.watcher.execution; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.watcher.WatcherState; import java.util.Iterator; import java.util.concurrent.ConcurrentHashMap; @@ -19,6 +22,7 @@ import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalSta public final class CurrentExecutions implements Iterable { + private static final Logger logger = LogManager.getLogger(CurrentExecutions.class); private final ConcurrentMap currentExecutions = new ConcurrentHashMap<>(); // the condition of the lock is used to wait and signal the finishing of all executions on shutdown private final ReentrantLock lock = new ReentrantLock(); @@ -63,9 +67,12 @@ public final class CurrentExecutions implements Iterable()); - this.clearExecutions(); + this.clearExecutions(stoppedListener); return cancelledTaskCount; } @@ -278,8 +287,10 @@ public class ExecutionService { ctx.setNodeId(clusterService.localNode().getId()); WatchRecord record = null; final String watchId = ctx.id().watchId(); + //pull this to a local reference since the class reference can be swapped, and need to ensure same object is used for put/remove + final CurrentExecutions currentExecutions = this.currentExecutions.get(); try { - boolean executionAlreadyExists = currentExecutions.get().put(watchId, new WatchExecution(ctx, Thread.currentThread())); + boolean executionAlreadyExists = currentExecutions.put(watchId, new WatchExecution(ctx, Thread.currentThread())); if (executionAlreadyExists) { logger.trace("not executing watch [{}] because it is already queued", watchId); record = ctx.abortBeforeExecution(ExecutionState.NOT_EXECUTED_ALREADY_QUEUED, "Watch is already queued in thread pool"); @@ -334,7 +345,7 @@ public class ExecutionService { triggeredWatchStore.delete(ctx.id()); } - currentExecutions.get().remove(watchId); + currentExecutions.remove(watchId); logger.debug("finished [{}]/[{}]", watchId, ctx.id()); } return record; @@ -577,11 +588,15 @@ public class ExecutionService { /** * This clears out the current executions and sets new empty current executions * This is needed, because when this method is called, watcher keeps running, so sealing executions would be a bad idea + * + * @param stoppedListener The listener that will set Watcher state to: {@link WatcherState#STOPPED}, may be a no-op assuming the + * {@link WatcherState#STOPPED} is set elsewhere or not needed to be set. 
*/ - private void clearExecutions() { + private void clearExecutions(Runnable stoppedListener) { + assert stoppedListener != null; final CurrentExecutions currentExecutionsBeforeSetting = currentExecutions.getAndSet(new CurrentExecutions()); // clear old executions in background, no need to wait - genericExecutor.execute(() -> currentExecutionsBeforeSetting.sealAndAwaitEmpty(maxStopTimeout)); + genericExecutor.execute(() -> currentExecutionsBeforeSetting.sealAndAwaitEmpty(maxStopTimeout, stoppedListener)); } // the watch execution task takes another runnable as parameter diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java index 548583ac14b7..cf6c2c5ac666 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.xpack.core.watcher.WatcherMetaData; import org.elasticsearch.xpack.core.watcher.WatcherState; import org.elasticsearch.xpack.core.watcher.watch.Watch; import org.junit.Before; +import org.mockito.ArgumentCaptor; import org.mockito.stubbing.Answer; import java.util.Collections; @@ -133,8 +134,8 @@ public class WatcherLifeCycleServiceTests extends ESTestCase { when(watcherService.validate(clusterState)).thenReturn(true); lifeCycleService.shutDown(); - verify(watcherService, never()).stop(anyString()); - verify(watcherService, times(1)).shutDown(); + verify(watcherService, never()).stop(anyString(), any()); + verify(watcherService, times(1)).shutDown(any()); reset(watcherService); lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterState, clusterState)); @@ -175,7 +176,12 @@ public class WatcherLifeCycleServiceTests extends ESTestCase { .build(); lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", stoppedClusterState, clusterState)); - verify(watcherService, times(1)).stop(eq("watcher manually marked to shutdown by cluster state update")); + ArgumentCaptor captor = ArgumentCaptor.forClass(Runnable.class); + verify(watcherService, times(1)) + .stop(eq("watcher manually marked to shutdown by cluster state update"), captor.capture()); + assertEquals(WatcherState.STOPPING, lifeCycleService.getState()); + captor.getValue().run(); + assertEquals(WatcherState.STOPPED, lifeCycleService.getState()); // Starting via cluster state update, as the watcher metadata block is removed/set to true reset(watcherService); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java index f4ee831266b3..e67512ee694c 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java @@ -269,8 +269,8 @@ public class WatcherServiceTests extends ESTestCase { csBuilder.metaData(MetaData.builder()); service.reload(csBuilder.build(), "whatever"); - verify(executionService).clearExecutionsAndQueue(); - verify(executionService, never()).pause(); + verify(executionService).clearExecutionsAndQueue(any()); + verify(executionService, never()).pause(any()); verify(triggerService).pauseExecution(); } diff --git 
a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java index 20c27531509a..594bdb6258cc 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java @@ -520,10 +520,12 @@ public abstract class AbstractWatcherIntegrationTestCase extends ESIntegTestCase WatcherStatsResponse watcherStatsResponse = new WatcherStatsRequestBuilder(client()).get(); assertThat(watcherStatsResponse.hasFailures(), is(false)); List> currentStatesFromStatsRequest = watcherStatsResponse.getNodes().stream() - .map(response -> Tuple.tuple(response.getNode().getName(), response.getWatcherState())) - .collect(Collectors.toList()); + .map(response -> Tuple.tuple(response.getNode().getName() + " (" + response.getThreadPoolQueueSize() + ")", + response.getWatcherState())).collect(Collectors.toList()); List states = currentStatesFromStatsRequest.stream().map(Tuple::v2).collect(Collectors.toList()); + + logger.info("waiting to stop watcher, current states {}", currentStatesFromStatsRequest); boolean isAllStateStarted = states.stream().allMatch(w -> w == WatcherState.STARTED); @@ -548,7 +550,7 @@ public abstract class AbstractWatcherIntegrationTestCase extends ESIntegTestCase } throw new AssertionError("unexpected state, retrying with next run"); - }); + }, 60, TimeUnit.SECONDS); } public static class NoopEmailService extends EmailService { diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java index 679bc08f01f3..55bb78cdc475 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java @@ -19,6 +19,7 @@ import org.junit.After; import org.junit.Before; import java.util.Collections; +import java.util.concurrent.TimeUnit; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -108,7 +109,7 @@ public class SmokeTestWatcherWithSecurityClientYamlTestSuiteIT extends ESClientY default: throw new AssertionError("unknown state[" + state + "]"); } - }); + }, 60, TimeUnit.SECONDS); } @Override diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java index 20fd53b72e41..7675642860b7 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java @@ -22,6 +22,7 @@ import org.junit.Before; import java.io.IOException; import java.util.Map; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import static 
org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -116,7 +117,7 @@ public class SmokeTestWatcherWithSecurityIT extends ESRestTestCase { } catch (IOException e) { throw new AssertionError(e); } - }); + }, 60, TimeUnit.SECONDS); adminClient().performRequest(new Request("DELETE", "/my_test_index")); } diff --git a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java index 60866661617f..2b509a348e90 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java +++ b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java @@ -21,6 +21,7 @@ import org.junit.Before; import java.io.IOException; import java.util.Map; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -93,7 +94,7 @@ public class SmokeTestWatcherTestSuiteIT extends ESRestTestCase { default: throw new AssertionError("unknown state[" + state + "]"); } - }); + }, 60, TimeUnit.SECONDS); } @Override diff --git a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java index 2dd5cc86a89c..63efe7ad781c 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java +++ b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java @@ -14,6 +14,8 @@ import org.elasticsearch.xpack.test.rest.XPackRestTestConstants; import org.junit.After; import org.junit.Before; +import java.util.concurrent.TimeUnit; + import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; @@ -90,6 +92,6 @@ public class WatcherRestIT extends ESClientYamlSuiteTestCase { default: throw new AssertionError("unknown state[" + state + "]"); } - }); + }, 60, TimeUnit.SECONDS); } } diff --git a/x-pack/qa/third-party/jira/src/test/java/org/elasticsearch/smoketest/WatcherJiraYamlTestSuiteIT.java b/x-pack/qa/third-party/jira/src/test/java/org/elasticsearch/smoketest/WatcherJiraYamlTestSuiteIT.java index 8f8792f26971..c2579bed6f39 100644 --- a/x-pack/qa/third-party/jira/src/test/java/org/elasticsearch/smoketest/WatcherJiraYamlTestSuiteIT.java +++ b/x-pack/qa/third-party/jira/src/test/java/org/elasticsearch/smoketest/WatcherJiraYamlTestSuiteIT.java @@ -17,6 +17,7 @@ import org.junit.Before; import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.concurrent.TimeUnit; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -70,6 +71,6 @@ public class WatcherJiraYamlTestSuiteIT extends ESClientYamlSuiteTestCase { } catch (IOException e) { throw new AssertionError(e); } - }); + }, 60, TimeUnit.SECONDS); } } diff --git a/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java b/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java index b9a628f71f97..2c79eecdef1b 100644 --- a/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java +++ 
b/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java @@ -17,6 +17,7 @@ import org.junit.Before; import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.concurrent.TimeUnit; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -70,6 +71,6 @@ public class WatcherPagerDutyYamlTestSuiteIT extends ESClientYamlSuiteTestCase { } catch (IOException e) { throw new AssertionError(e); } - }); + }, 60, TimeUnit.SECONDS); } } diff --git a/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java b/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java index 01eeae442b2e..2e916692605e 100644 --- a/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java +++ b/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java @@ -17,6 +17,7 @@ import org.junit.Before; import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.concurrent.TimeUnit; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -70,6 +71,6 @@ public class WatcherSlackYamlTestSuiteIT extends ESClientYamlSuiteTestCase { } catch (IOException e) { throw new AssertionError(e); } - }); + }, 60, TimeUnit.SECONDS); } }
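Note on the Watcher shutdown changes above: every stop path now goes through a two-step transition. WatcherLifeCycleService moves the state to STOPPING, hands WatcherService (and ultimately ExecutionService/CurrentExecutions) a stoppedListener Runnable, and only that Runnable, invoked once in-flight executions have drained, moves the state to STOPPED via compare-and-set, so a quick restart that already left STOPPING is not clobbered. The snippet below is a minimal standalone sketch of that transition pattern only, not Elasticsearch code; the class and method names (WatcherStopSketch, requestStop, drainInFlightWork) are illustrative assumptions.

import java.util.concurrent.atomic.AtomicReference;

public class WatcherStopSketch {

    enum State { STARTED, STOPPING, STOPPED }

    private final AtomicReference<State> state = new AtomicReference<>(State.STARTED);

    // Mirrors the STARTED -> STOPPING -> STOPPED flow: STOPPED is only reached from the
    // callback that runs after in-flight work has drained, and only if nothing moved the
    // state away from STOPPING in the meantime.
    void requestStop(Runnable drainInFlightWork) {
        if (state.compareAndSet(State.STARTED, State.STOPPING)) {
            // In the real service this Runnable is passed to WatcherService.stop(reason, listener)
            // and invoked once currently running watches have finished.
            drainInFlightWork.run();
            boolean stopped = state.compareAndSet(State.STOPPING, State.STOPPED);
            if (stopped == false) {
                // a restart raced the shutdown; leave the current state alone
            }
        }
    }

    State current() {
        return state.get();
    }

    public static void main(String[] args) {
        WatcherStopSketch sketch = new WatcherStopSketch();
        sketch.requestStop(() -> { /* pretend all queued watches finished here */ });
        System.out.println(sketch.current()); // STOPPED
    }
}

The WatcherLifeCycleServiceTests change in this patch asserts exactly this behaviour: it captures the stop callback with an ArgumentCaptor, checks the state is STOPPING before running it, and checks it is STOPPED afterwards.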