Merge remote-tracking branch 'es/master' into enrich

Martijn van Groningen 2019-10-09 08:48:28 +02:00
commit 957f0fad0f
No known key found for this signature in database
GPG key ID: AB236F4FCF2AF12A
584 changed files with 9243 additions and 5730 deletions


@@ -33,4 +33,4 @@ $ErrorActionPreference="Continue"
-x :distribution:packages:buildOssRpm `
-x :distribution:packages:buildRpm `
exit $?
exit $LastExitCode

Vagrantfile vendored

@@ -1,5 +1,5 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# vim: ft=ruby ts=2 sw=2 sts=2 et:
# This Vagrantfile exists to test packaging. Read more about its use in the
# vagrant section in TESTING.asciidoc.
@@ -63,6 +63,7 @@ Vagrant.configure(2) do |config|
# Install Jayatana so we can work around it being present.
[ -f /usr/share/java/jayatanaag.jar ] || install jayatana
SHELL
ubuntu_docker config
end
end
'ubuntu-1804'.tap do |box|
@@ -72,6 +73,7 @@ Vagrant.configure(2) do |config|
# Install Jayatana so we can work around it being present.
[ -f /usr/share/java/jayatanaag.jar ] || install jayatana
SHELL
ubuntu_docker config
end
end
'debian-8'.tap do |box|
@@ -87,6 +89,7 @@ Vagrant.configure(2) do |config|
config.vm.define box, define_opts do |config|
config.vm.box = 'elastic/debian-9-x86_64'
deb_common config, box
deb_docker config
end
end
'centos-6'.tap do |box|
@@ -99,6 +102,7 @@ Vagrant.configure(2) do |config|
config.vm.define box, define_opts do |config|
config.vm.box = 'elastic/centos-7-x86_64'
rpm_common config, box
rpm_docker config
end
end
'oel-6'.tap do |box|
@@ -117,12 +121,14 @@ Vagrant.configure(2) do |config|
config.vm.define box, define_opts do |config|
config.vm.box = 'elastic/fedora-28-x86_64'
dnf_common config, box
dnf_docker config
end
end
'fedora-29'.tap do |box|
config.vm.define box, define_opts do |config|
config.vm.box = 'elastic/fedora-28-x86_64'
dnf_common config, box
dnf_docker config
end
end
'opensuse-42'.tap do |box|
@@ -185,6 +191,63 @@ def deb_common(config, name, extra: '')
)
end
def ubuntu_docker(config)
config.vm.provision 'install Docker using apt', type: 'shell', inline: <<-SHELL
# Install packages to allow apt to use a repository over HTTPS
apt-get install -y \
apt-transport-https \
ca-certificates \
curl \
gnupg2 \
software-properties-common
# Add Docker's official GPG key
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
# Set up the stable Docker repository
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
# Install Docker. Unlike Fedora and CentOS, this also starts the daemon.
apt-get update
apt-get install -y docker-ce docker-ce-cli containerd.io
# Add vagrant to the Docker group, so that it can run commands
usermod -aG docker vagrant
SHELL
end
def deb_docker(config)
config.vm.provision 'install Docker using apt', type: 'shell', inline: <<-SHELL
# Install packages to allow apt to use a repository over HTTPS
apt-get install -y \
apt-transport-https \
ca-certificates \
curl \
gnupg2 \
software-properties-common
# Add Docker's official GPG key
curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add -
# Set up the stable Docker repository
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/debian \
$(lsb_release -cs) \
stable"
# Install Docker. Unlike Fedora and CentOS, this also starts the daemon.
apt-get update
apt-get install -y docker-ce docker-ce-cli containerd.io
# Add vagrant to the Docker group, so that it can run commands
usermod -aG docker vagrant
SHELL
end
def rpm_common(config, name)
linux_common(
config,
@@ -195,6 +258,25 @@ def rpm_common(config, name)
)
end
def rpm_docker(config)
config.vm.provision 'install Docker using yum', type: 'shell', inline: <<-SHELL
# Install prerequisites
yum install -y yum-utils device-mapper-persistent-data lvm2
# Add repository
yum-config-manager -y --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# Install Docker
yum install -y docker-ce docker-ce-cli containerd.io
# Start Docker
systemctl enable --now docker
# Add vagrant to the Docker group, so that it can run commands
usermod -aG docker vagrant
SHELL
end
def dnf_common(config, name)
# Autodetect doesn't work....
if Vagrant.has_plugin?('vagrant-cachier')
@@ -211,6 +293,25 @@ def dnf_common(config, name)
)
end
def dnf_docker(config)
config.vm.provision 'install Docker using dnf', type: 'shell', inline: <<-SHELL
# Install prerequisites
dnf -y install dnf-plugins-core
# Add repository
dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo
# Install Docker
dnf install -y docker-ce docker-ce-cli containerd.io
# Start Docker
systemctl enable --now docker
# Add vagrant to the Docker group, so that it can run commands
usermod -aG docker vagrant
SHELL
end
def suse_common(config, name, extra: '')
linux_common(
config,
@@ -268,7 +369,7 @@ def linux_common(config,
# This prevents leftovers from previous tests using the
# same VM from messing up the current test
config.vm.provision 'clean es installs in tmp', run: 'always', type: 'shell', inline: <<-SHELL
config.vm.provision 'clean es installs in tmp', type: 'shell', inline: <<-SHELL
rm -rf /tmp/elasticsearch*
SHELL


@@ -17,17 +17,17 @@
* under the License.
*/
import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin
import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.Version
import org.elasticsearch.gradle.BwcVersions
import org.elasticsearch.gradle.Version
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.plugin.PluginBuildPlugin
import org.elasticsearch.gradle.tool.Boilerplate
import org.gradle.util.GradleVersion
import org.gradle.util.DistributionLocator
import org.gradle.plugins.ide.eclipse.model.SourceFolder
import org.gradle.util.DistributionLocator
import org.gradle.util.GradleVersion
import static org.elasticsearch.gradle.tool.Boilerplate.maybeConfigure
@@ -449,7 +449,7 @@ class Run extends DefaultTask {
description = "Enable debugging configuration, to allow attaching a debugger to elasticsearch."
)
public void setDebug(boolean enabled) {
project.project(':distribution').run.clusterConfig.debug = enabled
project.project(':distribution').run.debug = enabled
}
}
task run(type: Run) {


@@ -58,6 +58,8 @@ class RestTestsFromSnippetsTask extends SnippetsTask {
@OutputDirectory
File testRoot = project.file('build/rest')
Set<String> names = new HashSet<>()
RestTestsFromSnippetsTask() {
project.afterEvaluate {
// Wait to set this so testRoot can be customized
@@ -238,7 +240,14 @@ class RestTestsFromSnippetsTask extends SnippetsTask {
}
} else {
current.println('---')
if (test.name != null && test.name.isBlank() == false) {
if(names.add(test.name) == false) {
throw new InvalidUserDataException("Duplicated snippet name '$test.name': $test")
}
current.println("\"$test.name\":")
} else {
current.println("\"line_$test.start\":")
}
/* The Elasticsearch test runner doesn't support quite a few
* constructs unless we output this skip. We don't know if
* we're going to use these constructs, but we might so we
@@ -406,6 +415,7 @@ class RestTestsFromSnippetsTask extends SnippetsTask {
if (lastDocsPath == test.path) {
return
}
names.clear()
finishLastTest()
lastDocsPath = test.path


@@ -82,6 +82,7 @@ class SnippetsTask extends DefaultTask {
*/
for (File file: docs) {
String lastLanguage
String name
int lastLanguageLine
Snippet snippet = null
StringBuilder contents = null
@@ -155,19 +156,21 @@
if (line ==~ /-{4,}\s*/) { // Four dashes looks like a snippet
if (snippet == null) {
Path path = docs.dir.toPath().relativize(file.toPath())
snippet = new Snippet(path: path, start: lineNumber, testEnv: testEnv)
snippet = new Snippet(path: path, start: lineNumber, testEnv: testEnv, name: name)
if (lastLanguageLine == lineNumber - 1) {
snippet.language = lastLanguage
}
name = null
} else {
snippet.end = lineNumber
}
return
}
matcher = line =~ /\["?source"?,\s*"?([-\w]+)"?(,.*)?].*/
if (matcher.matches()) {
lastLanguage = matcher.group(1)
def source = matchSource(line)
if (source.matches) {
lastLanguage = source.language
lastLanguageLine = lineNumber
name = source.name
return
}
if (line ==~ /\/\/\s*AUTOSENSE\s*/) {
@@ -310,6 +313,20 @@
}
}
static Source matchSource(String line) {
def matcher = line =~ /\["?source"?,\s*"?([-\w]+)"?(,((?!id=).)*(id="?([-\w]+)"?)?(.*))?].*/
if(matcher.matches()){
return new Source(matches: true, language: matcher.group(1), name: matcher.group(5))
}
return new Source(matches: false)
}
static class Source {
boolean matches
String language
String name
}
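// Illustration only (not part of the original file): a hedged sketch of what matchSource
// returns for a few hypothetical snippet headers, using the capture groups above:
//
//     def src = matchSource('[source,console,id=create-index-example]')
//     assert src.matches
//     assert src.language == 'console'            // capture group 1
//     assert src.name == 'create-index-example'   // capture group 5 (the id attribute)
//
//     assert matchSource('["source","js"]').name == null   // no id, so the test falls back to "line_<start>"
//     assert matchSource('// NOTCONSOLE').matches == false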
static class Snippet {
static final int NOT_FINISHED = -1
@@ -336,6 +353,7 @@
boolean curl
List warnings = new ArrayList()
boolean skipShardsFailures = false
String name
@Override
public String toString() {


@@ -24,7 +24,7 @@ import org.elasticsearch.gradle.NoticeTask
import org.elasticsearch.gradle.Version
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.test.RestIntegTestTask
import org.elasticsearch.gradle.test.RunTask
import org.elasticsearch.gradle.testclusters.RunTask
import org.elasticsearch.gradle.testclusters.TestClustersPlugin
import org.elasticsearch.gradle.tool.ClasspathUtils
import org.gradle.api.InvalidUserDataException
@@ -65,14 +65,7 @@ class PluginBuildPlugin implements Plugin<Project> {
project.archivesBaseName = name
project.description = extension1.description
configurePublishing(project, extension1)
if (project.plugins.hasPlugin(TestClustersPlugin.class) == false) {
project.integTestCluster.dependsOn(project.tasks.bundlePlugin)
if (isModule) {
project.integTestCluster.module(project)
} else {
project.integTestCluster.plugin(project.path)
}
} else {
project.tasks.integTest.dependsOn(project.tasks.bundlePlugin)
if (isModule) {
project.testClusters.integTest.module(
@@ -93,7 +86,7 @@ class PluginBuildPlugin implements Plugin<Project> {
)
}
}
}
if (extension1.name == null) {
throw new InvalidUserDataException('name is a required setting for esplugin')
}
@@ -117,14 +110,6 @@ class PluginBuildPlugin implements Plugin<Project> {
]
buildProperties.expand(properties)
buildProperties.inputs.properties(properties)
project.tasks.run.dependsOn(project.tasks.bundlePlugin)
if (isModule) {
project.tasks.run.clusterConfig.distribution = System.getProperty(
'run.distribution', isXPackModule ? 'default' : 'oss'
)
} else {
project.tasks.run.clusterConfig.plugin(project.path)
}
if (isModule == false || isXPackModule) {
addNoticeGeneration(project, extension1)
}
@@ -145,7 +130,11 @@ class PluginBuildPlugin implements Plugin<Project> {
createIntegTestTask(project)
createBundleTasks(project, extension)
project.configurations.getByName('default').extendsFrom(project.configurations.getByName('runtime'))
project.tasks.create('run', RunTask) // allow running ES with this plugin in the foreground of a build
// allow running ES with this plugin in the foreground of a build
project.tasks.register('run', RunTask) {
dependsOn(project.tasks.bundlePlugin)
useCluster project.testClusters.integTest
}
}
@@ -178,10 +167,6 @@ class PluginBuildPlugin implements Plugin<Project> {
private static void createIntegTestTask(Project project) {
RestIntegTestTask integTest = project.tasks.create('integTest', RestIntegTestTask.class)
integTest.mustRunAfter('precommit', 'test')
if (project.plugins.hasPlugin(TestClustersPlugin.class) == false) {
// only if not using test clusters
project.integTestCluster.distribution = System.getProperty('tests.distribution', 'integ-test-zip')
}
project.check.dependsOn(integTest)
}


@@ -1,254 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.test
import org.elasticsearch.gradle.Version
import org.gradle.api.GradleException
import org.gradle.api.Project
import org.gradle.api.tasks.Input
/** Configuration for an elasticsearch cluster, used for integration tests. */
class ClusterConfiguration {
private final Project project
@Input
String distribution = 'default'
@Input
int numNodes = 1
@Input
int numBwcNodes = 0
@Input
Version bwcVersion = null
@Input
int httpPort = 0
@Input
int transportPort = 0
/**
* An override of the data directory. Input is the node number and output
* is the override data directory.
*/
@Input
Closure<String> dataDir = null
/** Optional override of the cluster name. */
@Input
String clusterName = null
@Input
boolean daemonize = true
@Input
boolean debug = false
/**
* Whether the initial_master_nodes setting should be automatically derived from the nodes
* in the cluster. Only takes effect if all nodes in the cluster understand this setting
* and the discovery type is not explicitly set.
*/
@Input
boolean autoSetInitialMasterNodes = true
/**
* Whether the file-based discovery provider should be automatically setup based on
* the nodes in the cluster. Only takes effect if no other hosts provider is already
* configured.
*/
@Input
boolean autoSetHostsProvider = true
@Input
String jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') +
" " + "-Xmx" + System.getProperty('tests.heap.size', '512m') +
" " + System.getProperty('tests.jvm.argline', '')
/**
* Should the shared environment be cleaned on cluster startup? Defaults
* to {@code true} so we run with a clean cluster but some tests wish to
* preserve snapshots between clusters so they set this to false.
*/
@Input
boolean cleanShared = true
/**
* A closure to call which returns the unicast host to connect to for cluster formation.
*
* This allows multi node clusters, or a new cluster to connect to an existing cluster.
* The closure takes three arguments, the NodeInfo for the first node in the cluster,
* the NodeInfo for the node currently being configured, and an AntBuilder which may be used
* to wait on conditions before returning.
*/
@Input
Closure unicastTransportUri = { NodeInfo seedNode, NodeInfo node, AntBuilder ant ->
if (seedNode == node) {
return null
}
ant.waitfor(maxwait: '40', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond',
timeoutproperty: "failed.${seedNode.transportPortsFile.path}") {
resourceexists {
file(file: seedNode.transportPortsFile.toString())
}
}
if (ant.properties.containsKey("failed.${seedNode.transportPortsFile.path}".toString())) {
throw new GradleException("Failed to locate seed node transport file [${seedNode.transportPortsFile}]: " +
"timed out waiting for it to be created after 40 seconds")
}
return seedNode.transportUri()
}
/**
* A closure to call which returns a manually supplied list of unicast seed hosts.
*/
@Input
Closure<List<String>> otherUnicastHostAddresses = {
Collections.emptyList()
}
/**
* A closure to call before the cluster is considered ready. The closure is passed the node info,
* as well as a groovy AntBuilder, to enable running ant condition checks. The default wait
* condition is for http on the http port.
*/
@Input
Closure waitCondition = { NodeInfo node, AntBuilder ant ->
File tmpFile = new File(node.cwd, 'wait.success')
String waitUrl = "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow"
ant.echo(message: "==> [${new Date()}] checking health: ${waitUrl}",
level: 'info')
// checking here for wait_for_nodes to be >= the number of nodes because it's possible
// this cluster is attempting to connect to nodes created by another task (same cluster name),
// so there will be more nodes in that case in the cluster state
ant.get(src: waitUrl,
dest: tmpFile.toString(),
ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task
retries: 10)
return tmpFile.exists()
}
/**
* The maximum number of seconds to wait for nodes to complete startup, which includes writing
* the ports files for the transports and the pid file. This wait time occurs before the wait
* condition is executed.
*/
@Input
int nodeStartupWaitSeconds = 30
public ClusterConfiguration(Project project) {
this.project = project
}
// **Note** for systemProperties, settings, keystoreFiles etc:
// the value could be a GString that is evaluated to just a String
// there are cases where the value depends on a task that has not been executed yet at configuration time
Map<String, Object> systemProperties = new HashMap<>()
Map<String, Object> environmentVariables = new HashMap<>()
Map<String, Object> settings = new HashMap<>()
Map<String, String> keystoreSettings = new HashMap<>()
Map<String, Object> keystoreFiles = new HashMap<>()
// map from destination path, to source file
Map<String, Object> extraConfigFiles = new HashMap<>()
LinkedHashMap<String, Object> plugins = new LinkedHashMap<>()
List<Project> modules = new ArrayList<>()
LinkedHashMap<String, Object[]> setupCommands = new LinkedHashMap<>()
List<Object> dependencies = new ArrayList<>()
@Input
void systemProperty(String property, Object value) {
systemProperties.put(property, value)
}
@Input
void environment(String variable, Object value) {
environmentVariables.put(variable, value)
}
@Input
void setting(String name, Object value) {
settings.put(name, value)
}
@Input
void keystoreSetting(String name, String value) {
keystoreSettings.put(name, value)
}
/**
* Adds a file to the keystore. The name is the secure setting name, and the sourceFile
* is anything accepted by project.file()
*/
@Input
void keystoreFile(String name, Object sourceFile) {
keystoreFiles.put(name, sourceFile)
}
@Input
void plugin(String path) {
Project pluginProject = project.project(path)
plugins.put(pluginProject.name, pluginProject)
}
@Input
void mavenPlugin(String name, String mavenCoords) {
plugins.put(name, mavenCoords)
}
/** Add a module to the cluster. The project must be an esplugin and have a single zip default artifact. */
@Input
void module(Project moduleProject) {
modules.add(moduleProject)
}
@Input
void setupCommand(String name, Object... args) {
setupCommands.put(name, args)
}
/**
* Add an extra configuration file. The path is relative to the config dir, and the sourceFile
* is anything accepted by project.file()
*/
@Input
void extraConfigFile(String path, Object sourceFile) {
if (path == 'elasticsearch.yml') {
throw new GradleException('Overwriting elasticsearch.yml is not allowed, add additional settings using cluster { setting "foo", "bar" }')
}
extraConfigFiles.put(path, sourceFile)
}
/** Add dependencies that must be run before the first task setting up the cluster. */
@Input
void dependsOn(Object... deps) {
dependencies.addAll(deps)
}
}
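// Context only (not part of the original file): this class backed the legacy integTestCluster /
// clusterConfig DSL that the PluginBuildPlugin changes earlier in this diff stop wiring up.
// A hedged, hypothetical build.gradle fragment using it looked roughly like:
//
//     integTestCluster {
//         numNodes = 2
//         distribution = 'oss'
//         setting 'node.attr.testattr', 'test'
//         keystoreSetting 'my.secure.setting', 'secret'   // hypothetical names and values
//         extraConfigFile 'my-extra.yml', file('src/test/resources/my-extra.yml')
//         plugin ':plugins:analysis-icu'
//     }
//
// The equivalent configuration now lives in the testClusters { integTest { ... } } DSL used
// elsewhere in this diff.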


@@ -1,991 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.test
import org.apache.tools.ant.DefaultLogger
import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.BwcVersions
import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.Version
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.plugin.PluginBuildPlugin
import org.elasticsearch.gradle.plugin.PluginPropertiesExtension
import org.gradle.api.AntBuilder
import org.gradle.api.DefaultTask
import org.gradle.api.GradleException
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.artifacts.Configuration
import org.gradle.api.artifacts.Dependency
import org.gradle.api.file.FileCollection
import org.gradle.api.logging.Logger
import org.gradle.api.tasks.Copy
import org.gradle.api.tasks.Delete
import org.gradle.api.tasks.Exec
import org.gradle.internal.jvm.Jvm
import java.nio.charset.StandardCharsets
import java.nio.file.Paths
import java.util.concurrent.TimeUnit
import java.util.stream.Collectors
/**
* A helper for creating tasks to build a cluster that is used by a task, and tear down the cluster when the task is finished.
*/
class ClusterFormationTasks {
/**
* Adds dependent tasks to the given task to start and stop a cluster with the given configuration.
*
* Returns a list of NodeInfo objects for each node in the cluster.
*/
static List<NodeInfo> setup(Project project, String prefix, Task runner, ClusterConfiguration config) {
File sharedDir = new File(project.buildDir, "cluster/shared")
Object startDependencies = config.dependencies
/* First, if we want a clean environment, we remove everything in the
* shared cluster directory to ensure there are no leftovers in repos
* or anything. In theory this should not be necessary, but repositories
* are only deleted in the cluster state and not on disk, such that
* snapshots survive failures / test runs and there is no simple way
* today to fix that. */
if (config.cleanShared) {
Task cleanup = project.tasks.create(
name: "${prefix}#prepareCluster.cleanShared",
type: Delete,
dependsOn: startDependencies) {
delete sharedDir
doLast {
sharedDir.mkdirs()
}
}
startDependencies = cleanup
}
List<Task> startTasks = []
List<NodeInfo> nodes = []
if (config.numNodes < config.numBwcNodes) {
throw new GradleException("numNodes must be >= numBwcNodes [${config.numNodes} < ${config.numBwcNodes}]")
}
if (config.numBwcNodes > 0 && config.bwcVersion == null) {
throw new GradleException("bwcVersion must not be null if numBwcNodes is > 0")
}
// this is our current version distribution configuration we use for all kinds of REST tests etc.
Configuration currentDistro = project.configurations.create("${prefix}_elasticsearchDistro")
Configuration bwcDistro = project.configurations.create("${prefix}_elasticsearchBwcDistro")
Configuration bwcPlugins = project.configurations.create("${prefix}_elasticsearchBwcPlugins")
if (System.getProperty('tests.distribution', 'oss') == 'integ-test-zip') {
throw new Exception("tests.distribution=integ-test-zip is not supported")
}
configureDistributionDependency(project, config.distribution, currentDistro, VersionProperties.elasticsearch)
boolean hasBwcNodes = config.numBwcNodes > 0
if (hasBwcNodes) {
if (config.bwcVersion == null) {
throw new IllegalArgumentException("Must specify bwcVersion when numBwcNodes > 0")
}
// if we have a cluster that has a BWC cluster we also need to configure a dependency on the BWC version
// this version uses the same distribution etc. and only differs in the version we depend on.
// from here on everything else works the same as if it's the current version; we fetch the BWC version
// from mirrors using Gradle's built-in mechanism etc.
configureDistributionDependency(project, config.distribution, bwcDistro, config.bwcVersion.toString())
for (Map.Entry<String, Object> entry : config.plugins.entrySet()) {
configureBwcPluginDependency(project, entry.getValue(), bwcPlugins, config.bwcVersion)
}
bwcDistro.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS)
bwcPlugins.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS)
}
for (int i = 0; i < config.numNodes; i++) {
// we start N nodes and out of these N nodes there might be M bwc nodes.
// for each of those nodes we might have a different configuration
Configuration distro
String elasticsearchVersion
if (i < config.numBwcNodes) {
elasticsearchVersion = config.bwcVersion.toString()
if (project.bwcVersions.unreleased.contains(config.bwcVersion) &&
(project.version != elasticsearchVersion)) {
elasticsearchVersion += "-SNAPSHOT"
}
distro = bwcDistro
} else {
elasticsearchVersion = VersionProperties.elasticsearch
distro = currentDistro
}
NodeInfo node = new NodeInfo(config, i, project, prefix, elasticsearchVersion, sharedDir)
nodes.add(node)
Closure<Map> writeConfigSetup
Object dependsOn
writeConfigSetup = { Map esConfig ->
if (config.getAutoSetHostsProvider()) {
if (esConfig.containsKey("discovery.seed_providers") == false) {
esConfig["discovery.seed_providers"] = 'file'
}
esConfig["discovery.seed_hosts"] = []
}
if (esConfig['discovery.type'] == null && config.getAutoSetInitialMasterNodes()) {
esConfig['cluster.initial_master_nodes'] = nodes.stream().map({ n ->
if (n.config.settings['node.name'] == null) {
return "node-" + n.nodeNum
} else {
return n.config.settings['node.name']
}
}).collect(Collectors.toList())
}
esConfig
}
dependsOn = startDependencies
startTasks.add(configureNode(project, prefix, runner, dependsOn, node, config, distro, writeConfigSetup))
}
Task wait = configureWaitTask("${prefix}#wait", project, nodes, startTasks, config.nodeStartupWaitSeconds)
runner.dependsOn(wait)
return nodes
}
/** Adds a dependency on the given distribution */
static void configureDistributionDependency(Project project, String distro, Configuration configuration, String elasticsearchVersion) {
boolean internalBuild = project.hasProperty('bwcVersions')
if (distro.equals("integ-test-zip")) {
// short circuit integ test so it doesn't complicate the rest of the distribution setup below
if (internalBuild) {
project.dependencies.add(
configuration.name,
project.dependencies.project(path: ":distribution", configuration: 'integ-test-zip')
)
} else {
project.dependencies.add(
configuration.name,
"org.elasticsearch.distribution.integ-test-zip:elasticsearch:${elasticsearchVersion}@zip"
)
}
return
}
// TEMP HACK
// The oss docs CI build overrides the distro on the command line. This hack handles backcompat until CI is updated.
if (distro.equals('oss-zip')) {
distro = 'oss'
}
if (distro.equals('zip')) {
distro = 'default'
}
// END TEMP HACK
if (['oss', 'default'].contains(distro) == false) {
throw new GradleException("Unknown distribution: ${distro} in project ${project.path}")
}
Version version = Version.fromString(elasticsearchVersion)
String os = getOs()
String classifier = "${os}-x86_64"
String packaging = os.equals('windows') ? 'zip' : 'tar.gz'
String artifactName = 'elasticsearch'
if (distro.equals('oss') && Version.fromString(elasticsearchVersion).onOrAfter('6.3.0')) {
artifactName += '-oss'
}
Object dependency
String snapshotProject = "${os}-${os.equals('windows') ? 'zip' : 'tar'}"
if (version.before("7.0.0")) {
snapshotProject = "zip"
}
if (distro.equals("oss")) {
snapshotProject = "oss-" + snapshotProject
}
BwcVersions.UnreleasedVersionInfo unreleasedInfo = null
if (project.hasProperty('bwcVersions')) {
// NOTE: leniency is needed for external plugin authors using build-tools. maybe build the version compat info into build-tools?
unreleasedInfo = project.bwcVersions.unreleasedInfo(version)
}
if (unreleasedInfo != null) {
dependency = project.dependencies.project(
path: unreleasedInfo.gradleProjectPath, configuration: snapshotProject
)
} else if (internalBuild && elasticsearchVersion.equals(VersionProperties.elasticsearch)) {
dependency = project.dependencies.project(path: ":distribution:archives:${snapshotProject}")
} else {
if (version.before('7.0.0')) {
classifier = "" // for bwc, before we had classifiers
}
// group does not matter as it is not used when we pull from the ivy repo that points to the download service
dependency = "dnm:${artifactName}:${elasticsearchVersion}-${classifier}@${packaging}"
}
project.dependencies.add(configuration.name, dependency)
}
/** Adds a dependency on a different version of the given plugin, which will be retrieved using gradle's dependency resolution */
static void configureBwcPluginDependency(Project project, Object plugin, Configuration configuration, Version elasticsearchVersion) {
if (plugin instanceof Project) {
Project pluginProject = (Project)plugin
verifyProjectHasBuildPlugin(configuration.name, elasticsearchVersion, project, pluginProject)
final String pluginName = findPluginName(pluginProject)
project.dependencies.add(configuration.name, "org.elasticsearch.plugin:${pluginName}:${elasticsearchVersion}@zip")
} else {
project.dependencies.add(configuration.name, "${plugin}@zip")
}
}
/**
* Adds dependent tasks to start an elasticsearch cluster before the given task is executed,
* and stop it after it has finished executing.
*
* The setup of the cluster involves the following:
* <ol>
* <li>Cleanup the extraction directory</li>
* <li>Extract a fresh copy of elasticsearch</li>
* <li>Write an elasticsearch.yml config file</li>
* <li>Copy plugins that will be installed to a temporary dir (which contains spaces)</li>
* <li>Install plugins</li>
* <li>Run additional setup commands</li>
* <li>Start elasticsearch</li>
* </ol>
*
* @return a task which starts the node.
*/
static Task configureNode(Project project, String prefix, Task runner, Object dependsOn, NodeInfo node, ClusterConfiguration config,
Configuration distribution, Closure<Map> writeConfig) {
// tasks are chained so their execution order is maintained
Task setup = project.tasks.create(name: taskName(prefix, node, 'clean'), type: Delete, dependsOn: dependsOn) {
delete node.homeDir
delete node.cwd
}
setup = project.tasks.create(name: taskName(prefix, node, 'createCwd'), type: DefaultTask, dependsOn: setup) {
doLast {
node.cwd.mkdirs()
}
outputs.dir node.cwd
}
setup = configureCheckPreviousTask(taskName(prefix, node, 'checkPrevious'), project, setup, node)
setup = configureStopTask(taskName(prefix, node, 'stopPrevious'), project, setup, node)
setup = configureExtractTask(taskName(prefix, node, 'extract'), project, setup, node, distribution, config.distribution)
setup = configureWriteConfigTask(taskName(prefix, node, 'configure'), project, setup, node, writeConfig)
setup = configureCreateKeystoreTask(taskName(prefix, node, 'createKeystore'), project, setup, node)
setup = configureAddKeystoreSettingTasks(prefix, project, setup, node)
setup = configureAddKeystoreFileTasks(prefix, project, setup, node)
if (node.config.plugins.isEmpty() == false) {
if (node.nodeVersion == Version.fromString(VersionProperties.elasticsearch)) {
setup = configureCopyPluginsTask(taskName(prefix, node, 'copyPlugins'), project, setup, node, prefix)
} else {
setup = configureCopyBwcPluginsTask(taskName(prefix, node, 'copyBwcPlugins'), project, setup, node, prefix)
}
}
// install modules
for (Project module : node.config.modules) {
String actionName = pluginTaskName('install', module.name, 'Module')
setup = configureInstallModuleTask(taskName(prefix, node, actionName), project, setup, node, module)
}
// install plugins
for (String pluginName : node.config.plugins.keySet()) {
String actionName = pluginTaskName('install', pluginName, 'Plugin')
setup = configureInstallPluginTask(taskName(prefix, node, actionName), project, setup, node, pluginName, prefix)
}
// sets up any extra config files that need to be copied over to the ES instance;
// it's run after plugins have been installed, as the extra config files may belong to plugins
setup = configureExtraConfigFilesTask(taskName(prefix, node, 'extraConfig'), project, setup, node)
// extra setup commands
for (Map.Entry<String, Object[]> command : node.config.setupCommands.entrySet()) {
// the first argument is the actual script name, relative to home
Object[] args = command.getValue().clone()
final Object commandPath
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
/*
* We have to delay building the string as the path will not exist during configuration which will fail on Windows due to
* getting the short name requiring the path to already exist. Note that we have to capture the value of arg[0] now
* otherwise we would stack overflow later since arg[0] is replaced below.
*/
String argsZero = args[0]
commandPath = "${-> Paths.get(NodeInfo.getShortPathName(node.homeDir.toString())).resolve(argsZero.toString()).toString()}"
} else {
commandPath = node.homeDir.toPath().resolve(args[0].toString()).toString()
}
args[0] = commandPath
setup = configureExecTask(taskName(prefix, node, command.getKey()), project, setup, node, args)
}
Task start = configureStartTask(taskName(prefix, node, 'start'), project, setup, node)
if (node.config.daemonize) {
Task stop = configureStopTask(taskName(prefix, node, 'stop'), project, [], node)
// if we are running in the background, make sure to stop the server when the task completes
runner.finalizedBy(stop)
start.finalizedBy(stop)
for (Object dependency : config.dependencies) {
if (dependency instanceof Fixture) {
def depStop = ((Fixture)dependency).stopTask
runner.finalizedBy(depStop)
start.finalizedBy(depStop)
}
}
}
return start
}
/** Adds a task to extract the elasticsearch distribution */
static Task configureExtractTask(String name, Project project, Task setup, NodeInfo node,
Configuration configuration, String distribution) {
List extractDependsOn = [configuration, setup]
/* configuration.singleFile will be an external artifact if this is being run by a plugin not living in the
elasticsearch source tree. If this is a plugin built in the elasticsearch source tree or this is a distro in
the elasticsearch source tree then this should be the version of elasticsearch built by the source tree.
If it isn't then Bad Things(TM) will happen. */
Task extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) {
if (getOs().equals("windows") || distribution.equals("integ-test-zip")) {
from {
project.zipTree(configuration.singleFile)
}
} else {
// macos and linux use tar
from {
project.tarTree(project.resources.gzip(configuration.singleFile))
}
}
into node.baseDir
}
return extract
}
/** Adds a task to write elasticsearch.yml for the given node configuration */
static Task configureWriteConfigTask(String name, Project project, Task setup, NodeInfo node, Closure<Map> configFilter) {
Map esConfig = [
'cluster.name' : node.clusterName,
'node.name' : "node-" + node.nodeNum,
(node.nodeVersion.onOrAfter('7.4.0') ? 'node.pidfile' : 'pidfile') : node.pidFile,
'path.repo' : "${node.sharedDir}/repo",
'path.shared_data' : "${node.sharedDir}/",
// Define a node attribute so we can test that it exists
'node.attr.testattr' : 'test',
// Don't wait for state, just start up quickly. This will also allow new and old nodes in the BWC case to become the master
'discovery.initial_state_timeout' : '0s'
]
esConfig['http.port'] = node.config.httpPort
if (node.nodeVersion.onOrAfter('6.7.0')) {
esConfig['transport.port'] = node.config.transportPort
} else {
esConfig['transport.tcp.port'] = node.config.transportPort
}
// Default the watermarks to absurdly low to prevent the tests from failing on nodes without enough disk space
esConfig['cluster.routing.allocation.disk.watermark.low'] = '1b'
esConfig['cluster.routing.allocation.disk.watermark.high'] = '1b'
if (node.nodeVersion.major >= 6) {
esConfig['cluster.routing.allocation.disk.watermark.flood_stage'] = '1b'
}
// increase script compilation limit since tests can rapid-fire script compilations
esConfig['script.max_compilations_rate'] = '2048/1m'
// Temporarily disable the real memory usage circuit breaker. It depends on real memory usage which we have no full control
// over and the REST client will not retry on circuit breaking exceptions yet (see #31986 for details). Once the REST client
// can retry on circuit breaking exceptions, we can revert again to the default configuration.
if (node.nodeVersion.major >= 7) {
esConfig['indices.breaker.total.use_real_memory'] = false
}
Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup)
writeConfig.doFirst {
for (Map.Entry<String, Object> setting : node.config.settings) {
if (setting.value == null) {
esConfig.remove(setting.key)
} else {
esConfig.put(setting.key, setting.value)
}
}
esConfig = configFilter.call(esConfig)
File configFile = new File(node.pathConf, 'elasticsearch.yml')
logger.info("Configuring ${configFile}")
configFile.setText(esConfig.collect { key, value -> "${key}: ${value}" }.join('\n'), 'UTF-8')
}
}
/** Adds a task to create keystore */
static Task configureCreateKeystoreTask(String name, Project project, Task setup, NodeInfo node) {
if (node.config.keystoreSettings.isEmpty() && node.config.keystoreFiles.isEmpty()) {
return setup
} else {
/*
* We have to delay building the string as the path will not exist during configuration which will fail on Windows due to
* getting the short name requiring the path to already exist.
*/
final Object esKeystoreUtil = "${-> node.binPath().resolve('elasticsearch-keystore').toString()}"
return configureExecTask(name, project, setup, node, esKeystoreUtil, 'create')
}
}
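// Aside (not part of the original file): the "${-> ...}" strings above and below are the
// lazy-GString idiom -- the closure is only evaluated when the string is rendered, so the
// path lookup is deferred from configuration time to execution time. A minimal, hedged
// sketch of the idiom with made-up values:
//
//     def dir = 'not-created-yet'
//     def eager = "path: ${dir}"       // evaluates dir immediately
//     def lazy  = "path: ${-> dir}"    // re-evaluates the closure on each toString()
//     dir = 'build/es-home'
//     assert eager.toString() == 'path: not-created-yet'
//     assert lazy.toString()  == 'path: build/es-home'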
/** Adds tasks to add settings to the keystore */
static Task configureAddKeystoreSettingTasks(String parent, Project project, Task setup, NodeInfo node) {
Map kvs = node.config.keystoreSettings
Task parentTask = setup
/*
* We have to delay building the string as the path will not exist during configuration which will fail on Windows due to getting
* the short name requiring the path to already exist.
*/
final Object esKeystoreUtil = "${-> node.binPath().resolve('elasticsearch-keystore').toString()}"
for (Map.Entry<String, String> entry in kvs) {
String key = entry.getKey()
String name = taskName(parent, node, 'addToKeystore#' + key)
Task t = configureExecTask(name, project, parentTask, node, esKeystoreUtil, 'add', key, '-x')
String settingsValue = entry.getValue() // eval this early otherwise it will not use the right value
t.doFirst {
standardInput = new ByteArrayInputStream(settingsValue.getBytes(StandardCharsets.UTF_8))
}
parentTask = t
}
return parentTask
}
/** Adds tasks to add files to the keystore */
static Task configureAddKeystoreFileTasks(String parent, Project project, Task setup, NodeInfo node) {
Map<String, Object> kvs = node.config.keystoreFiles
if (kvs.isEmpty()) {
return setup
}
Task parentTask = setup
/*
* We have to delay building the string as the path will not exist during configuration which will fail on Windows due to getting
* the short name requiring the path to already exist.
*/
final Object esKeystoreUtil = "${-> node.binPath().resolve('elasticsearch-keystore').toString()}"
for (Map.Entry<String, Object> entry in kvs) {
String key = entry.getKey()
String name = taskName(parent, node, 'addToKeystore#' + key)
String srcFileName = entry.getValue()
Task t = configureExecTask(name, project, parentTask, node, esKeystoreUtil, 'add-file', key, srcFileName)
t.doFirst {
File srcFile = project.file(srcFileName)
if (srcFile.isDirectory()) {
throw new GradleException("Source for keystoreFile must be a file: ${srcFile}")
}
if (srcFile.exists() == false) {
throw new GradleException("Source file for keystoreFile does not exist: ${srcFile}")
}
}
parentTask = t
}
return parentTask
}
static Task configureExtraConfigFilesTask(String name, Project project, Task setup, NodeInfo node) {
if (node.config.extraConfigFiles.isEmpty()) {
return setup
}
Copy copyConfig = project.tasks.create(name: name, type: Copy, dependsOn: setup)
File configDir = new File(node.homeDir, 'config')
copyConfig.into(configDir) // copy must always have a general dest dir, even though we don't use it
for (Map.Entry<String,Object> extraConfigFile : node.config.extraConfigFiles.entrySet()) {
Object extraConfigFileValue = extraConfigFile.getValue()
copyConfig.doFirst {
// make sure the copy won't be a no-op or act on a directory
File srcConfigFile = project.file(extraConfigFileValue)
if (srcConfigFile.isDirectory()) {
throw new GradleException("Source for extraConfigFile must be a file: ${srcConfigFile}")
}
if (srcConfigFile.exists() == false) {
throw new GradleException("Source file for extraConfigFile does not exist: ${srcConfigFile}")
}
}
File destConfigFile = new File(node.homeDir, 'config/' + extraConfigFile.getKey())
// wrap source file in closure to delay resolution to execution time
copyConfig.from({ extraConfigFileValue }) {
// this must be in a closure so it is only applied to the single file specified in from above
into(configDir.toPath().relativize(destConfigFile.canonicalFile.parentFile.toPath()).toFile())
rename { destConfigFile.name }
}
}
return copyConfig
}
/**
* Adds a task to copy plugins to a temp dir, which they will later be installed from.
*
* For each plugin, if the plugin has rest spec apis in its tests, those api files are also copied
* to the test resources for this project.
*/
static Task configureCopyPluginsTask(String name, Project project, Task setup, NodeInfo node, String prefix) {
Copy copyPlugins = project.tasks.create(name: name, type: Copy, dependsOn: setup)
List<FileCollection> pluginFiles = []
for (Map.Entry<String, Object> plugin : node.config.plugins.entrySet()) {
String configurationName = pluginConfigurationName(prefix, plugin.key)
Configuration configuration = project.configurations.findByName(configurationName)
if (configuration == null) {
configuration = project.configurations.create(configurationName)
}
if (plugin.getValue() instanceof Project) {
Project pluginProject = plugin.getValue()
verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject)
project.dependencies.add(configurationName, project.dependencies.project(path: pluginProject.path, configuration: 'zip'))
setup.dependsOn(pluginProject.tasks.bundlePlugin)
// also allow rest tests to use the rest spec from the plugin
String copyRestSpecTaskName = pluginTaskName('copy', plugin.getKey(), 'PluginRestSpec')
Copy copyRestSpec = project.tasks.findByName(copyRestSpecTaskName)
for (File resourceDir : pluginProject.sourceSets.test.resources.srcDirs) {
File restApiDir = new File(resourceDir, 'rest-api-spec/api')
if (restApiDir.exists() == false) continue
if (copyRestSpec == null) {
copyRestSpec = project.tasks.create(name: copyRestSpecTaskName, type: Copy)
copyPlugins.dependsOn(copyRestSpec)
copyRestSpec.into(project.sourceSets.test.output.resourcesDir)
}
copyRestSpec.from(resourceDir).include('rest-api-spec/api/**')
}
} else {
project.dependencies.add(configurationName, "${plugin.getValue()}@zip")
}
pluginFiles.add(configuration)
}
copyPlugins.into(node.pluginsTmpDir)
copyPlugins.from(pluginFiles)
return copyPlugins
}
private static String pluginConfigurationName(final String prefix, final String name) {
return "_plugin_${prefix}_${name}".replace(':', '_')
}
private static String pluginBwcConfigurationName(final String prefix, final String name) {
return "_plugin_bwc_${prefix}_${name}".replace(':', '_')
}
/** Configures task to copy a plugin based on a zip file resolved using dependencies for an older version */
static Task configureCopyBwcPluginsTask(String name, Project project, Task setup, NodeInfo node, String prefix) {
Configuration bwcPlugins = project.configurations.getByName("${prefix}_elasticsearchBwcPlugins")
for (Map.Entry<String, Object> plugin : node.config.plugins.entrySet()) {
String configurationName = pluginBwcConfigurationName(prefix, plugin.key)
Configuration configuration = project.configurations.findByName(configurationName)
if (configuration == null) {
configuration = project.configurations.create(configurationName)
}
if (plugin.getValue() instanceof Project) {
Project pluginProject = plugin.getValue()
verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject)
final String depName = findPluginName(pluginProject)
Dependency dep = bwcPlugins.dependencies.find {
it.name == depName
}
configuration.dependencies.add(dep)
} else {
project.dependencies.add(configurationName, "${plugin.getValue()}@zip")
}
}
Copy copyPlugins = project.tasks.create(name: name, type: Copy, dependsOn: setup) {
from bwcPlugins
into node.pluginsTmpDir
}
return copyPlugins
}
static Task configureInstallModuleTask(String name, Project project, Task setup, NodeInfo node, Project module) {
if (node.config.distribution != 'integ-test-zip') {
project.logger.info("Not installing modules for $name, ${node.config.distribution} already has them")
return setup
}
if (module.plugins.hasPlugin(PluginBuildPlugin) == false) {
throw new GradleException("Task ${name} cannot include module ${module.path} which is not an esplugin")
}
Copy installModule = project.tasks.create(name, Copy.class)
installModule.dependsOn(setup)
installModule.dependsOn(module.tasks.bundlePlugin)
installModule.into(new File(node.homeDir, "modules/${module.name}"))
installModule.from({ project.zipTree(module.tasks.bundlePlugin.outputs.files.singleFile) })
return installModule
}
static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, String pluginName, String prefix) {
FileCollection pluginZip;
if (node.nodeVersion != Version.fromString(VersionProperties.elasticsearch)) {
pluginZip = project.configurations.getByName(pluginBwcConfigurationName(prefix, pluginName))
} else {
pluginZip = project.configurations.getByName(pluginConfigurationName(prefix, pluginName))
}
// delay reading the file location until execution time by wrapping in a closure within a GString
final Object file = "${-> new File(node.pluginsTmpDir, pluginZip.singleFile.getName()).toURI().toURL().toString()}"
/*
* We have to delay building the string as the path will not exist during configuration which will fail on Windows due to getting
* the short name requiring the path to already exist.
*/
final Object esPluginUtil = "${-> node.binPath().resolve('elasticsearch-plugin').toString()}"
final Object[] args = [esPluginUtil, 'install', '--batch', file]
return configureExecTask(name, project, setup, node, args)
}
/** Wrapper for a command line argument: surrounds arguments that contain a comma with double quotes **/
private static class EscapeCommaWrapper {
Object arg
public String toString() {
String s = arg.toString()
// Surround strings that contain a comma with double quotes
if (s.indexOf(',') != -1) {
return "\"${s}\""
}
return s
}
}
/** Adds a task to execute a command to help setup the cluster */
static Task configureExecTask(String name, Project project, Task setup, NodeInfo node, Object[] execArgs) {
return project.tasks.create(name: name, type: LoggedExec, dependsOn: setup) { Exec exec ->
exec.workingDir node.cwd
if (useRuntimeJava(project, node)) {
exec.environment.put('JAVA_HOME', project.runtimeJavaHome)
} else {
// force JAVA_HOME to *not* be set
exec.environment.remove('JAVA_HOME')
}
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
exec.executable 'cmd'
exec.args '/C', 'call'
// On Windows the comma character is considered a parameter separator:
// arguments are wrapped in an EscapeCommaWrapper that escapes commas
exec.args execArgs.collect { a -> new EscapeCommaWrapper(arg: a) }
} else {
exec.commandLine execArgs
}
}
}
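// Aside (not part of the original file): a hedged sketch of the comma escaping above, with
// made-up argument values -- only arguments containing a comma get quoted:
//
//     assert new EscapeCommaWrapper(arg: 'path.repo=/a,/b').toString() == '"path.repo=/a,/b"'
//     assert new EscapeCommaWrapper(arg: '--batch').toString() == '--batch'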
public static boolean useRuntimeJava(Project project, NodeInfo node) {
return (project.isRuntimeJavaHomeSet ||
(node.isBwcNode == false && node.nodeVersion.before(Version.fromString("7.0.0"))) ||
node.config.distribution == 'integ-test-zip')
}
/** Adds a task to start an elasticsearch node with the given configuration */
static Task configureStartTask(String name, Project project, Task setup, NodeInfo node) {
// this closure is converted into ant nodes by groovy's AntBuilder
Closure antRunner = { AntBuilder ant ->
ant.exec(executable: node.executable, spawn: node.config.daemonize, newenvironment: true,
dir: node.cwd, taskname: 'elasticsearch') {
node.env.each { key, value -> env(key: key, value: value) }
if (useRuntimeJava(project, node)) {
env(key: 'JAVA_HOME', value: project.runtimeJavaHome)
}
node.args.each { arg(value: it) }
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
// Having no TMP on Windows defaults it to C:\Windows and causes permission errors
// Since we configure ant to run with a new environment above, we need to explicitly pass this
String tmp = System.getenv("TMP")
assert tmp != null
env(key: "TMP", value: tmp)
}
}
}
// this closure is the actual code to run elasticsearch
Closure elasticsearchRunner = {
// Due to how ant exec works with the spawn option, we lose all stdout/stderr from the
// process executed. To work around this, when spawning, we wrap the elasticsearch start
// command inside another shell script, which simply internally redirects the output
// of the real elasticsearch script. This allows ant to keep the streams open with the
// dummy process, but us to have the output available if there is an error in the
// elasticsearch start script
if (node.config.daemonize) {
node.writeWrapperScript()
}
node.getCommandString().eachLine { line -> logger.info(line) }
if (logger.isInfoEnabled() || node.config.daemonize == false) {
runAntCommand(project, antRunner, System.out, System.err)
} else {
// buffer the output, we may not need to print it
PrintStream captureStream = new PrintStream(node.buffer, true, "UTF-8")
runAntCommand(project, antRunner, captureStream, captureStream)
}
}
Task start = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup)
if (node.javaVersion != null) {
BuildPlugin.requireJavaHome(start, node.javaVersion)
}
start.doLast(elasticsearchRunner)
start.doFirst {
// If the node runs in a FIPS 140-2 JVM, the BCFKS default keystore will be password protected
if (project.inFipsJvm){
node.config.systemProperties.put('javax.net.ssl.trustStorePassword', 'password')
node.config.systemProperties.put('javax.net.ssl.keyStorePassword', 'password')
}
// Configure ES JAVA OPTS - adds system properties, assertion flags, remote debug etc
List<String> esJavaOpts = [node.env.get('ES_JAVA_OPTS', '')]
String collectedSystemProperties = node.config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" ")
esJavaOpts.add(collectedSystemProperties)
esJavaOpts.add(node.config.jvmArgs)
if (Boolean.parseBoolean(System.getProperty('tests.asserts', 'true'))) {
// put the enable assertions options before other options to allow
// flexibility to disable assertions for specific packages or classes
// in the cluster-specific options
esJavaOpts.add("-ea")
esJavaOpts.add("-esa")
}
// we must add debug options inside the closure so the config is read at execution time, as
// gradle task options are not processed until the end of the configuration phase
if (node.config.debug) {
println 'Running elasticsearch in debug mode, suspending until connected on port 8000'
esJavaOpts.add('-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000')
}
node.env['ES_JAVA_OPTS'] = esJavaOpts.join(" ")
//
project.logger.info("Starting node in ${node.clusterName} distribution: ${node.config.distribution}")
}
return start
}
static Task configureWaitTask(String name, Project project, List<NodeInfo> nodes, List<Task> startTasks, int waitSeconds) {
Task wait = project.tasks.create(name: name, dependsOn: startTasks)
wait.doLast {
Collection<String> unicastHosts = new HashSet<>()
nodes.forEach { node ->
unicastHosts.addAll(node.config.otherUnicastHostAddresses.call())
String unicastHost = node.config.unicastTransportUri(node, null, project.createAntBuilder())
if (unicastHost != null) {
unicastHosts.add(unicastHost)
}
}
String unicastHostsTxt = String.join("\n", unicastHosts)
nodes.forEach { node ->
node.pathConf.toPath().resolve("unicast_hosts.txt").setText(unicastHostsTxt)
}
ant.waitfor(maxwait: "${waitSeconds}", maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: "failed${name}") {
or {
for (NodeInfo node : nodes) {
resourceexists {
file(file: node.failedMarker.toString())
}
}
and {
for (NodeInfo node : nodes) {
resourceexists {
file(file: node.pidFile.toString())
}
resourceexists {
file(file: node.httpPortsFile.toString())
}
resourceexists {
file(file: node.transportPortsFile.toString())
}
}
}
}
}
if (ant.properties.containsKey("failed${name}".toString())) {
waitFailed(project, nodes, logger, "Failed to start elasticsearch: timed out after ${waitSeconds} seconds")
}
boolean anyNodeFailed = false
for (NodeInfo node : nodes) {
if (node.failedMarker.exists()) {
logger.error("Failed to start elasticsearch: ${node.failedMarker.toString()} exists")
anyNodeFailed = true
}
}
if (anyNodeFailed) {
waitFailed(project, nodes, logger, 'Failed to start elasticsearch')
}
// make sure all files exist otherwise we haven't fully started up
boolean missingFile = false
for (NodeInfo node : nodes) {
missingFile |= node.pidFile.exists() == false
missingFile |= node.httpPortsFile.exists() == false
missingFile |= node.transportPortsFile.exists() == false
}
if (missingFile) {
waitFailed(project, nodes, logger, 'Elasticsearch did not complete startup in time allotted')
}
// go through each node checking the wait condition
for (NodeInfo node : nodes) {
// first bind node info to the closure, then pass to the ant runner so we can get good logging
Closure antRunner = node.config.waitCondition.curry(node)
boolean success
if (logger.isInfoEnabled()) {
success = runAntCommand(project, antRunner, System.out, System.err)
} else {
PrintStream captureStream = new PrintStream(node.buffer, true, "UTF-8")
success = runAntCommand(project, antRunner, captureStream, captureStream)
}
if (success == false) {
waitFailed(project, nodes, logger, 'Elasticsearch cluster failed to pass wait condition')
}
}
}
return wait
}
static void waitFailed(Project project, List<NodeInfo> nodes, Logger logger, String msg) {
for (NodeInfo node : nodes) {
if (logger.isInfoEnabled() == false) {
// We already log the command at info level. No need to do it twice.
node.getCommandString().eachLine { line -> logger.error(line) }
}
logger.error("Node ${node.nodeNum} output:")
logger.error("|-----------------------------------------")
logger.error("| failure marker exists: ${node.failedMarker.exists()}")
logger.error("| pid file exists: ${node.pidFile.exists()}")
logger.error("| http ports file exists: ${node.httpPortsFile.exists()}")
logger.error("| transport ports file exists: ${node.transportPortsFile.exists()}")
// the waitfor failed, so dump any output we got (if info logging this goes directly to stdout)
logger.error("|\n| [ant output]")
node.buffer.toString('UTF-8').eachLine { line -> logger.error("| ${line}") }
// also dump the log file for the startup script (which will include ES logging output to stdout)
if (node.startLog.exists()) {
logger.error("|\n| [log]")
node.startLog.eachLine { line -> logger.error("| ${line}") }
}
if (node.pidFile.exists() && node.failedMarker.exists() == false &&
(node.httpPortsFile.exists() == false || node.transportPortsFile.exists() == false)) {
logger.error("|\n| [jstack]")
String pid = node.pidFile.getText('UTF-8')
ByteArrayOutputStream output = new ByteArrayOutputStream()
project.exec {
commandLine = ["${project.runtimeJavaHome}/bin/jstack", pid]
standardOutput = output
}
output.toString('UTF-8').eachLine { line -> logger.error("| ${line}") }
}
logger.error("|-----------------------------------------")
}
throw new GradleException(msg)
}
/** Adds a task to check if the process with the given pidfile is actually elasticsearch */
static Task configureCheckPreviousTask(String name, Project project, Object depends, NodeInfo node) {
return project.tasks.create(name: name, type: Exec, dependsOn: depends) {
onlyIf { node.pidFile.exists() }
// the pid file won't actually be read until execution time, since the read is wrapped within an inner closure of the GString
ext.pid = "${ -> node.pidFile.getText('UTF-8').trim()}"
final File jps = Jvm.forHome(project.runtimeJavaHome).getExecutable('jps')
commandLine jps, '-l'
standardOutput = new ByteArrayOutputStream()
doLast {
String out = standardOutput.toString()
if (out.contains("${ext.pid} org.elasticsearch.bootstrap.Elasticsearch") == false) {
logger.error('jps -l')
logger.error(out)
logger.error("pid file: ${node.pidFile}")
logger.error("pid: ${ext.pid}")
throw new GradleException("jps -l did not report any process with org.elasticsearch.bootstrap.Elasticsearch\n" +
"Did you run gradle clean? Maybe an old pid file is still lying around.")
} else {
logger.info(out)
}
}
}
}
/** Adds a task to kill an elasticsearch node with the given pidfile */
static Task configureStopTask(String name, Project project, Object depends, NodeInfo node) {
return project.tasks.create(name: name, type: LoggedExec, dependsOn: depends) {
onlyIf { node.pidFile.exists() }
// the pid file won't actually be read until execution time, since the read is wrapped within an inner closure of the GString
ext.pid = "${ -> node.pidFile.getText('UTF-8').trim()}"
doFirst {
logger.info("Shutting down external node with pid ${pid}")
}
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
executable 'Taskkill'
args '/PID', pid, '/F'
} else {
executable 'kill'
args '-9', pid
}
doLast {
project.delete(node.pidFile)
// Large tests can exhaust disk space, clean up jdk from the distribution to save some space
project.delete(new File(node.homeDir, "jdk"))
}
}
}
/** Returns a unique task name for this task and node configuration */
static String taskName(String prefix, NodeInfo node, String action) {
if (node.config.numNodes > 1) {
return "${prefix}#node${node.nodeNum}.${action}"
} else {
return "${prefix}#${action}"
}
}
public static String pluginTaskName(String action, String name, String suffix) {
// replace every dash followed by a character with just the uppercase character
String camelName = name.replaceAll(/-(\w)/) { _, c -> c.toUpperCase(Locale.ROOT) }
return action + camelName[0].toUpperCase(Locale.ROOT) + camelName.substring(1) + suffix
}
/** Runs an ant command, sending output to the given out and error streams */
static Object runAntCommand(Project project, Closure command, PrintStream outputStream, PrintStream errorStream) {
DefaultLogger listener = new DefaultLogger(
errorPrintStream: errorStream,
outputPrintStream: outputStream,
messageOutputLevel: org.apache.tools.ant.Project.MSG_INFO)
AntBuilder ant = project.createAntBuilder()
ant.project.addBuildListener(listener)
Object retVal = command(ant)
ant.project.removeBuildListener(listener)
return retVal
}
static void verifyProjectHasBuildPlugin(String name, Version version, Project project, Project pluginProject) {
if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false) {
throw new GradleException("Task [${name}] cannot add plugin [${pluginProject.path}] with version [${version}] to project's " +
"[${project.path}] dependencies: the plugin is not an esplugin")
}
}
/** Find the plugin name in the given project. */
static String findPluginName(Project pluginProject) {
PluginPropertiesExtension extension = pluginProject.extensions.findByName('esplugin')
return extension.name
}
/** Find the current OS */
static String getOs() {
String os = "linux"
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
os = "windows"
} else if (Os.isFamily(Os.FAMILY_MAC)) {
os = "darwin"
}
return os
}
}
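
The configureCheckPreviousTask wiring above shells out to jps -l and fails unless the recorded pid maps to org.elasticsearch.bootstrap.Elasticsearch. A standalone Java sketch of the same check, assuming jps is on the PATH and using a placeholder pid-file path in place of node.pidFile:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

public class CheckPreviousNodeSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder location; the Gradle task reads node.pidFile instead.
        String pid = new String(Files.readAllBytes(Paths.get("build/cluster/es.pid")), StandardCharsets.UTF_8).trim();
        Process jps = new ProcessBuilder("jps", "-l").start();
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(jps.getInputStream(), StandardCharsets.UTF_8))) {
            // Mirrors the task: the expected line is "<pid> org.elasticsearch.bootstrap.Elasticsearch".
            boolean found = reader.lines()
                .anyMatch(line -> line.equals(pid + " org.elasticsearch.bootstrap.Elasticsearch"));
            if (found == false) {
                throw new IllegalStateException("no Elasticsearch process with pid " + pid + "; stale pid file?");
            }
        }
    }
}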

View file

@ -319,12 +319,17 @@ public class DistroTestPlugin implements Plugin<Project> {
List<ElasticsearchDistribution> currentDistros = new ArrayList<>();
List<ElasticsearchDistribution> upgradeDistros = new ArrayList<>();
for (Type type : Arrays.asList(Type.DEB, Type.RPM)) {
// Docker disabled for https://github.com/elastic/elasticsearch/issues/47639
for (Type type : Arrays.asList(Type.DEB, Type.RPM /*,Type.DOCKER*/)) {
for (Flavor flavor : Flavor.values()) {
for (boolean bundledJdk : Arrays.asList(true, false)) {
// We should never add a Docker distro with bundledJdk == false
boolean skip = type == Type.DOCKER && bundledJdk == false;
if (skip == false) {
addDistro(distributions, type, null, flavor, bundledJdk, VersionProperties.getElasticsearch(), currentDistros);
}
}
}
// upgrade version is always bundled jdk
// NOTE: this is mimicking the old VagrantTestPlugin upgrade behavior. It will eventually be replaced
// with a dedicated upgrade test from every bwc version like other bwc tests
@ -386,6 +391,11 @@ public class DistroTestPlugin implements Plugin<Project> {
}
private static String destructiveDistroTestTaskName(ElasticsearchDistribution distro) {
return "destructiveDistroTest." + distroId(distro.getType(), distro.getPlatform(), distro.getFlavor(), distro.getBundledJdk());
Type type = distro.getType();
return "destructiveDistroTest." + distroId(
type,
distro.getPlatform(),
distro.getFlavor(),
distro.getBundledJdk());
}
}

View file

@ -1,297 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.test
import com.sun.jna.Native
import com.sun.jna.WString
import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.Version
import org.elasticsearch.gradle.VersionProperties
import org.gradle.api.Project
import java.nio.file.Files
import java.nio.file.Path
import java.nio.file.Paths
/**
* A container for the files and configuration associated with a single node in a test cluster.
*/
class NodeInfo {
/** Gradle project this node is part of */
Project project
/** common configuration for all nodes, including this one */
ClusterConfiguration config
/** node number within the cluster, for creating unique names and paths */
int nodeNum
/** name of the cluster this node is part of */
String clusterName
/** root directory all node files and operations happen under */
File baseDir
/** shared data directory all nodes share */
File sharedDir
/** the pid file the node will use */
File pidFile
/** a file written by elasticsearch containing the ports of each bound address for http */
File httpPortsFile
/** a file written by elasticsearch containing the ports of each bound address for transport */
File transportPortsFile
/** elasticsearch home dir */
File homeDir
/** config directory */
File pathConf
/** data directory (as an Object, to allow lazy evaluation) */
Object dataDir
/** THE config file */
File configFile
/** working directory for the node process */
File cwd
/** file that if it exists, indicates the node failed to start */
File failedMarker
/** stdout/stderr log of the elasticsearch process for this node */
File startLog
/** directory to install plugins from */
File pluginsTmpDir
/** Major version of java this node runs with, or {@code null} if using the runtime java version */
Integer javaVersion
/** environment variables to start the node with */
Map<String, String> env
/** arguments to start the node with */
List<String> args
/** Executable to run the bin/elasticsearch with, either cmd or sh */
String executable
/** Path to the elasticsearch start script */
private Object esScript
/** script to run when running in the background */
private File wrapperScript
/** buffer for ant output when starting this node */
ByteArrayOutputStream buffer = new ByteArrayOutputStream()
/** the version of elasticsearch that this node runs */
Version nodeVersion
/** true if the node is not the current version */
boolean isBwcNode
/** Holds node configuration for part of a test cluster. */
NodeInfo(ClusterConfiguration config, int nodeNum, Project project, String prefix, String nodeVersion, File sharedDir) {
this.config = config
this.nodeNum = nodeNum
this.project = project
this.sharedDir = sharedDir
if (config.clusterName != null) {
clusterName = config.clusterName
} else {
clusterName = project.path.replace(':', '_').substring(1) + '_' + prefix
}
baseDir = new File(project.buildDir, "cluster/${prefix} node${nodeNum}")
pidFile = new File(baseDir, 'es.pid')
this.nodeVersion = Version.fromString(nodeVersion)
this.isBwcNode = this.nodeVersion.before(VersionProperties.elasticsearch)
homeDir = new File(baseDir, "elasticsearch-${nodeVersion}")
pathConf = new File(homeDir, 'config')
if (config.dataDir != null) {
dataDir = "${config.dataDir(nodeNum)}"
} else {
dataDir = new File(homeDir, "data")
}
configFile = new File(pathConf, 'elasticsearch.yml')
// even for rpm/deb, the logs are under home because we don't start with real services
File logsDir = new File(homeDir, 'logs')
httpPortsFile = new File(logsDir, 'http.ports')
transportPortsFile = new File(logsDir, 'transport.ports')
cwd = new File(baseDir, "cwd")
failedMarker = new File(cwd, 'run.failed')
startLog = new File(cwd, 'run.log')
pluginsTmpDir = new File(baseDir, "plugins tmp")
args = []
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
executable = 'cmd'
args.add('/C')
args.add('"') // quote the entire command
wrapperScript = new File(cwd, "run.bat")
/*
* We have to delay building the string as the path will not exist during configuration which will fail on Windows due to
* getting the short name requiring the path to already exist.
*/
esScript = "${-> binPath().resolve('elasticsearch.bat').toString()}"
} else {
executable = 'bash'
wrapperScript = new File(cwd, "run")
esScript = binPath().resolve('elasticsearch')
}
if (config.daemonize) {
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
/*
* We have to delay building the string as the path will not exist during configuration which will fail on Windows due to
* getting the short name requiring the path to already exist.
*/
args.add("${-> getShortPathName(wrapperScript.toString())}")
} else {
args.add("${wrapperScript}")
}
} else {
args.add("${esScript}")
}
if (this.nodeVersion.before("6.2.0")) {
javaVersion = 8
} else if (this.nodeVersion.onOrAfter("6.2.0") && this.nodeVersion.before("6.3.0")) {
javaVersion = 9
} else if (this.nodeVersion.onOrAfter("6.3.0") && this.nodeVersion.before("6.5.0")) {
javaVersion = 10
}
args.addAll("-E", "node.portsfile=true")
env = [:]
env.putAll(config.environmentVariables)
for (Map.Entry<String, String> property : System.properties.entrySet()) {
if (property.key.startsWith('tests.es.')) {
args.add("-E")
args.add("${property.key.substring('tests.es.'.size())}=${property.value}")
}
}
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
/*
* We have to delay building the string as the path will not exist during configuration which will fail on Windows due to
* getting the short name requiring the path to already exist.
*/
env.put('ES_PATH_CONF', "${-> getShortPathName(pathConf.toString())}")
}
else {
env.put('ES_PATH_CONF', pathConf)
}
if (!System.properties.containsKey("tests.es.path.data")) {
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
/*
* We have to delay building the string as the path will not exist during configuration which will fail on Windows due to
* getting the short name requiring the path to already exist. This one is extra tricky because usually we rely on the node
* creating its data directory on startup but we simply can not do that here because getting the short path name requires
* the directory to already exist. Therefore, we create this directory immediately before getting the short name.
*/
args.addAll("-E", "path.data=${-> Files.createDirectories(Paths.get(dataDir.toString())); getShortPathName(dataDir.toString())}")
} else {
args.addAll("-E", "path.data=${-> dataDir.toString()}")
}
}
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
args.add('"') // end the entire command, quoted
}
}
Path binPath() {
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
return Paths.get(getShortPathName(new File(homeDir, 'bin').toString()))
} else {
return Paths.get(new File(homeDir, 'bin').toURI())
}
}
static String getShortPathName(String path) {
assert Os.isFamily(Os.FAMILY_WINDOWS)
final WString longPath = new WString("\\\\?\\" + path)
// first we get the length of the buffer needed
final int length = JNAKernel32Library.getInstance().GetShortPathNameW(longPath, null, 0)
if (length == 0) {
throw new IllegalStateException("path [" + path + "] encountered error [" + Native.getLastError() + "]")
}
final char[] shortPath = new char[length]
// knowing the length of the buffer, now we get the short name
if (JNAKernel32Library.getInstance().GetShortPathNameW(longPath, shortPath, length) == 0) {
throw new IllegalStateException("path [" + path + "] encountered error [" + Native.getLastError() + "]")
}
// we have to strip the \\?\ away from the path for cmd.exe
return Native.toString(shortPath).substring(4)
}
/** Returns debug string for the command that started this node. */
String getCommandString() {
String esCommandString = "\nNode ${nodeNum} configuration:\n"
esCommandString += "|-----------------------------------------\n"
esCommandString += "| cwd: ${cwd}\n"
esCommandString += "| command: ${executable} ${args.join(' ')}\n"
esCommandString += '| environment:\n'
env.each { k, v -> esCommandString += "| ${k}: ${v}\n" }
if (config.daemonize) {
esCommandString += "|\n| [${wrapperScript.name}]\n"
wrapperScript.eachLine('UTF-8', { line -> esCommandString += " ${line}\n"})
}
esCommandString += '|\n| [elasticsearch.yml]\n'
configFile.eachLine('UTF-8', { line -> esCommandString += "| ${line}\n" })
esCommandString += "|-----------------------------------------"
return esCommandString
}
void writeWrapperScript() {
String argsPasser = '"$@"'
String exitMarker = "; if [ \$? != 0 ]; then touch run.failed; fi"
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
argsPasser = '%*'
exitMarker = "\r\n if \"%errorlevel%\" neq \"0\" ( type nul >> run.failed )"
}
wrapperScript.setText("\"${esScript}\" ${argsPasser} > run.log 2>&1 ${exitMarker}", 'UTF-8')
}
/** Returns an address and port suitable for a uri to connect to this node over http */
String httpUri() {
return httpPortsFile.readLines("UTF-8").get(0)
}
/** Returns an address and port suitable for a uri to connect to this node over transport protocol */
String transportUri() {
return transportPortsFile.readLines("UTF-8").get(0)
}
/** Returns the file which contains the transport protocol ports for this node */
File getTransportPortsFile() {
return transportPortsFile
}
/** Returns the data directory for this node */
File getDataDir() {
if (!(dataDir instanceof File)) {
return new File(dataDir)
}
return dataDir
}
}
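
The getShortPathName helper above uses the standard two-call Win32 pattern: ask GetShortPathNameW for the required buffer size, then call again with a buffer of that size. A minimal JNA sketch of the same idea with a hypothetical Kernel32 mapping (the build uses its own JNAKernel32Library; JNA 5.x and Windows are assumed):

import com.sun.jna.Native;
import com.sun.jna.WString;
import com.sun.jna.win32.StdCallLibrary;

public class ShortPathSketch {
    // Hypothetical mapping of the single Win32 function needed here.
    interface Kernel32 extends StdCallLibrary {
        Kernel32 INSTANCE = Native.load("kernel32", Kernel32.class);
        int GetShortPathNameW(WString longPath, char[] shortPath, int bufferLength);
    }

    static String shortPathName(String path) {
        WString longPath = new WString("\\\\?\\" + path);
        // First call: returns the buffer length needed, or 0 on error.
        int length = Kernel32.INSTANCE.GetShortPathNameW(longPath, null, 0);
        if (length == 0) {
            throw new IllegalStateException("path [" + path + "] encountered error [" + Native.getLastError() + "]");
        }
        char[] shortPath = new char[length];
        // Second call: fills the buffer with the short (8.3) form.
        if (Kernel32.INSTANCE.GetShortPathNameW(longPath, shortPath, length) == 0) {
            throw new IllegalStateException("path [" + path + "] encountered error [" + Native.getLastError() + "]");
        }
        // Strip the \\?\ prefix again so cmd.exe accepts the result.
        return Native.toString(shortPath).substring(4);
    }
}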

View file

@ -1,41 +0,0 @@
package org.elasticsearch.gradle.test
import org.gradle.api.DefaultTask
import org.gradle.api.Task
import org.gradle.api.tasks.Internal
import org.gradle.api.tasks.options.Option
import org.gradle.util.ConfigureUtil
class RunTask extends DefaultTask {
@Internal
ClusterConfiguration clusterConfig
RunTask() {
description = "Runs elasticsearch with '${project.path}'"
group = 'Verification'
clusterConfig = new ClusterConfiguration(project)
clusterConfig.httpPort = 9200
clusterConfig.transportPort = 9300
clusterConfig.daemonize = false
clusterConfig.distribution = 'default'
project.afterEvaluate {
ClusterFormationTasks.setup(project, name, this, clusterConfig)
}
}
@Option(
option = "debug-jvm",
description = "Enable debugging configuration, to allow attaching a debugger to elasticsearch."
)
void setDebug(boolean enabled) {
clusterConfig.debug = enabled;
}
/** Configure the cluster that will be run. */
@Override
Task configure(Closure closure) {
ConfigureUtil.configure(closure, clusterConfig)
return this
}
}

View file

@ -93,8 +93,8 @@ public class DistributionDownloadPlugin implements Plugin<Project> {
// for the distribution as a file, just depend on the artifact directly
dependencies.add(distribution.configuration.getName(), dependencyNotation(project, distribution));
// no extraction allowed for rpm or deb
if (distribution.getType() != Type.RPM && distribution.getType() != Type.DEB) {
// no extraction allowed for rpm, deb or docker
if (distribution.getType().shouldExtract()) {
// for the distribution extracted, add a root level task that does the extraction, and depend on that
// extracted configuration as an artifact consisting of the extracted distribution directory
dependencies.add(distribution.getExtracted().configuration.getName(),
@ -221,7 +221,6 @@ public class DistributionDownloadPlugin implements Plugin<Project> {
}
private static Dependency projectDependency(Project project, String projectPath, String projectConfig) {
if (project.findProject(projectPath) == null) {
throw new GradleException("no project [" + projectPath + "], project names: " + project.getRootProject().getAllprojects());
}
@ -233,11 +232,20 @@ public class DistributionDownloadPlugin implements Plugin<Project> {
private static String distributionProjectPath(ElasticsearchDistribution distribution) {
String projectPath = ":distribution";
if (distribution.getType() == Type.INTEG_TEST_ZIP) {
switch (distribution.getType()) {
case INTEG_TEST_ZIP:
projectPath += ":archives:integ-test-zip";
} else {
break;
case DOCKER:
projectPath += ":docker:";
projectPath += distributionProjectName(distribution);
break;
default:
projectPath += distribution.getType() == Type.ARCHIVE ? ":archives:" : ":packages:";
projectPath += distributionProjectName(distribution);
break;
}
return projectPath;
}
@ -250,9 +258,12 @@ public class DistributionDownloadPlugin implements Plugin<Project> {
if (distribution.getBundledJdk() == false) {
projectName += "no-jdk-";
}
if (distribution.getType() == Type.ARCHIVE) {
Platform platform = distribution.getPlatform();
projectName += platform.toString() + (platform == Platform.WINDOWS ? "-zip" : "-tar");
} else if (distribution.getType() == Type.DOCKER) {
projectName += "docker-export";
} else {
projectName += distribution.getType();
}

View file

@ -46,12 +46,25 @@ public class ElasticsearchDistribution implements Buildable, Iterable<File> {
INTEG_TEST_ZIP,
ARCHIVE,
RPM,
DEB;
DEB,
DOCKER;
@Override
public String toString() {
return super.toString().toLowerCase(Locale.ROOT);
}
public boolean shouldExtract() {
switch (this) {
case DEB:
case DOCKER:
case RPM:
return false;
default:
return true;
}
}
}
public enum Flavor {
@ -171,12 +184,17 @@ public class ElasticsearchDistribution implements Buildable, Iterable<File> {
}
public Extracted getExtracted() {
if (getType() == Type.RPM || getType() == Type.DEB) {
switch (getType()) {
case DEB:
case DOCKER:
case RPM:
throw new UnsupportedOperationException("distribution type [" + getType() + "] for " +
"elasticsearch distribution [" + name + "] cannot be extracted");
}
default:
return extracted;
}
}
@Override
public TaskDependency getBuildDependencies() {
@ -217,7 +235,7 @@ public class ElasticsearchDistribution implements Buildable, Iterable<File> {
if (platform.isPresent() == false) {
platform.set(CURRENT_PLATFORM);
}
} else { // rpm or deb
} else { // rpm, deb or docker
if (platform.isPresent()) {
throw new IllegalArgumentException("platform not allowed for elasticsearch distribution ["
+ name + "] of type [" + getType() + "]");

View file

@ -104,7 +104,7 @@ public class ElasticsearchCluster implements TestClusterConfiguration, Named {
}
}
private ElasticsearchNode getFirstNode() {
ElasticsearchNode getFirstNode() {
return nodes.getAt(clusterName + "-0");
}

View file

@ -148,6 +148,8 @@ public class ElasticsearchNode implements TestClusterConfiguration {
private volatile Process esProcess;
private Function<String, String> nameCustomization = Function.identity();
private boolean isWorkingDirConfigured = false;
private String httpPort = "0";
private String transportPort = "0";
ElasticsearchNode(String path, String name, Project project, ReaperService reaper, File workingDirBase) {
this.path = path;
@ -359,8 +361,7 @@ public class ElasticsearchNode implements TestClusterConfiguration {
@Override
public void freeze() {
requireNonNull(distributions, "null distribution passed when configuring test cluster `" + this + "`");
requireNonNull(javaHome, "null javaHome passed when configuring test cluster `" + this + "`");
requireNonNull(testDistribution, "null testDistribution passed when configuring test cluster `" + this + "`");
LOGGER.info("Locking configuration of `{}`", this);
configurationFrozen.set(true);
}
@ -637,7 +638,9 @@ public class ElasticsearchNode implements TestClusterConfiguration {
private Map<String, String> getESEnvironment() {
Map<String, String> defaultEnv = new HashMap<>();
if ( getJavaHome() != null) {
defaultEnv.put("JAVA_HOME", getJavaHome().getAbsolutePath());
}
defaultEnv.put("ES_PATH_CONF", configFile.getParent().toString());
String systemPropertiesString = "";
if (systemProperties.isEmpty() == false) {
@ -696,9 +699,11 @@ public class ElasticsearchNode implements TestClusterConfiguration {
// Don't inherit anything from the environment, as that would lack reproducibility
environment.clear();
environment.putAll(getESEnvironment());
// don't buffer all in memory, make sure we don't block on the default pipes
processBuilder.redirectError(ProcessBuilder.Redirect.appendTo(esStderrFile.toFile()));
processBuilder.redirectOutput(ProcessBuilder.Redirect.appendTo(esStdoutFile.toFile()));
LOGGER.info("Running `{}` in `{}` for {} env: {}", command, workingDir, this, environment);
try {
esProcess = processBuilder.start();
@ -988,11 +993,11 @@ public class ElasticsearchNode implements TestClusterConfiguration {
defaultConfig.put("path.shared_data", workingDir.resolve("sharedData").toString());
defaultConfig.put("node.attr.testattr", "test");
defaultConfig.put("node.portsfile", "true");
defaultConfig.put("http.port", "0");
defaultConfig.put("http.port", httpPort);
if (getVersion().onOrAfter(Version.fromString("6.7.0"))) {
defaultConfig.put("transport.port", "0");
defaultConfig.put("transport.port", transportPort);
} else {
defaultConfig.put("transport.tcp.port", "0");
defaultConfig.put("transport.tcp.port", transportPort);
}
// Default the watermarks to absurdly low to prevent the tests from failing on nodes without enough disk space
defaultConfig.put("cluster.routing.allocation.disk.watermark.low", "1b");
@ -1287,6 +1292,24 @@ public class ElasticsearchNode implements TestClusterConfiguration {
}
}
void setHttpPort(String httpPort) {
this.httpPort = httpPort;
}
void setTransportPort(String transportPort) {
this.transportPort = transportPort;
}
@Internal
Path getEsStdoutFile() {
return esStdoutFile;
}
@Internal
Path getEsStderrFile() {
return esStderrFile;
}
private static class FileEntry implements Named {
private String name;
private File file;

View file

@ -0,0 +1,73 @@
package org.elasticsearch.gradle.testclusters;
import org.gradle.api.logging.Logger;
import org.gradle.api.logging.Logging;
import org.gradle.api.tasks.Input;
import org.gradle.api.tasks.TaskAction;
import org.gradle.api.tasks.options.Option;
import java.io.BufferedReader;
import java.io.IOException;
import java.nio.file.Files;
import java.util.HashSet;
import java.util.Set;
public class RunTask extends DefaultTestClustersTask {
private static final Logger logger = Logging.getLogger(RunTask.class);
private Boolean debug = false;
@Option(
option = "debug-jvm",
description = "Enable debugging configuration, to allow attaching a debugger to elasticsearch."
)
public void setDebug(boolean enabled) {
this.debug = enabled;
}
@Input
public Boolean getDebug() {
return debug;
}
@Override
public void beforeStart() {
int debugPort = 8000;
int httpPort = 9200;
int transportPort = 9300;
for (ElasticsearchCluster cluster : getClusters()) {
cluster.getFirstNode().setHttpPort(String.valueOf(httpPort));
httpPort++;
cluster.getFirstNode().setTransportPort(String.valueOf(transportPort));
transportPort++;
for (ElasticsearchNode node : cluster.getNodes()) {
if (debug) {
logger.lifecycle(
"Running elasticsearch in debug mode, {} suspending until connected on debugPort {}",
node, debugPort
);
node.jvmArgs("-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=" + debugPort);
debugPort += 1;
}
}
}
}
@TaskAction
public void runAndWait() throws IOException {
Set<BufferedReader> toRead = new HashSet<>();
for (ElasticsearchCluster cluster : getClusters()) {
for (ElasticsearchNode node : cluster.getNodes()) {
toRead.add(Files.newBufferedReader(node.getEsStdoutFile()));
}
}
while (Thread.currentThread().isInterrupted() == false) {
for (BufferedReader bufferedReader : toRead) {
if (bufferedReader.ready()) {
logger.lifecycle(bufferedReader.readLine());
}
}
}
}
}
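
The run task above pins the first node of each cluster to consecutive fixed ports and, when --debug-jvm is passed, appends a JDWP agent per node. A small self-contained illustration of the values produced (cluster and node counts are invented):

public class RunTaskPortsSketch {
    public static void main(String[] args) {
        int debugPort = 8000;
        int httpPort = 9200;
        int transportPort = 9300;
        int clusters = 2;        // hypothetical: two clusters wired to the run task
        int nodesPerCluster = 2; // hypothetical
        for (int c = 0; c < clusters; c++) {
            // First node of cluster 0 listens on 9200/9300, cluster 1 on 9201/9301, and so on.
            System.out.println("cluster " + c + " first node -> http " + httpPort + ", transport " + transportPort);
            httpPort++;
            transportPort++;
            for (int n = 0; n < nodesPerCluster; n++) {
                // With --debug-jvm, every node gets its own JDWP address.
                System.out.println("  node " + n + " -> -agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=" + debugPort);
                debugPort++;
            }
        }
    }
}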

View file

@ -18,9 +18,13 @@ interface TestClustersAware extends Task {
);
}
cluster.getNodes().stream().flatMap(node -> node.getDistributions().stream()).forEach( distro ->
cluster.getNodes().stream().flatMap(node -> node.getDistributions().stream()).forEach(distro ->
dependsOn(distro.getExtracted())
);
getClusters().add(cluster);
}
default void beforeStart() {
}
}

View file

@ -123,7 +123,9 @@ public class TestClustersPlugin implements Plugin<Project> {
return;
}
// we only start the cluster before the actions, so we'll not start it if the task is up-to-date
((TestClustersAware) task).getClusters().forEach(registry::maybeStartCluster);
TestClustersAware awareTask = (TestClustersAware) task;
awareTask.beforeStart();
awareTask.getClusters().forEach(registry::maybeStartCluster);
}
@Override
public void afterActions(Task task) {}

View file

@ -0,0 +1,43 @@
package org.elasticsearch.gradle.doc;
import org.elasticsearch.gradle.test.GradleUnitTestCase;
public class SnippetsTaskTests extends GradleUnitTestCase {
public void testMatchSource() {
SnippetsTask.Source source = SnippetsTask.matchSource("[source,console]");
assertTrue(source.getMatches());
assertEquals("console", source.getLanguage());
assertNull(source.getName());
source = SnippetsTask.matchSource("[source,console,id=snippet-name-1]");
assertTrue(source.getMatches());
assertEquals("console", source.getLanguage());
assertEquals("snippet-name-1", source.getName());
source = SnippetsTask.matchSource("[source, console, id=snippet-name-1]");
assertTrue(source.getMatches());
assertEquals("console", source.getLanguage());
assertEquals("snippet-name-1", source.getName());
source = SnippetsTask.matchSource("[source,console,attr=5,id=snippet-name-1,attr2=6]");
assertTrue(source.getMatches());
assertEquals("console", source.getLanguage());
assertEquals("snippet-name-1", source.getName());
source = SnippetsTask.matchSource("[source,console, attr=5, id=snippet-name-1, attr2=6]");
assertTrue(source.getMatches());
assertEquals("console", source.getLanguage());
assertEquals("snippet-name-1", source.getName());
source = SnippetsTask.matchSource("[\"source\",\"console\",id=\"snippet-name-1\"]");
assertTrue(source.getMatches());
assertEquals("console", source.getLanguage());
assertEquals("snippet-name-1", source.getName());
source = SnippetsTask.matchSource("[source,console,id=\"snippet-name-1\"]");
assertTrue(source.getMatches());
assertEquals("console", source.getLanguage());
assertEquals("snippet-name-1", source.getName());
}
}
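
These assertions define the snippet headers SnippetsTask.matchSource must accept; the production regex is not part of this diff. A rough Pattern that recognizes the same shapes (a language plus an optional quoted or unquoted id attribute) could look like:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class SnippetHeaderSketch {
    private static final Pattern SOURCE = Pattern.compile("\\[\\s*\"?source\"?\\s*,\\s*\"?([-\\w]+)\"?(.*)]");
    private static final Pattern ID = Pattern.compile("id=\"?([-\\w]+)\"?");

    public static void main(String[] args) {
        String[] headers = {
            "[source,console]",
            "[source, console, id=snippet-name-1]",
            "[\"source\",\"console\",id=\"snippet-name-1\"]"
        };
        for (String header : headers) {
            Matcher source = SOURCE.matcher(header);
            if (source.matches()) {
                Matcher id = ID.matcher(source.group(2));
                String name = id.find() ? id.group(1) : null;
                // Prints "console / snippet-name-1" (or "console / null" when no id is given).
                System.out.println(source.group(1) + " / " + name);
            }
        }
    }
}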

View file

@ -0,0 +1,35 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.test;
import com.carrotsearch.randomizedtesting.ThreadFilter;
/**
* Filter out threads controlled by gradle that may be created during unit tests.
*
* Currently this is only the pooled threads for Exec.
*/
public class GradleThreadsFilter implements ThreadFilter {
@Override
public boolean reject(Thread t) {
return t.getName().startsWith("Exec process");
}
}

View file

@ -3,6 +3,7 @@ package org.elasticsearch.gradle.test;
import com.carrotsearch.randomizedtesting.JUnit4MethodProvider;
import com.carrotsearch.randomizedtesting.RandomizedRunner;
import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
import org.junit.runner.RunWith;
@RunWith(RandomizedRunner.class)
@ -10,5 +11,8 @@ import org.junit.runner.RunWith;
JUnit4MethodProvider.class,
JUnit3MethodProvider.class
})
@ThreadLeakFilters(defaultFilters = true, filters = {
GradleThreadsFilter.class
})
public abstract class GradleUnitTestCase extends BaseTestCase {
}

View file

@ -48,7 +48,7 @@ final class TransformRequestConverters {
static Request putTransform(PutTransformRequest putRequest) throws IOException {
String endpoint = new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_data_frame", "transforms")
.addPathPartAsIs("_transform")
.addPathPart(putRequest.getConfig().getId())
.build();
Request request = new Request(HttpPut.METHOD_NAME, endpoint);
@ -61,7 +61,7 @@ final class TransformRequestConverters {
static Request updateTransform(UpdateTransformRequest updateDataFrameTransformRequest) throws IOException {
String endpoint = new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_data_frame", "transforms")
.addPathPartAsIs("_transform")
.addPathPart(updateDataFrameTransformRequest.getId())
.addPathPart("_update")
.build();
@ -75,7 +75,7 @@ final class TransformRequestConverters {
static Request getTransform(GetTransformRequest getRequest) {
String endpoint = new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_data_frame", "transforms")
.addPathPartAsIs("_transform")
.addPathPart(Strings.collectionToCommaDelimitedString(getRequest.getId()))
.build();
Request request = new Request(HttpGet.METHOD_NAME, endpoint);
@ -93,7 +93,7 @@ final class TransformRequestConverters {
static Request deleteTransform(DeleteTransformRequest deleteRequest) {
String endpoint = new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_data_frame", "transforms")
.addPathPartAsIs("_transform")
.addPathPart(deleteRequest.getId())
.build();
Request request = new Request(HttpDelete.METHOD_NAME, endpoint);
@ -105,7 +105,7 @@ final class TransformRequestConverters {
static Request startTransform(StartTransformRequest startRequest) {
String endpoint = new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_data_frame", "transforms")
.addPathPartAsIs("_transform")
.addPathPart(startRequest.getId())
.addPathPartAsIs("_start")
.build();
@ -120,7 +120,7 @@ final class TransformRequestConverters {
static Request stopTransform(StopTransformRequest stopRequest) {
String endpoint = new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_data_frame", "transforms")
.addPathPartAsIs("_transform")
.addPathPart(stopRequest.getId())
.addPathPartAsIs("_stop")
.build();
@ -141,7 +141,7 @@ final class TransformRequestConverters {
static Request previewTransform(PreviewTransformRequest previewRequest) throws IOException {
String endpoint = new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_data_frame", "transforms", "_preview")
.addPathPartAsIs("_transform", "_preview")
.build();
Request request = new Request(HttpPost.METHOD_NAME, endpoint);
request.setEntity(createEntity(previewRequest, REQUEST_BODY_CONTENT_TYPE));
@ -150,7 +150,7 @@ final class TransformRequestConverters {
static Request getTransformStats(GetTransformStatsRequest statsRequest) {
String endpoint = new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_data_frame", "transforms")
.addPathPartAsIs("_transform")
.addPathPart(statsRequest.getId())
.addPathPartAsIs("_stats")
.build();
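
The converter changes above amount to a path rename from the deprecated _data_frame/transforms prefix to _transform. Purely illustrative, with an invented transform id:

public class TransformEndpointsSketch {
    public static void main(String[] args) {
        String id = "ecommerce-transform";  // hypothetical id
        System.out.println("old start:   /_data_frame/transforms/" + id + "/_start");
        System.out.println("new start:   /_transform/" + id + "/_start");
        System.out.println("old preview: /_data_frame/transforms/_preview");
        System.out.println("new preview: /_transform/_preview");
    }
}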

View file

@ -19,7 +19,6 @@
package org.elasticsearch.client.ml.dataframe;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ObjectParser;
@ -48,6 +47,9 @@ public class OutlierDetection implements DataFrameAnalysis {
static final ParseField N_NEIGHBORS = new ParseField("n_neighbors");
static final ParseField METHOD = new ParseField("method");
public static final ParseField FEATURE_INFLUENCE_THRESHOLD = new ParseField("feature_influence_threshold");
static final ParseField COMPUTE_FEATURE_INFLUENCE = new ParseField("compute_feature_influence");
static final ParseField OUTLIER_FRACTION = new ParseField("outlier_fraction");
static final ParseField STANDARDIZATION_ENABLED = new ParseField("standardization_enabled");
private static ObjectParser<Builder, Void> PARSER = new ObjectParser<>(NAME.getPreferredName(), true, Builder::new);
@ -60,22 +62,49 @@ public class OutlierDetection implements DataFrameAnalysis {
throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]");
}, METHOD, ObjectParser.ValueType.STRING);
PARSER.declareDouble(Builder::setFeatureInfluenceThreshold, FEATURE_INFLUENCE_THRESHOLD);
PARSER.declareBoolean(Builder::setComputeFeatureInfluence, COMPUTE_FEATURE_INFLUENCE);
PARSER.declareDouble(Builder::setOutlierFraction, OUTLIER_FRACTION);
PARSER.declareBoolean(Builder::setStandardizationEnabled, STANDARDIZATION_ENABLED);
}
/**
* The number of neighbors. Leave unspecified for dynamic detection.
*/
private final Integer nNeighbors;
/**
* The method. Leave unspecified for a dynamic mixture of methods.
*/
private final Method method;
/**
* The min outlier score required to calculate feature influence. Defaults to 0.1.
*/
private final Double featureInfluenceThreshold;
/**
* Constructs the outlier detection configuration
* @param nNeighbors The number of neighbors. Leave unspecified for dynamic detection.
* @param method The method. Leave unspecified for a dynamic mixture of methods.
* @param featureInfluenceThreshold The min outlier score required to calculate feature influence. Defaults to 0.1.
* Whether to compute feature influence or not. Defaults to true.
*/
private OutlierDetection(@Nullable Integer nNeighbors, @Nullable Method method, @Nullable Double featureInfluenceThreshold) {
private final Boolean computeFeatureInfluence;
/**
* The proportion of data assumed to be outlying prior to outlier detection. Defaults to 0.05.
*/
private final Double outlierFraction;
/**
* Whether to perform standardization.
*/
private final Boolean standardizationEnabled;
private OutlierDetection(Integer nNeighbors, Method method, Double featureInfluenceThreshold, Boolean computeFeatureInfluence,
Double outlierFraction, Boolean standardizationEnabled) {
this.nNeighbors = nNeighbors;
this.method = method;
this.featureInfluenceThreshold = featureInfluenceThreshold;
this.computeFeatureInfluence = computeFeatureInfluence;
this.outlierFraction = outlierFraction;
this.standardizationEnabled = standardizationEnabled;
}
@Override
@ -95,6 +124,18 @@ public class OutlierDetection implements DataFrameAnalysis {
return featureInfluenceThreshold;
}
public Boolean getComputeFeatureInfluence() {
return computeFeatureInfluence;
}
public Double getOutlierFraction() {
return outlierFraction;
}
public Boolean getStandardizationEnabled() {
return standardizationEnabled;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
@ -107,6 +148,15 @@ public class OutlierDetection implements DataFrameAnalysis {
if (featureInfluenceThreshold != null) {
builder.field(FEATURE_INFLUENCE_THRESHOLD.getPreferredName(), featureInfluenceThreshold);
}
if (computeFeatureInfluence != null) {
builder.field(COMPUTE_FEATURE_INFLUENCE.getPreferredName(), computeFeatureInfluence);
}
if (outlierFraction != null) {
builder.field(OUTLIER_FRACTION.getPreferredName(), outlierFraction);
}
if (standardizationEnabled != null) {
builder.field(STANDARDIZATION_ENABLED.getPreferredName(), standardizationEnabled);
}
builder.endObject();
return builder;
}
@ -119,12 +169,16 @@ public class OutlierDetection implements DataFrameAnalysis {
OutlierDetection other = (OutlierDetection) o;
return Objects.equals(nNeighbors, other.nNeighbors)
&& Objects.equals(method, other.method)
&& Objects.equals(featureInfluenceThreshold, other.featureInfluenceThreshold);
&& Objects.equals(featureInfluenceThreshold, other.featureInfluenceThreshold)
&& Objects.equals(computeFeatureInfluence, other.computeFeatureInfluence)
&& Objects.equals(outlierFraction, other.outlierFraction)
&& Objects.equals(standardizationEnabled, other.standardizationEnabled);
}
@Override
public int hashCode() {
return Objects.hash(nNeighbors, method, featureInfluenceThreshold);
return Objects.hash(nNeighbors, method, featureInfluenceThreshold, computeFeatureInfluence, outlierFraction,
standardizationEnabled);
}
@Override
@ -150,6 +204,9 @@ public class OutlierDetection implements DataFrameAnalysis {
private Integer nNeighbors;
private Method method;
private Double featureInfluenceThreshold;
private Boolean computeFeatureInfluence;
private Double outlierFraction;
private Boolean standardizationEnabled;
private Builder() {}
@ -168,8 +225,24 @@ public class OutlierDetection implements DataFrameAnalysis {
return this;
}
public Builder setComputeFeatureInfluence(Boolean computeFeatureInfluence) {
this.computeFeatureInfluence = computeFeatureInfluence;
return this;
}
public Builder setOutlierFraction(Double outlierFraction) {
this.outlierFraction = outlierFraction;
return this;
}
public Builder setStandardizationEnabled(Boolean standardizationEnabled) {
this.standardizationEnabled = standardizationEnabled;
return this;
}
public OutlierDetection build() {
return new OutlierDetection(nNeighbors, method, featureInfluenceThreshold);
return new OutlierDetection(nNeighbors, method, featureInfluenceThreshold, computeFeatureInfluence, outlierFraction,
standardizationEnabled);
}
}
}
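
A minimal builder sketch for the new options; the values simply restate the documented defaults, and the snippet is a fragment rather than a full class:

// org.elasticsearch.client.ml.dataframe.OutlierDetection
OutlierDetection analysis = OutlierDetection.builder()
    .setComputeFeatureInfluence(true)     // serialized as compute_feature_influence
    .setOutlierFraction(0.05)             // serialized as outlier_fraction
    .setStandardizationEnabled(true)      // serialized as standardization_enabled
    .setFeatureInfluenceThreshold(0.1)    // pre-existing option, feature_influence_threshold
    .build();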

View file

@ -22,6 +22,7 @@ import org.elasticsearch.client.ml.inference.preprocessing.PreProcessor;
import org.elasticsearch.client.ml.inference.trainedmodel.TrainedModel;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -38,6 +39,7 @@ public class TrainedModelDefinition implements ToXContentObject {
public static final ParseField TRAINED_MODEL = new ParseField("trained_model");
public static final ParseField PREPROCESSORS = new ParseField("preprocessors");
public static final ParseField INPUT = new ParseField("input");
public static final ObjectParser<Builder, Void> PARSER = new ObjectParser<>(NAME,
true,
@ -51,6 +53,7 @@ public class TrainedModelDefinition implements ToXContentObject {
(p, c, n) -> p.namedObject(PreProcessor.class, n, null),
(trainedModelDefBuilder) -> {/* Does not matter client side*/ },
PREPROCESSORS);
PARSER.declareObject(TrainedModelDefinition.Builder::setInput, (p, c) -> Input.fromXContent(p), INPUT);
}
public static TrainedModelDefinition.Builder fromXContent(XContentParser parser) throws IOException {
@ -59,10 +62,12 @@ public class TrainedModelDefinition implements ToXContentObject {
private final TrainedModel trainedModel;
private final List<PreProcessor> preProcessors;
private final Input input;
TrainedModelDefinition(TrainedModel trainedModel, List<PreProcessor> preProcessors) {
TrainedModelDefinition(TrainedModel trainedModel, List<PreProcessor> preProcessors, Input input) {
this.trainedModel = trainedModel;
this.preProcessors = preProcessors == null ? Collections.emptyList() : Collections.unmodifiableList(preProcessors);
this.input = input;
}
@Override
@ -78,6 +83,9 @@ public class TrainedModelDefinition implements ToXContentObject {
true,
PREPROCESSORS.getPreferredName(),
preProcessors);
if (input != null) {
builder.field(INPUT.getPreferredName(), input);
}
builder.endObject();
return builder;
}
@ -90,6 +98,10 @@ public class TrainedModelDefinition implements ToXContentObject {
return preProcessors;
}
public Input getInput() {
return input;
}
@Override
public String toString() {
return Strings.toString(this);
@ -101,18 +113,20 @@ public class TrainedModelDefinition implements ToXContentObject {
if (o == null || getClass() != o.getClass()) return false;
TrainedModelDefinition that = (TrainedModelDefinition) o;
return Objects.equals(trainedModel, that.trainedModel) &&
Objects.equals(preProcessors, that.preProcessors) ;
Objects.equals(preProcessors, that.preProcessors) &&
Objects.equals(input, that.input);
}
@Override
public int hashCode() {
return Objects.hash(trainedModel, preProcessors);
return Objects.hash(trainedModel, preProcessors, input);
}
public static class Builder {
private List<PreProcessor> preProcessors;
private TrainedModel trainedModel;
private Input input;
public Builder setPreProcessors(List<PreProcessor> preProcessors) {
this.preProcessors = preProcessors;
@ -124,14 +138,71 @@ public class TrainedModelDefinition implements ToXContentObject {
return this;
}
public Builder setInput(Input input) {
this.input = input;
return this;
}
private Builder setTrainedModel(List<TrainedModel> trainedModel) {
assert trainedModel.size() == 1;
return setTrainedModel(trainedModel.get(0));
}
public TrainedModelDefinition build() {
return new TrainedModelDefinition(this.trainedModel, this.preProcessors);
return new TrainedModelDefinition(this.trainedModel, this.preProcessors, this.input);
}
}
public static class Input implements ToXContentObject {
public static final String NAME = "trained_mode_definition_input";
public static final ParseField FIELD_NAMES = new ParseField("field_names");
@SuppressWarnings("unchecked")
public static final ConstructingObjectParser<Input, Void> PARSER = new ConstructingObjectParser<>(NAME,
true,
a -> new Input((List<String>)a[0]));
static {
PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), FIELD_NAMES);
}
public static Input fromXContent(XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
}
private final List<String> fieldNames;
public Input(List<String> fieldNames) {
this.fieldNames = fieldNames;
}
public List<String> getFieldNames() {
return fieldNames;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
if (fieldNames != null) {
builder.field(FIELD_NAMES.getPreferredName(), fieldNames);
}
builder.endObject();
return builder;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
TrainedModelDefinition.Input that = (TrainedModelDefinition.Input) o;
return Objects.equals(fieldNames, that.fieldNames);
}
@Override
public int hashCode() {
return Objects.hash(fieldNames);
}
}
}
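
A short fragment showing the new Input block (the field names are invented); per the toXContent above it serializes as {"field_names":["age","income"]}:

// TrainedModelDefinition.Input, from the client ml.inference package imported above.
TrainedModelDefinition.Input input =
    new TrainedModelDefinition.Input(java.util.Arrays.asList("age", "income"));  // hypothetical field names
// input.getFieldNames() -> [age, income]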

View file

@ -39,7 +39,7 @@ public class TargetMeanEncoding implements PreProcessor {
public static final String NAME = "target_mean_encoding";
public static final ParseField FIELD = new ParseField("field");
public static final ParseField FEATURE_NAME = new ParseField("feature_name");
public static final ParseField TARGET_MEANS = new ParseField("target_means");
public static final ParseField TARGET_MAP = new ParseField("target_map");
public static final ParseField DEFAULT_VALUE = new ParseField("default_value");
@SuppressWarnings("unchecked")
@ -52,7 +52,7 @@ public class TargetMeanEncoding implements PreProcessor {
PARSER.declareString(ConstructingObjectParser.constructorArg(), FEATURE_NAME);
PARSER.declareObject(ConstructingObjectParser.constructorArg(),
(p, c) -> p.map(HashMap::new, XContentParser::doubleValue),
TARGET_MEANS);
TARGET_MAP);
PARSER.declareDouble(ConstructingObjectParser.constructorArg(), DEFAULT_VALUE);
}
@ -110,7 +110,7 @@ public class TargetMeanEncoding implements PreProcessor {
builder.startObject();
builder.field(FIELD.getPreferredName(), field);
builder.field(FEATURE_NAME.getPreferredName(), featureName);
builder.field(TARGET_MEANS.getPreferredName(), meanMap);
builder.field(TARGET_MAP.getPreferredName(), meanMap);
builder.field(DEFAULT_VALUE.getPreferredName(), defaultValue);
builder.endObject();
return builder;
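
The rename only changes the serialized field name. With invented values, a target mean encoding pre-processor now renders as the following JSON (shown here as a Java string for reference):

String targetMeanEncodingJson =
    "{\"field\":\"animal\",\"feature_name\":\"animal_mean\","
    + "\"target_map\":{\"cat\":0.1,\"dog\":0.9},"   // was "target_means" before this change
    + "\"default_value\":0.5}";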

View file

@ -38,13 +38,13 @@ public final class GetApiKeyRequest implements Validatable, ToXContentObject {
private final String name;
private final boolean ownedByAuthenticatedUser;
private GetApiKeyRequest() {
this(null, null, null, null, false);
}
// pkg scope for testing
GetApiKeyRequest(@Nullable String realmName, @Nullable String userName, @Nullable String apiKeyId,
@Nullable String apiKeyName, boolean ownedByAuthenticatedUser) {
if (Strings.hasText(realmName) == false && Strings.hasText(userName) == false && Strings.hasText(apiKeyId) == false
&& Strings.hasText(apiKeyName) == false && ownedByAuthenticatedUser == false) {
throwValidationError("One of [api key id, api key name, username, realm name] must be specified if [owner] flag is false");
}
if (Strings.hasText(apiKeyId) || Strings.hasText(apiKeyName)) {
if (Strings.hasText(realmName) || Strings.hasText(userName)) {
throwValidationError(
@ -147,6 +147,13 @@ public final class GetApiKeyRequest implements Validatable, ToXContentObject {
return new GetApiKeyRequest(null, null, null, null, true);
}
/**
* Creates get api key request to retrieve api key information for all api keys if the authenticated user is authorized to do so.
*/
public static GetApiKeyRequest forAllApiKeys() {
return new GetApiKeyRequest();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return builder;
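
Usage of the new factory is a one-liner; passing it through the high-level security client (not shown in this diff) is assumed to work the same way as the other GetApiKeyRequest variants:

// Retrieve every API key the authenticated user is authorized to see.
GetApiKeyRequest allKeys = GetApiKeyRequest.forAllApiKeys();
// Assumed call path, mirroring the existing request variants:
// GetApiKeyResponse response = client.security().getApiKey(allKeys, RequestOptions.DEFAULT);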

View file

@ -347,8 +347,9 @@ public final class Role {
public static final String VIEW_INDEX_METADATA = "view_index_metadata";
public static final String MANAGE_FOLLOW_INDEX = "manage_follow_index";
public static final String MANAGE_ILM = "manage_ilm";
public static final String CREATE_DOC = "create_doc";
public static final String[] ALL_ARRAY = new String[] { NONE, ALL, READ, READ_CROSS, CREATE, INDEX, DELETE, WRITE, MONITOR, MANAGE,
DELETE_INDEX, CREATE_INDEX, VIEW_INDEX_METADATA, MANAGE_FOLLOW_INDEX, MANAGE_ILM };
DELETE_INDEX, CREATE_INDEX, VIEW_INDEX_METADATA, MANAGE_FOLLOW_INDEX, MANAGE_ILM, CREATE_DOC };
}
}

View file

@ -36,8 +36,6 @@ import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.rest.action.document.RestBulkAction;
import org.elasticsearch.search.SearchHit;
import org.hamcrest.Matcher;
@ -74,12 +72,6 @@ public class BulkProcessorIT extends ESRestHighLevelClientTestCase {
bulkListener), listener);
}
private static BulkProcessor.Builder initBulkProcessorBuilderUsingTypes(BulkProcessor.Listener listener) {
return BulkProcessor.builder(
(request, bulkListener) -> highLevelClient().bulkAsync(request, expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE),
bulkListener), listener);
}
public void testThatBulkProcessorCountIsCorrect() throws Exception {
final CountDownLatch latch = new CountDownLatch(1);
BulkProcessorTestListener listener = new BulkProcessorTestListener(latch);
@ -170,7 +162,6 @@ public class BulkProcessorIT extends ESRestHighLevelClientTestCase {
for (BulkItemResponse bulkItemResponse : listener.bulkItems) {
assertThat(bulkItemResponse.getFailureMessage(), bulkItemResponse.isFailed(), equalTo(false));
assertThat(bulkItemResponse.getIndex(), equalTo("test"));
assertThat(bulkItemResponse.getType(), equalTo("_doc"));
//with concurrent requests > 1 we can't rely on the order of the bulk requests
assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(numDocs)));
//we do want to check that we don't get duplicate ids back
@ -269,7 +260,6 @@ public class BulkProcessorIT extends ESRestHighLevelClientTestCase {
Set<String> readOnlyIds = new HashSet<>();
for (BulkItemResponse bulkItemResponse : listener.bulkItems) {
assertThat(bulkItemResponse.getIndex(), either(equalTo("test")).or(equalTo("test-ro")));
assertThat(bulkItemResponse.getType(), equalTo("_doc"));
if (bulkItemResponse.getIndex().equals("test")) {
assertThat(bulkItemResponse.isFailed(), equalTo(false));
//with concurrent requests > 1 we can't rely on the order of the bulk requests
@ -298,7 +288,6 @@ public class BulkProcessorIT extends ESRestHighLevelClientTestCase {
// tag::bulk-processor-mix-parameters
try (BulkProcessor processor = initBulkProcessorBuilder(listener)
.setGlobalIndex("tweets")
.setGlobalType("_doc")
.setGlobalRouting("routing")
.setGlobalPipeline("pipeline_id")
.build()) {
@ -326,99 +315,36 @@ public class BulkProcessorIT extends ESRestHighLevelClientTestCase {
createIndexWithMultipleShards("test");
createFieldAddingPipleine("pipeline_id", "fieldNameXYZ", "valueXYZ");
final String customType = "testType";
final String ignoredType = "ignoredType";
int numDocs = randomIntBetween(10, 10);
{
final CountDownLatch latch = new CountDownLatch(1);
BulkProcessorTestListener listener = new BulkProcessorTestListener(latch);
//Check that untyped document additions inherit the global type
String globalType = customType;
String localType = null;
try (BulkProcessor processor = initBulkProcessorBuilderUsingTypes(listener)
//let's make sure that the bulk action limit trips, one single execution will index all the documents
.setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs)
.setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
.setGlobalIndex("test")
.setGlobalType(globalType)
.setGlobalRouting("routing")
.setGlobalPipeline("pipeline_id")
.build()) {
indexDocs(processor, numDocs, null, localType, "test", globalType, "pipeline_id");
latch.await();
assertThat(listener.beforeCounts.get(), equalTo(1));
assertThat(listener.afterCounts.get(), equalTo(1));
assertThat(listener.bulkFailures.size(), equalTo(0));
assertResponseItems(listener.bulkItems, numDocs, globalType);
Iterable<SearchHit> hits = searchAll(new SearchRequest("test").routing("routing"));
assertThat(hits, everyItem(hasProperty(fieldFromSource("fieldNameXYZ"), equalTo("valueXYZ"))));
assertThat(hits, containsInAnyOrder(expectedIds(numDocs)));
}
}
{
//Check that typed document additions don't inherit the global type
String globalType = ignoredType;
String localType = customType;
final CountDownLatch latch = new CountDownLatch(1);
BulkProcessorTestListener listener = new BulkProcessorTestListener(latch);
try (BulkProcessor processor = initBulkProcessorBuilderUsingTypes(listener)
//let's make sure that the bulk action limit trips, one single execution will index all the documents
.setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs)
.setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
.setGlobalIndex("test")
.setGlobalType(globalType)
.setGlobalRouting("routing")
.setGlobalPipeline("pipeline_id")
.build()) {
indexDocs(processor, numDocs, null, localType, "test", globalType, "pipeline_id");
latch.await();
assertThat(listener.beforeCounts.get(), equalTo(1));
assertThat(listener.afterCounts.get(), equalTo(1));
assertThat(listener.bulkFailures.size(), equalTo(0));
assertResponseItems(listener.bulkItems, numDocs, localType);
Iterable<SearchHit> hits = searchAll(new SearchRequest("test").routing("routing"));
assertThat(hits, everyItem(hasProperty(fieldFromSource("fieldNameXYZ"), equalTo("valueXYZ"))));
assertThat(hits, containsInAnyOrder(expectedIds(numDocs)));
}
}
{
//Check that untyped document additions and untyped global inherit the established custom type
// (the custom document type introduced to the mapping by the earlier code in this test)
String globalType = null;
String localType = null;
final CountDownLatch latch = new CountDownLatch(1);
BulkProcessorTestListener listener = new BulkProcessorTestListener(latch);
try (BulkProcessor processor = initBulkProcessorBuilder(listener)
//let's make sure that the bulk action limit trips, one single execution will index all the documents
.setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs)
.setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
.setGlobalIndex("test")
.setGlobalType(globalType)
.setGlobalRouting("routing")
.setGlobalPipeline("pipeline_id")
.build()) {
indexDocs(processor, numDocs, null, localType, "test", globalType, "pipeline_id");
indexDocs(processor, numDocs, null, localType, "test", "pipeline_id");
latch.await();
assertThat(listener.beforeCounts.get(), equalTo(1));
assertThat(listener.afterCounts.get(), equalTo(1));
assertThat(listener.bulkFailures.size(), equalTo(0));
assertResponseItems(listener.bulkItems, numDocs, MapperService.SINGLE_MAPPING_NAME);
assertResponseItems(listener.bulkItems, numDocs);
Iterable<SearchHit> hits = searchAll(new SearchRequest("test").routing("routing"));
assertThat(hits, everyItem(hasProperty(fieldFromSource("fieldNameXYZ"), equalTo("valueXYZ"))));
assertThat(hits, containsInAnyOrder(expectedIds(numDocs)));
}
}
}
@ -431,7 +357,7 @@ public class BulkProcessorIT extends ESRestHighLevelClientTestCase {
}
private MultiGetRequest indexDocs(BulkProcessor processor, int numDocs, String localIndex, String localType,
String globalIndex, String globalType, String globalPipeline) throws Exception {
String globalIndex, String globalPipeline) throws Exception {
MultiGetRequest multiGetRequest = new MultiGetRequest();
for (int i = 1; i <= numDocs; i++) {
if (randomBoolean()) {
@ -439,12 +365,7 @@ public class BulkProcessorIT extends ESRestHighLevelClientTestCase {
.source(XContentType.JSON, "field", randomRealisticUnicodeOfLengthBetween(1, 30)));
} else {
BytesArray data = bytesBulkRequest(localIndex, localType, i);
processor.add(data, globalIndex, globalType, globalPipeline, XContentType.JSON);
if (localType != null) {
// If the payload contains types, parsing it into a bulk request results in a warning.
assertWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE);
}
processor.add(data, globalIndex, globalPipeline, XContentType.JSON);
}
multiGetRequest.add(localIndex, Integer.toString(i));
}
@ -475,19 +396,14 @@ public class BulkProcessorIT extends ESRestHighLevelClientTestCase {
}
private MultiGetRequest indexDocs(BulkProcessor processor, int numDocs) throws Exception {
return indexDocs(processor, numDocs, "test", null, null, null, null);
return indexDocs(processor, numDocs, "test", null, null, null);
}
private static void assertResponseItems(List<BulkItemResponse> bulkItemResponses, int numDocs) {
assertResponseItems(bulkItemResponses, numDocs, MapperService.SINGLE_MAPPING_NAME);
}
private static void assertResponseItems(List<BulkItemResponse> bulkItemResponses, int numDocs, String expectedType) {
assertThat(bulkItemResponses.size(), is(numDocs));
int i = 1;
for (BulkItemResponse bulkItemResponse : bulkItemResponses) {
assertThat(bulkItemResponse.getIndex(), equalTo("test"));
assertThat(bulkItemResponse.getType(), equalTo(expectedType));
assertThat(bulkItemResponse.getId(), equalTo(Integer.toString(i++)));
assertThat("item " + i + " failed with cause: " + bulkItemResponse.getFailureMessage(),
bulkItemResponse.isFailed(), equalTo(false));
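
With the typed variants removed, global parameters are configured without a type. A hedged sketch of the equivalent production usage, where client is a RestHighLevelClient and listener a BulkProcessor.Listener (both placeholders):

BulkProcessor processor = BulkProcessor.builder(
        (request, bulkListener) -> client.bulkAsync(request, RequestOptions.DEFAULT, bulkListener),
        listener)
    .setGlobalIndex("tweets")
    .setGlobalRouting("routing")
    .setGlobalPipeline("pipeline_id")
    .build();
processor.add(new IndexRequest().id("1").source(XContentType.JSON, "user", "kimchy"));
processor.close();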

View file

@ -106,7 +106,7 @@ public class BulkRequestWithGlobalParametersIT extends ESRestHighLevelClientTest
}
public void testGlobalIndex() throws IOException {
BulkRequest request = new BulkRequest("global_index", null);
BulkRequest request = new BulkRequest("global_index");
request.add(new IndexRequest().id("1")
.source(XContentType.JSON, "field", "bulk1"));
request.add(new IndexRequest().id("2")
@ -120,7 +120,7 @@ public class BulkRequestWithGlobalParametersIT extends ESRestHighLevelClientTest
@SuppressWarnings("unchecked")
public void testIndexGlobalAndPerRequest() throws IOException {
BulkRequest request = new BulkRequest("global_index", null);
BulkRequest request = new BulkRequest("global_index");
request.add(new IndexRequest("local_index").id("1")
.source(XContentType.JSON, "field", "bulk1"));
request.add(new IndexRequest().id("2") // will take global index
@ -168,19 +168,6 @@ public class BulkRequestWithGlobalParametersIT extends ESRestHighLevelClientTest
assertThat(hits, containsInAnyOrder(hasId("1"), hasId("2")));
}
public void testGlobalIndexNoTypes() throws IOException {
BulkRequest request = new BulkRequest("global_index");
request.add(new IndexRequest().id("1")
.source(XContentType.JSON, "field", "bulk1"));
request.add(new IndexRequest().id("2")
.source(XContentType.JSON, "field", "bulk2"));
bulk(request);
Iterable<SearchHit> hits = searchAll("global_index");
assertThat(hits, everyItem(hasIndex("global_index")));
}
private BulkResponse bulk(BulkRequest request) throws IOException {
BulkResponse bulkResponse = execute(request, highLevelClient()::bulk, highLevelClient()::bulkAsync, RequestOptions.DEFAULT);
assertFalse(bulkResponse.hasFailures());

View file

@ -54,7 +54,6 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.get.GetResult;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.action.document.RestBulkAction;
import org.elasticsearch.rest.action.document.RestDeleteAction;
import org.elasticsearch.rest.action.document.RestIndexAction;
import org.elasticsearch.rest.action.document.RestUpdateAction;
@ -401,20 +400,6 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
}
}
public void testMultiGetWithTypes() throws IOException {
BulkRequest bulk = new BulkRequest();
bulk.setRefreshPolicy(RefreshPolicy.IMMEDIATE);
bulk.add(new IndexRequest("index", "type", "id1")
.source("{\"field\":\"value1\"}", XContentType.JSON));
bulk.add(new IndexRequest("index", "type", "id2")
.source("{\"field\":\"value2\"}", XContentType.JSON));
highLevelClient().bulk(bulk, expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE));
MultiGetRequest multiGetRequest = new MultiGetRequest();
multiGetRequest.add("index", "id1");
multiGetRequest.add("index", "id2");
}
public void testIndex() throws IOException {
final XContentType xContentType = randomFrom(XContentType.values());
{
@ -897,7 +882,6 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
assertEquals(i, bulkItemResponse.getItemId());
assertEquals("index", bulkItemResponse.getIndex());
assertEquals("_doc", bulkItemResponse.getType());
assertEquals(String.valueOf(i), bulkItemResponse.getId());
DocWriteRequest.OpType requestOpType = bulkRequest.requests().get(i).opType();

View file

@ -1246,7 +1246,10 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
assertThat(createdConfig.getSource().getQueryConfig(), equalTo(new QueryConfig(new MatchAllQueryBuilder()))); // default value
assertThat(createdConfig.getDest().getIndex(), equalTo(config.getDest().getIndex()));
assertThat(createdConfig.getDest().getResultsField(), equalTo("ml")); // default value
assertThat(createdConfig.getAnalysis(), equalTo(config.getAnalysis()));
assertThat(createdConfig.getAnalysis(), equalTo(OutlierDetection.builder()
.setComputeFeatureInfluence(true)
.setOutlierFraction(0.05)
.setStandardizationEnabled(true).build()));
assertThat(createdConfig.getAnalyzedFields(), equalTo(config.getAnalyzedFields()));
assertThat(createdConfig.getModelMemoryLimit(), equalTo(ByteSizeValue.parseBytesSizeValue("1gb", ""))); // default value
assertThat(createdConfig.getDescription(), equalTo("some description"));

View file

@ -24,7 +24,6 @@ import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.elasticsearch.client.core.PageParams;
import org.elasticsearch.client.transform.TransformNamedXContentProvider;
import org.elasticsearch.client.transform.DeleteTransformRequest;
import org.elasticsearch.client.transform.GetTransformRequest;
import org.elasticsearch.client.transform.GetTransformStatsRequest;
@ -32,6 +31,7 @@ import org.elasticsearch.client.transform.PreviewTransformRequest;
import org.elasticsearch.client.transform.PutTransformRequest;
import org.elasticsearch.client.transform.StartTransformRequest;
import org.elasticsearch.client.transform.StopTransformRequest;
import org.elasticsearch.client.transform.TransformNamedXContentProvider;
import org.elasticsearch.client.transform.UpdateTransformRequest;
import org.elasticsearch.client.transform.transforms.TransformConfig;
import org.elasticsearch.client.transform.transforms.TransformConfigTests;
@ -56,7 +56,7 @@ import static org.hamcrest.Matchers.hasEntry;
import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.not;
public class DataFrameRequestConvertersTests extends ESTestCase {
public class TransformRequestConvertersTests extends ESTestCase {
@Override
protected NamedXContentRegistry xContentRegistry() {
@ -73,7 +73,7 @@ public class DataFrameRequestConvertersTests extends ESTestCase {
Request request = TransformRequestConverters.putTransform(putRequest);
assertThat(request.getParameters(), not(hasKey("defer_validation")));
assertEquals(HttpPut.METHOD_NAME, request.getMethod());
assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/" + putRequest.getConfig().getId()));
assertThat(request.getEndpoint(), equalTo("/_transform/" + putRequest.getConfig().getId()));
try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) {
TransformConfig parsedConfig = TransformConfig.PARSER.apply(parser, null);
@ -92,7 +92,7 @@ public class DataFrameRequestConvertersTests extends ESTestCase {
Request request = TransformRequestConverters.updateTransform(updateDataFrameTransformRequest);
assertThat(request.getParameters(), not(hasKey("defer_validation")));
assertEquals(HttpPost.METHOD_NAME, request.getMethod());
assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/" + transformId + "/_update"));
assertThat(request.getEndpoint(), equalTo("/_transform/" + transformId + "/_update"));
try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) {
TransformConfigUpdate parsedConfig = TransformConfigUpdate.fromXContent(parser);
@ -109,7 +109,7 @@ public class DataFrameRequestConvertersTests extends ESTestCase {
Request request = TransformRequestConverters.deleteTransform(deleteRequest);
assertEquals(HttpDelete.METHOD_NAME, request.getMethod());
assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/foo"));
assertThat(request.getEndpoint(), equalTo("/_transform/foo"));
assertThat(request.getParameters(), not(hasKey("force")));
@ -129,7 +129,7 @@ public class DataFrameRequestConvertersTests extends ESTestCase {
Request request = TransformRequestConverters.startTransform(startRequest);
assertEquals(HttpPost.METHOD_NAME, request.getMethod());
assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/" + startRequest.getId() + "/_start"));
assertThat(request.getEndpoint(), equalTo("/_transform/" + startRequest.getId() + "/_start"));
if (timeValue != null) {
assertTrue(request.getParameters().containsKey("timeout"));
@ -153,7 +153,7 @@ public class DataFrameRequestConvertersTests extends ESTestCase {
Request request = TransformRequestConverters.stopTransform(stopRequest);
assertEquals(HttpPost.METHOD_NAME, request.getMethod());
assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/" + stopRequest.getId() + "/_stop"));
assertThat(request.getEndpoint(), equalTo("/_transform/" + stopRequest.getId() + "/_stop"));
if (waitForCompletion != null) {
assertTrue(request.getParameters().containsKey("wait_for_completion"));
@ -181,7 +181,7 @@ public class DataFrameRequestConvertersTests extends ESTestCase {
Request request = TransformRequestConverters.previewTransform(previewRequest);
assertEquals(HttpPost.METHOD_NAME, request.getMethod());
assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/_preview"));
assertThat(request.getEndpoint(), equalTo("/_transform/_preview"));
try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) {
TransformConfig parsedConfig = TransformConfig.PARSER.apply(parser, null);
@ -194,7 +194,7 @@ public class DataFrameRequestConvertersTests extends ESTestCase {
Request request = TransformRequestConverters.getTransformStats(getStatsRequest);
assertEquals(HttpGet.METHOD_NAME, request.getMethod());
assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/foo/_stats"));
assertThat(request.getEndpoint(), equalTo("/_transform/foo/_stats"));
assertFalse(request.getParameters().containsKey("from"));
assertFalse(request.getParameters().containsKey("size"));
@ -224,7 +224,7 @@ public class DataFrameRequestConvertersTests extends ESTestCase {
Request request = TransformRequestConverters.getTransform(getRequest);
assertEquals(HttpGet.METHOD_NAME, request.getMethod());
assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/bar"));
assertThat(request.getEndpoint(), equalTo("/_transform/bar"));
assertFalse(request.getParameters().containsKey("from"));
assertFalse(request.getParameters().containsKey("size"));
@ -254,6 +254,6 @@ public class DataFrameRequestConvertersTests extends ESTestCase {
Request request = TransformRequestConverters.getTransform(getRequest);
assertEquals(HttpGet.METHOD_NAME, request.getMethod());
assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/foo,bar,baz"));
assertThat(request.getEndpoint(), equalTo("/_transform/foo,bar,baz"));
}
}

View file

@ -2932,6 +2932,10 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
DataFrameAnalysis outlierDetectionCustomized = OutlierDetection.builder() // <1>
.setMethod(OutlierDetection.Method.DISTANCE_KNN) // <2>
.setNNeighbors(5) // <3>
.setFeatureInfluenceThreshold(0.1) // <4>
.setComputeFeatureInfluence(true) // <5>
.setOutlierFraction(0.05) // <6>
.setStandardizationEnabled(true) // <7>
.build();
// end::put-data-frame-analytics-outlier-detection-customized

View file

@ -1985,6 +1985,18 @@ public class SecurityDocumentationIT extends ESRestHighLevelClientTestCase {
verifyApiKey(getApiKeyResponse.getApiKeyInfos().get(0), expectedApiKeyInfo);
}
{
// tag::get-all-api-keys-request
GetApiKeyRequest getApiKeyRequest = GetApiKeyRequest.forAllApiKeys();
// end::get-all-api-keys-request
GetApiKeyResponse getApiKeyResponse = client.security().getApiKey(getApiKeyRequest, RequestOptions.DEFAULT);
assertThat(getApiKeyResponse.getApiKeyInfos(), is(notNullValue()));
assertThat(getApiKeyResponse.getApiKeyInfos().size(), is(1));
verifyApiKey(getApiKeyResponse.getApiKeyInfos().get(0), expectedApiKeyInfo);
}
{
// tag::get-user-realm-api-keys-request
GetApiKeyRequest getApiKeyRequest = GetApiKeyRequest.usingRealmAndUserName("default_file", "test_user");

View file

@ -42,16 +42,16 @@ import org.elasticsearch.client.transform.StopTransformRequest;
import org.elasticsearch.client.transform.StopTransformResponse;
import org.elasticsearch.client.transform.UpdateTransformRequest;
import org.elasticsearch.client.transform.UpdateTransformResponse;
import org.elasticsearch.client.transform.transforms.TransformIndexerStats;
import org.elasticsearch.client.transform.transforms.TransformConfig;
import org.elasticsearch.client.transform.transforms.TransformConfigUpdate;
import org.elasticsearch.client.transform.transforms.TransformProgress;
import org.elasticsearch.client.transform.transforms.TransformStats;
import org.elasticsearch.client.transform.transforms.DestConfig;
import org.elasticsearch.client.transform.transforms.NodeAttributes;
import org.elasticsearch.client.transform.transforms.QueryConfig;
import org.elasticsearch.client.transform.transforms.SourceConfig;
import org.elasticsearch.client.transform.transforms.TimeSyncConfig;
import org.elasticsearch.client.transform.transforms.TransformConfig;
import org.elasticsearch.client.transform.transforms.TransformConfigUpdate;
import org.elasticsearch.client.transform.transforms.TransformIndexerStats;
import org.elasticsearch.client.transform.transforms.TransformProgress;
import org.elasticsearch.client.transform.transforms.TransformStats;
import org.elasticsearch.client.transform.transforms.pivot.AggregationConfig;
import org.elasticsearch.client.transform.transforms.pivot.GroupConfig;
import org.elasticsearch.client.transform.transforms.pivot.PivotConfig;
@ -219,7 +219,7 @@ public class TransformDocumentationIT extends ESRestHighLevelClientTestCase {
}
}
public void testUpdateDataFrameTransform() throws IOException, InterruptedException {
public void testUpdateTransform() throws IOException, InterruptedException {
createIndex("source-data");
RestHighLevelClient client = highLevelClient();

View file

@ -26,6 +26,7 @@ import java.io.IOException;
import static org.hamcrest.Matchers.closeTo;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
public class OutlierDetectionTests extends AbstractXContentTestCase<OutlierDetection> {
@ -34,6 +35,9 @@ public class OutlierDetectionTests extends AbstractXContentTestCase<OutlierDetec
.setNNeighbors(randomBoolean() ? null : randomIntBetween(1, 20))
.setMethod(randomBoolean() ? null : randomFrom(OutlierDetection.Method.values()))
.setFeatureInfluenceThreshold(randomBoolean() ? null : randomDoubleBetween(0.0, 1.0, true))
.setComputeFeatureInfluence(randomBoolean() ? null : randomBoolean())
.setOutlierFraction(randomBoolean() ? null : randomDoubleBetween(0.0, 1.0, true))
.setStandardizationEnabled(randomBoolean() ? null : randomBoolean())
.build();
}
@ -57,6 +61,9 @@ public class OutlierDetectionTests extends AbstractXContentTestCase<OutlierDetec
assertNull(outlierDetection.getNNeighbors());
assertNull(outlierDetection.getMethod());
assertNull(outlierDetection.getFeatureInfluenceThreshold());
assertNull(outlierDetection.getComputeFeatureInfluence());
assertNull(outlierDetection.getOutlierFraction());
assertNull(outlierDetection.getStandardizationEnabled());
}
public void testGetParams_GivenExplicitValues() {
@ -65,9 +72,15 @@ public class OutlierDetectionTests extends AbstractXContentTestCase<OutlierDetec
.setNNeighbors(42)
.setMethod(OutlierDetection.Method.LDOF)
.setFeatureInfluenceThreshold(0.5)
.setComputeFeatureInfluence(true)
.setOutlierFraction(0.42)
.setStandardizationEnabled(false)
.build();
assertThat(outlierDetection.getNNeighbors(), equalTo(42));
assertThat(outlierDetection.getMethod(), equalTo(OutlierDetection.Method.LDOF));
assertThat(outlierDetection.getFeatureInfluenceThreshold(), closeTo(0.5, 1E-9));
assertThat(outlierDetection.getComputeFeatureInfluence(), is(true));
assertThat(outlierDetection.getOutlierFraction(), closeTo(0.42, 1E-9));
assertThat(outlierDetection.getStandardizationEnabled(), is(false));
}
}

View file

@ -64,7 +64,10 @@ public class TrainedModelDefinitionTests extends AbstractXContentTestCase<Traine
TargetMeanEncodingTests.createRandom()))
.limit(numberOfProcessors)
.collect(Collectors.toList()))
.setTrainedModel(randomFrom(TreeTests.createRandom()));
.setTrainedModel(randomFrom(TreeTests.createRandom()))
.setInput(new TrainedModelDefinition.Input(Stream.generate(() -> randomAlphaOfLength(10))
.limit(randomLongBetween(1, 10))
.collect(Collectors.toList())));
}
@Override

View file

@ -52,7 +52,6 @@ public class GetApiKeyRequestTests extends ESTestCase {
public void testRequestValidationFailureScenarios() throws IOException {
String[][] inputs = new String[][] {
{ randomNullOrEmptyString(), randomNullOrEmptyString(), randomNullOrEmptyString(), randomNullOrEmptyString(), "false" },
{ randomNullOrEmptyString(), "user", "api-kid", "api-kname", "false" },
{ "realm", randomNullOrEmptyString(), "api-kid", "api-kname", "false" },
{ "realm", "user", "api-kid", randomNullOrEmptyString(), "false" },
@ -60,7 +59,6 @@ public class GetApiKeyRequestTests extends ESTestCase {
{ "realm", randomNullOrEmptyString(), randomNullOrEmptyString(), randomNullOrEmptyString(), "true"},
{ randomNullOrEmptyString(), "user", randomNullOrEmptyString(), randomNullOrEmptyString(), "true"} };
String[] expectedErrorMessages = new String[] {
"One of [api key id, api key name, username, realm name] must be specified if [owner] flag is false",
"username or realm name must not be specified when the api key id or api key name is specified",
"username or realm name must not be specified when the api key id or api key name is specified",
"username or realm name must not be specified when the api key id or api key name is specified",

View file

@ -23,11 +23,13 @@ import org.elasticsearch.gradle.ConcatFilesTask
import org.elasticsearch.gradle.MavenFilteringHack
import org.elasticsearch.gradle.NoticeTask
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.test.RunTask
import org.elasticsearch.gradle.testclusters.RunTask
import java.nio.file.Files
import java.nio.file.Path
apply plugin: 'elasticsearch.testclusters'
/*****************************************************************************
* Third party dependencies report *
*****************************************************************************/
@ -414,9 +416,10 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
}
task run(type: RunTask) {
distribution = System.getProperty('run.distribution', 'default')
if (distribution == 'default') {
testClusters {
runTask {
testDistribution = System.getProperty('run.distribution', 'default')
if (System.getProperty('run.distribution', 'default') == 'default') {
String licenseType = System.getProperty("run.license_type", "basic")
if (licenseType == 'trial') {
setting 'xpack.ml.enabled', 'true'
@ -426,14 +429,17 @@ task run(type: RunTask) {
} else if (licenseType != 'basic') {
throw new IllegalArgumentException("Unsupported self-generated license type: [" + licenseType + "[basic] or [trial].")
}
setupCommand 'setupTestAdmin',
'bin/elasticsearch-users', 'useradd', 'elastic-admin', '-p', 'elastic-password', '-r', 'superuser'
setting 'xpack.security.enabled', 'true'
setting 'xpack.monitoring.enabled', 'true'
setting 'xpack.sql.enabled', 'true'
setting 'xpack.rollup.enabled', 'true'
keystoreSetting 'bootstrap.password', 'password'
keystore 'bootstrap.password', 'password'
}
}
}
task run(type: RunTask) {
useCluster testClusters.runTask;
}
/**

View file

@ -186,3 +186,37 @@ assemble.dependsOn "buildDockerImage"
if (tasks.findByName("composePull")) {
tasks.composePull.enabled = false
}
/*
* The export subprojects write out the generated Docker images to disk, so
* that they can be easily reloaded, for example into a VM.
*/
subprojects { Project subProject ->
if (subProject.name.contains('docker-export')) {
apply plugin: 'distribution'
final boolean oss = subProject.name.startsWith('oss')
def exportTaskName = taskName("export", oss, "DockerImage")
def buildTaskName = taskName("build", oss, "DockerImage")
def tarFile = "${parent.projectDir}/build/elasticsearch${oss ? '-oss' : ''}_test.${VersionProperties.elasticsearch}.docker.tar"
final Task exportDockerImageTask = task(exportTaskName, type: LoggedExec) {
executable 'docker'
args "save",
"-o",
tarFile,
"elasticsearch${oss ? '-oss' : ''}:test"
}
exportDockerImageTask.dependsOn(parent.tasks.getByName(buildTaskName))
artifacts.add('default', file(tarFile)) {
type 'tar'
name "elasticsearch${oss ? '-oss' : ''}"
builtBy exportTaskName
}
assemble.dependsOn exportTaskName
}
}

View file

@ -0,0 +1,2 @@
// This file is intentionally blank. All configuration of the
// export is done in the parent project.

View file

@ -0,0 +1,2 @@
// This file is intentionally blank. All configuration of the
// export is done in the parent project.

View file

@ -21,8 +21,7 @@ if [ -z "$ES_TMPDIR" ]; then
fi
ES_JVM_OPTIONS="$ES_PATH_CONF"/jvm.options
JVM_OPTIONS=`"$JAVA" -cp "$ES_CLASSPATH" org.elasticsearch.tools.launchers.JvmOptionsParser "$ES_JVM_OPTIONS"`
ES_JAVA_OPTS="${JVM_OPTIONS//\$\{ES_TMPDIR\}/$ES_TMPDIR}"
ES_JAVA_OPTS=`export ES_TMPDIR; "$JAVA" -cp "$ES_CLASSPATH" org.elasticsearch.tools.launchers.JvmOptionsParser "$ES_JVM_OPTIONS"`
# manual parsing to find out, if process should be detached
if ! echo $* | grep -E '(^-d |-d$| -d |--daemonize$|--daemonize )' > /dev/null; then

View file

@ -115,8 +115,8 @@ set ES_JVM_OPTIONS=%ES_PATH_CONF%\jvm.options
if not "%ES_JAVA_OPTS%" == "" set ES_JAVA_OPTS=%ES_JAVA_OPTS: =;%
@setlocal
for /F "usebackq delims=" %%a in (`"%JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.JvmOptionsParser" "!ES_JVM_OPTIONS!" || echo jvm_options_parser_failed"`) do set JVM_OPTIONS=%%a
@endlocal & set "MAYBE_JVM_OPTIONS_PARSER_FAILED=%JVM_OPTIONS%" & set ES_JAVA_OPTS=%JVM_OPTIONS:${ES_TMPDIR}=!ES_TMPDIR!%
for /F "usebackq delims=" %%a in (`"%JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.JvmOptionsParser" "!ES_JVM_OPTIONS!" || echo jvm_options_parser_failed"`) do set ES_JAVA_OPTS=%%a
@endlocal & set "MAYBE_JVM_OPTIONS_PARSER_FAILED=%ES_JAVA_OPTS%" & set ES_JAVA_OPTS=%ES_JAVA_OPTS%
if "%MAYBE_JVM_OPTIONS_PARSER_FAILED%" == "jvm_options_parser_failed" (
exit /b 1

View file

@ -47,8 +47,8 @@ if not defined ES_TMPDIR (
set ES_JVM_OPTIONS=%ES_PATH_CONF%\jvm.options
@setlocal
for /F "usebackq delims=" %%a in (`CALL %JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.JvmOptionsParser" "!ES_JVM_OPTIONS!" ^|^| echo jvm_options_parser_failed`) do set JVM_OPTIONS=%%a
@endlocal & set "MAYBE_JVM_OPTIONS_PARSER_FAILED=%JVM_OPTIONS%" & set ES_JAVA_OPTS=%JVM_OPTIONS:${ES_TMPDIR}=!ES_TMPDIR!%
for /F "usebackq delims=" %%a in (`CALL %JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.JvmOptionsParser" "!ES_JVM_OPTIONS!" ^|^| echo jvm_options_parser_failed`) do set ES_JAVA_OPTS=%%a
@endlocal & set "MAYBE_JVM_OPTIONS_PARSER_FAILED=%ES_JAVA_OPTS%" & set ES_JAVA_OPTS=%ES_JAVA_OPTS%
if "%MAYBE_JVM_OPTIONS_PARSER_FAILED%" == "jvm_options_parser_failed" (
exit /b 1

View file

@ -87,9 +87,11 @@ final class JvmOptionsParser {
.filter(Predicate.not(String::isBlank))
.collect(Collectors.toUnmodifiableList()));
}
final List<String> ergonomicJvmOptions = JvmErgonomics.choose(jvmOptions);
jvmOptions.addAll(ergonomicJvmOptions);
final String spaceDelimitedJvmOptions = spaceDelimitJvmOptions(jvmOptions);
final List<String> substitutedJvmOptions =
substitutePlaceholders(jvmOptions, Map.of("ES_TMPDIR", System.getenv("ES_TMPDIR")));
final List<String> ergonomicJvmOptions = JvmErgonomics.choose(substitutedJvmOptions);
substitutedJvmOptions.addAll(ergonomicJvmOptions);
final String spaceDelimitedJvmOptions = spaceDelimitJvmOptions(substitutedJvmOptions);
Launchers.outPrintln(spaceDelimitedJvmOptions);
Launchers.exit(0);
} else {
@ -115,6 +117,24 @@ final class JvmOptionsParser {
}
}
static List<String> substitutePlaceholders(final List<String> jvmOptions, final Map<String, String> substitutions) {
final Map<String, String> placeholderSubstitutions =
substitutions.entrySet().stream().collect(Collectors.toMap(e -> "${" + e.getKey() + "}", Map.Entry::getValue));
return jvmOptions.stream()
.map(
jvmOption -> {
String actualJvmOption = jvmOption;
int start = jvmOption.indexOf("${");
if (start >= 0 && jvmOption.indexOf('}', start) > 0) {
for (final Map.Entry<String, String> placeholderSubstitution : placeholderSubstitutions.entrySet()) {
actualJvmOption = actualJvmOption.replace(placeholderSubstitution.getKey(), placeholderSubstitution.getValue());
}
}
return actualJvmOption;
})
.collect(Collectors.toList());
}
/**
* Callback for valid JVM options.
*/

View file

@ -30,6 +30,7 @@ import java.util.Locale;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.equalTo;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
@ -39,6 +40,12 @@ import static org.junit.Assert.fail;
public class JvmOptionsParserTests extends LaunchersTestCase {
public void testSubstitution() {
final List<String> jvmOptions =
JvmOptionsParser.substitutePlaceholders(List.of("-Djava.io.tmpdir=${ES_TMPDIR}"), Map.of("ES_TMPDIR", "/tmp/elasticsearch"));
assertThat(jvmOptions, contains("-Djava.io.tmpdir=/tmp/elasticsearch"));
}
public void testUnversionedOptions() throws IOException {
try (StringReader sr = new StringReader("-Xms1g\n-Xmx1g");
BufferedReader br = new BufferedReader(sr)) {

View file

@ -93,6 +93,8 @@ buildRestTests.docs = fileTree(projectDir) {
listSnippets.docs = buildRestTests.docs
listConsoleCandidates.docs = buildRestTests.docs
Closure setupTwitter = { String name, int count ->
buildRestTests.setups[name] = '''
- do:
@ -574,7 +576,6 @@ buildRestTests.setups['library'] = '''
- do:
bulk:
index: library
type: book
refresh: true
body: |
{"index":{"_id": "Leviathan Wakes"}}
@ -923,7 +924,6 @@ buildRestTests.setups['farequote_data'] = buildRestTests.setups['farequote_index
- do:
bulk:
index: farequote
type: metric
refresh: true
body: |
{"index": {"_id":"1"}}
@ -983,7 +983,6 @@ buildRestTests.setups['server_metrics_data'] = buildRestTests.setups['server_met
- do:
bulk:
index: server-metrics
type: metric
refresh: true
body: |
{"index": {"_id":"1177"}}
@ -1189,7 +1188,7 @@ buildRestTests.setups['simple_kibana_continuous_pivot'] = buildRestTests.setups[
- do:
raw:
method: PUT
path: _data_frame/transforms/simple-kibana-ecomm-pivot
path: _transform/simple-kibana-ecomm-pivot
body: >
{
"source": {

View file

@ -96,6 +96,10 @@ include-tagged::{doc-tests-file}[{api}-outlier-detection-customized]
<1> Constructing a new OutlierDetection object
<2> The method used to perform the analysis
<3> Number of neighbors taken into account during analysis
<4> The min `outlier_score` required to compute feature influence
<5> Whether to compute feature influence
<6> The proportion of the data set that is assumed to be outlying prior to outlier detection
<7> Whether to apply standardization to feature values
===== Regression

View file

@ -23,6 +23,8 @@ The +{request}+ supports retrieving API key information for
. A specific key or all API keys owned by the current authenticated user
. All API keys if the user is authorized to do so
===== Retrieve a specific API key by its id
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
@ -59,6 +61,12 @@ include-tagged::{doc-tests-file}[get-user-realm-api-keys-request]
include-tagged::{doc-tests-file}[get-api-keys-owned-by-authenticated-user-request]
--------------------------------------------------
===== Retrieve all API keys if the user is authorized to do so
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[get-all-api-keys-request]
--------------------------------------------------
include::../execution.asciidoc[]
[id="{upid}-{api}-response"]

View file

@ -25,7 +25,7 @@ PUT /products
}
}
POST /products/_doc/_bulk?refresh
POST /products/_bulk?refresh
{"index":{"_id":0}}
{"genre": "rock", "product": "Product A"}
{"index":{"_id":1}}

View file

@ -42,7 +42,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v]
////
Hidden setup for example:
[source,console]
[source,console,id=cat-aliases-example]
--------------------------------------------------
PUT test1
{

View file

@ -41,7 +41,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v]
[[cat-allocation-api-example]]
==== {api-examples-title}
[source,console]
[source,console,id=cat-allocation-example]
--------------------------------------------------
GET /_cat/allocation?v
--------------------------------------------------

View file

@ -50,7 +50,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v]
The following `count` API request retrieves the document count of a single
index, `twitter`.
[source,console]
[source,console,id=cat-count-individual-example]
--------------------------------------------------
GET /_cat/count/twitter?v
--------------------------------------------------
@ -72,7 +72,7 @@ epoch timestamp count
The following `count` API request retrieves the document count of all indices in
the cluster.
[source,console]
[source,console,id=cat-count-all-example]
--------------------------------------------------
GET /_cat/count?v
--------------------------------------------------

View file

@ -48,7 +48,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v]
////
Hidden setup snippet to build an index with fielddata so our results are real:
[source,console]
[source,console,id=cat-fielddata-example]
--------------------------------------------------
PUT test
{

View file

@ -67,7 +67,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v]
By default, the cat health API returns `HH:MM:SS` and
https://en.wikipedia.org/wiki/Unix_time[Unix `epoch`] timestamps. For example:
[source,console]
[source,console,id=cat-health-example]
--------------------------------------------------
GET /_cat/health?v
--------------------------------------------------
@ -87,7 +87,7 @@ epoch timestamp cluster status node.total node.data shards pri relo i
===== Example without a timestamp
You can use the `ts` (timestamps) parameter to disable timestamps. For example:
[source,console]
[source,console,id=cat-health-no-timestamp-example]
--------------------------------------------------
GET /_cat/health?v&ts=false
--------------------------------------------------

View file

@ -41,13 +41,13 @@ DELETE /_ccr/auto_follow/<auto_follow_pattern_name>
* If the {es} {security-features} are enabled, you must have `manage_ccr` cluster
privileges on the cluster that contains the follower index. For more information,
see {stack-ov}/security-privileges.html[Security privileges].
see <<security-privileges>>.
[[ccr-delete-auto-follow-pattern-desc]]
==== {api-description-title}
This API deletes a configured collection of
{stack-ov}/ccr-auto-follow.html[auto-follow patterns].
<<ccr-auto-follow,auto-follow patterns>>.
[[ccr-delete-auto-follow-pattern-path-parms]]
==== {api-path-parms-title}

View file

@ -53,12 +53,12 @@ GET /_ccr/auto_follow/<auto_follow_pattern_name>
* If the {es} {security-features} are enabled, you must have `manage_ccr` cluster
privileges on the cluster that contains the follower index. For more information,
see {stack-ov}/security-privileges.html[Security privileges].
see <<security-privileges>>.
[[ccr-get-auto-follow-pattern-desc]]
==== {api-description-title}
This API gets configured {stack-ov}/ccr-auto-follow.html[auto-follow patterns].
This API gets configured <<ccr-auto-follow,auto-follow patterns>>.
This API will return the specified auto-follow pattern collection.
[[ccr-get-auto-follow-pattern-path-parms]]

View file

@ -45,14 +45,13 @@ DELETE /_ccr/auto_follow/auto_follow_pattern_name
* If the {es} {security-features} are enabled, you must have `read` and `monitor`
index privileges for the leader index patterns. You must also have `manage_ccr`
cluster privileges on the cluster that contains the follower index. For more
information, see
{stack-ov}/security-privileges.html[Security privileges].
information, see <<security-privileges>>.
[[ccr-put-auto-follow-pattern-desc]]
==== {api-description-title}
This API creates a new named collection of
{stack-ov}/ccr-auto-follow.html[auto-follow patterns] against the remote cluster
<<ccr-auto-follow,auto-follow patterns>> against the remote cluster
specified in the request body. Newly created indices on the remote cluster
matching any of the specified patterns will be automatically configured as follower
indices.

View file

@ -42,8 +42,7 @@ GET /<index>/_ccr/info
==== {api-prereq-title}
* If the {es} {security-features} are enabled, you must have `monitor` cluster
privileges. For more information, see
{stack-ov}/security-privileges.html[Security privileges].
privileges. For more information, see <<security-privileges>>.
[[ccr-get-follow-info-desc]]
==== {api-description-title}

View file

@ -43,7 +43,7 @@ GET /<index>/_ccr/stats
* If the {es} {security-features} are enabled, you must have `monitor` cluster
privileges on the cluster that contains the follower index. For more information,
see {stack-ov}/security-privileges.html[Security privileges].
see <<security-privileges>>.
[[ccr-get-follow-stats-desc]]
==== {api-description-title}

View file

@ -70,7 +70,7 @@ POST /<leader_index>/_ccr/forget_follower
* If the {es} {security-features} are enabled, you must have `manage_leader_index`
index privileges for the leader index. For more information, see
{stack-ov}/security-privileges.html[Security privileges].
<<security-privileges>>.
[[ccr-post-forget-follower-desc]]
==== {api-description-title}

View file

@ -37,7 +37,7 @@ POST /<follower_index>/_ccr/pause_follow
* If the {es} {security-features} are enabled, you must have `manage_ccr` cluster
privileges on the cluster that contains the follower index. For more information,
see {stack-ov}/security-privileges.html[Security privileges].
see <<security-privileges>>.
[[ccr-post-pause-follow-desc]]
==== {api-description-title}

View file

@ -51,7 +51,7 @@ POST /<follower_index>/_ccr/resume_follow
index privileges for the follower index. You must have `read` and `monitor`
index privileges for the leader index. You must also have `manage_ccr` cluster
privileges on the cluster that contains the follower index. For more information,
see {stack-ov}/security-privileges.html[Security privileges].
see <<security-privileges>>.
[[ccr-post-resume-follow-desc]]
==== {api-description-title}

View file

@ -41,7 +41,7 @@ POST /<follower_index>/_ccr/unfollow
* If the {es} {security-features} are enabled, you must have `manage_follow_index`
index privileges for the follower index. For more information, see
{stack-ov}/security-privileges.html[Security privileges].
<<security-privileges>>.
[[ccr-post-unfollow-desc]]
==== {api-description-title}

View file

@ -41,8 +41,7 @@ PUT /<follower_index>/_ccr/follow?wait_for_active_shards=1
and `manage_follow_index` index privileges for the follower index. You must have
`read` and `monitor` index privileges for the leader index. You must also have
`manage_ccr` cluster privileges on the cluster that contains the follower index.
For more information, see
{stack-ov}/security-privileges.html[Security privileges].
For more information, see <<security-privileges>>.
[[ccr-put-follow-desc]]
==== {api-description-title}

View file

@ -42,7 +42,7 @@ GET /_ccr/stats
* If the {es} {security-features} are enabled, you must have `monitor` cluster
privileges on the cluster that contains the follower index. For more information,
see {stack-ov}/security-privileges.html[Security privileges].
see <<security-privileges>>.
[[ccr-get-stats-desc]]
==== {api-description-title}

View file

@ -34,7 +34,7 @@ to control which users have authority to manage {ccr}.
By default, you can perform all of the steps in this tutorial by
using the built-in `elastic` user. However, a password must be set for this user
before the user can do anything. For information about how to set that password,
see {stack-ov}/security-getting-started.html[Tutorial: Getting started with security].
see <<security-getting-started>>.
If you are performing these steps in a production environment, take extra care
because the `elastic` user has the `superuser` role and you could inadvertently

View file

@ -51,11 +51,11 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=wait_for_completion]
[[tasks-api-response-codes]]
==== {api-response-codes-title}
tag::tasks-api-404[]
// tag::tasks-api-404[]
`404` (Missing resources)::
If `<task_id>` is specified but not found, this code indicates that there
are no resources that match the request.
end::tasks-api-404[]
// end::tasks-api-404[]
[[tasks-api-examples]]
==== {api-examples-title}

View file

@ -224,7 +224,7 @@ Alternatively, you can specify the `--ca-pass`, `--out`, and `--pass` parameters
By default, this command generates a file called `elastic-certificates.p12`,
which you can copy to the relevant configuration directory for each Elastic
product that you want to configure. For more information, see
{xpack-ref}/ssl-tls.html[Setting Up TLS on a Cluster].
<<ssl-tls>>.
[float]
[[certutil-silent]]

View file

@ -4,7 +4,7 @@
== elasticsearch-setup-passwords
The `elasticsearch-setup-passwords` command sets the passwords for the
{stack-ov}/built-in-users.html[built-in users].
<<built-in-users,built-in users>>.
[float]
=== Synopsis
@ -21,7 +21,7 @@ bin/elasticsearch-setup-passwords auto|interactive
This command is intended for use only during the initial configuration of the
{es} {security-features}. It uses the
{stack-ov}/built-in-users.html#bootstrap-elastic-passwords[`elastic` bootstrap password]
<<bootstrap-elastic-passwords,`elastic` bootstrap password>>
to run user management API requests. After you set a password for the `elastic`
user, the bootstrap password is no longer active and you cannot use this command.
Instead, you can change passwords by using the *Management > Users* UI in {kib}
@ -36,7 +36,7 @@ location, ensure that the *ES_PATH_CONF* environment variable returns the
correct path before you run the `elasticsearch-setup-passwords` command. You can
override settings in your `elasticsearch.yml` file by using the `-E` command
option. For more information about debugging connection failures, see
{stack-ov}/trb-security-setup.html[`elasticsearch-setup-passwords` command fails due to connection failure].
<<trb-security-setup>>.
[float]
=== Parameters

View file

@ -33,7 +33,7 @@ Leading or trailing whitespace is not allowed.
Passwords must be at least 6 characters long.
For more information, see {xpack-ref}/file-realm.html[File-based User Authentication].
For more information, see <<file-realm>>.
TIP: To ensure that {es} can read the user and role information at startup, run
`elasticsearch-users useradd` as the same user you use to run {es}. Running the

View file

@ -1,28 +1,37 @@
[[docs-bulk]]
=== Bulk API
++++
<titleabbrev>Bulk</titleabbrev>
++++
The bulk API makes it possible to perform many index/delete operations
in a single API call. This can greatly increase the indexing speed.
Performs multiple indexing or delete operations in a single API call.
This reduces overhead and can greatly increase indexing speed.
.Client support for bulk requests
*********************************************
[source,console]
--------------------------------------------------
POST _bulk
{ "index" : { "_index" : "test", "_id" : "1" } }
{ "field1" : "value1" }
{ "delete" : { "_index" : "test", "_id" : "2" } }
{ "create" : { "_index" : "test", "_id" : "3" } }
{ "field1" : "value3" }
{ "update" : {"_id" : "1", "_index" : "test"} }
{ "doc" : {"field2" : "value2"} }
--------------------------------------------------
Some of the officially supported clients provide helpers to assist with
bulk requests and reindexing of documents from one index to another:
[[docs-bulk-api-request]]
==== {api-request-title}
Perl::
`POST /_bulk`
See https://metacpan.org/pod/Search::Elasticsearch::Client::5_0::Bulk[Search::Elasticsearch::Client::5_0::Bulk]
and https://metacpan.org/pod/Search::Elasticsearch::Client::5_0::Scroll[Search::Elasticsearch::Client::5_0::Scroll]
`POST /<index>/_bulk`
Python::
[[docs-bulk-api-desc]]
==== {api-description-title}
See http://elasticsearch-py.readthedocs.org/en/master/helpers.html[elasticsearch.helpers.*]
Provides a way to perform multiple `index`, `create`, `delete`, and `update` actions in a single request.
*********************************************
The REST API endpoint is `/_bulk`, and it expects the following newline delimited JSON
(NDJSON) structure:
The actions are specified in the request body using a newline delimited JSON (NDJSON) structure:
[source,js]
--------------------------------------------------
@ -36,19 +45,70 @@ optional_source\n
--------------------------------------------------
// NOTCONSOLE
*NOTE*: The final line of data must end with a newline character `\n`. Each newline character
may be preceded by a carriage return `\r`. When sending requests to this endpoint the
`Content-Type` header should be set to `application/x-ndjson`.
The `index` and `create` actions expect a source on the next line,
and have the same semantics as the `op_type` parameter in the standard index API:
create fails if a document with the same ID already exists in the index,
index adds or replaces a document as necessary.
The possible actions are `index`, `create`, `delete`, and `update`.
`index` and `create` expect a source on the next
line, and have the same semantics as the `op_type` parameter to the
standard index API (i.e. create will fail if a document with the same
ID already exists in the index, whereas index will add or replace a
document as necessary). `delete` does not expect a source on the
following line, and has the same semantics as the standard delete API.
`update` expects that the partial doc, upsert and script and its options
are specified on the next line.
`update` expects that the partial doc, upsert,
or script and its options are specified on the next line.
`delete` does not expect a source on the next line and
has the same semantics as the standard delete API.
[NOTE]
====
The final line of data must end with a newline character `\n`.
Each newline character may be preceded by a carriage return `\r`.
When sending requests to the `_bulk` endpoint,
the `Content-Type` header should be set to `application/x-ndjson`.
====
Because this format uses literal `\n`'s as delimiters,
make sure that the JSON actions and sources are not pretty printed.
If you specify an index in the request URI,
it is used for any actions that don't explicitly specify an index.
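For example, a minimal sketch of a request that relies on the URI index (the index name and field values here are illustrative only):
[source,console]
--------------------------------------------------
POST /test/_bulk
{ "index" : { "_id" : "1" } }
{ "field1" : "value1" }
{ "delete" : { "_id" : "2" } }
--------------------------------------------------
Both actions are applied to the `test` index because neither specifies `_index` explicitly.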
A note on the format: The idea here is to make processing of this as
fast as possible. As some of the actions are redirected to other
shards on other nodes, only `action_meta_data` is parsed on the
receiving node side.
Client libraries using this protocol should strive to do something similar
on the client side, reducing buffering as much as possible.
The response to a bulk action is a large JSON structure with
the individual results of each action performed,
in the same order as the actions that appeared in the request.
The failure of a single action does not affect the remaining actions.
There is no "correct" number of actions to perform in a single bulk request.
Experiment with different settings to find the optimal size for your particular workload.
When using the HTTP API, make sure that the client does not send HTTP chunks,
as this will slow things down.
[float]
[[bulk-clients]]
===== Client support for bulk requests
Some of the officially supported clients provide helpers to assist with
bulk requests and reindexing of documents from one index to another:
Perl::
See https://metacpan.org/pod/Search::Elasticsearch::Client::5_0::Bulk[Search::Elasticsearch::Client::5_0::Bulk]
and https://metacpan.org/pod/Search::Elasticsearch::Client::5_0::Scroll[Search::Elasticsearch::Client::5_0::Scroll]
Python::
See http://elasticsearch-py.readthedocs.org/en/master/helpers.html[elasticsearch.helpers.*]
[float]
[[bulk-curl]]
===== Submitting bulk requests with cURL
If you're providing text file input to `curl`, you *must* use the
`--data-binary` flag instead of plain `-d`. The latter doesn't preserve
@ -65,9 +125,97 @@ $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --
// NOTCONSOLE
// Not converting to console because this shows how curl works
Because this format uses literal `\n`'s as delimiters, please be sure
that the JSON actions and sources are not pretty printed. Here is an
example of a correct sequence of bulk commands:
[float]
[[bulk-optimistic-concurrency-control]]
===== Optimistic Concurrency Control
Each `index` and `delete` action within a bulk API call may include the
`if_seq_no` and `if_primary_term` parameters in their respective action
and meta data lines. The `if_seq_no` and `if_primary_term` parameters control
how operations are executed, based on the last modification to existing
documents. See <<optimistic-concurrency-control>> for more details.
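For illustration, a sketch of an `index` action guarded by a sequence number and primary term (the values are placeholders, not taken from this page):
[source,console]
--------------------------------------------------
POST /_bulk
{ "index" : { "_index" : "test", "_id" : "1", "if_seq_no" : 3, "if_primary_term" : 1 } }
{ "field1" : "value1" }
--------------------------------------------------
The action is applied only if the current document still has that sequence number and primary term; otherwise the item fails with a version conflict.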
[float]
[[bulk-versioning]]
===== Versioning
Each bulk item can include the version value using the
`version` field. It automatically follows the behavior of the
index / delete operation based on the `_version` mapping. It also
supports the `version_type` (see <<index-versioning, versioning>>).
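For example, a sketch of an `index` action that carries an external version (the version number is an arbitrary example):
[source,console]
--------------------------------------------------
POST /_bulk
{ "index" : { "_index" : "test", "_id" : "1", "version" : 5, "version_type" : "external" } }
{ "field1" : "value1" }
--------------------------------------------------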
[float]
[[bulk-routing]]
===== Routing
Each bulk item can include the routing value using the
`routing` field. It automatically follows the behavior of the
index / delete operation based on the `_routing` mapping.
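For example, a sketch of an `index` action with an explicit routing value (the routing key is illustrative):
[source,console]
--------------------------------------------------
POST /_bulk
{ "index" : { "_index" : "test", "_id" : "1", "routing" : "user1" } }
{ "field1" : "value1" }
--------------------------------------------------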
[float]
[[bulk-wait-for-active-shards]]
===== Wait For Active Shards
When making bulk calls, you can set the `wait_for_active_shards`
parameter to require a minimum number of shard copies to be active
before starting to process the bulk request. See
<<index-wait-for-active-shards,here>> for further details and a usage
example.
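For example, a sketch that requires two active shard copies before the bulk request is processed (assuming the target index is configured with at least one replica):
[source,console]
--------------------------------------------------
POST /_bulk?wait_for_active_shards=2
{ "index" : { "_index" : "test", "_id" : "1" } }
{ "field1" : "value1" }
--------------------------------------------------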
[float]
[[bulk-refresh]]
===== Refresh
Control when the changes made by this request are visible to search. See
<<docs-refresh,refresh>>.
NOTE: Only the shards that receive the bulk request will be affected by
`refresh`. Imagine a `_bulk?refresh=wait_for` request with three
documents in it that happen to be routed to different shards in an index
with five shards. The request will only wait for those three shards to
refresh. The other two shards that make up the index do not
participate in the `_bulk` request at all.
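For example, a sketch that waits until the indexed document is visible to search before responding (index name and field are illustrative):
[source,console]
--------------------------------------------------
POST /_bulk?refresh=wait_for
{ "index" : { "_index" : "test", "_id" : "1" } }
{ "field1" : "value1" }
--------------------------------------------------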
[float]
[[bulk-security]]
===== Security
See <<url-access-control>>.
[float]
[[bulk-partial-responses]]
===== Partial responses
To ensure fast responses, the bulk API will respond with partial results if one or more shards fail.
See <<shard-failures, Shard failures>> for more information.
[[docs-bulk-api-path-params]]
==== {api-path-parms-title}
`<index>`::
(Optional, string) Name of the index to perform the bulk actions against.
[[docs-bulk-api-query-params]]
==== {api-query-parms-title}
include::{docdir}/rest-api/common-parms.asciidoc[tag=pipeline]
include::{docdir}/rest-api/common-parms.asciidoc[tag=refresh]
include::{docdir}/rest-api/common-parms.asciidoc[tag=routing]
include::{docdir}/rest-api/common-parms.asciidoc[tag=source]
include::{docdir}/rest-api/common-parms.asciidoc[tag=source_excludes]
include::{docdir}/rest-api/common-parms.asciidoc[tag=source_includes]
include::{docdir}/rest-api/common-parms.asciidoc[tag=timeout]
include::{docdir}/rest-api/common-parms.asciidoc[tag=wait_for_active_shards]
[[docs-bulk-api-example]]
==== {api-examples-title}
[source,console]
--------------------------------------------------
@ -81,7 +229,7 @@ POST _bulk
{ "doc" : {"field2" : "value2"} }
--------------------------------------------------
The result of this bulk operation is:
The API returns the following result:
[source,console-result]
--------------------------------------------------
@ -171,85 +319,9 @@ The result of this bulk operation is:
// TESTRESPONSE[s/"_seq_no" : 3/"_seq_no" : $body.items.3.update._seq_no/]
// TESTRESPONSE[s/"_primary_term" : 4/"_primary_term" : $body.items.3.update._primary_term/]
The endpoints are `/_bulk` and `/{index}/_bulk`. When the index is provided, it
will be used by default on bulk items that don't provide it explicitly.
A note on the format. The idea here is to make processing of this as
fast as possible. As some of the actions will be redirected to other
shards on other nodes, only `action_meta_data` is parsed on the
receiving node side.
Client libraries using this protocol should try and strive to do
something similar on the client side, and reduce buffering as much as
possible.
The response to a bulk action is a large JSON structure with the individual
results of each action that was performed in the same order as the actions that
appeared in the request. The failure of a single action does not affect the
remaining actions.
There is no "correct" number of actions to perform in a single bulk
call. You should experiment with different settings to find the optimum
size for your particular workload.
If using the HTTP API, make sure that the client does not send HTTP
chunks, as this will slow things down.
[float]
[[bulk-optimistic-concurrency-control]]
==== Optimistic Concurrency Control
Each `index` and `delete` action within a bulk API call may include the
`if_seq_no` and `if_primary_term` parameters in their respective action
and meta data lines. The `if_seq_no` and `if_primary_term` parameters control
how operations are executed, based on the last modification to existing
documents. See <<optimistic-concurrency-control>> for more details.
[float]
[[bulk-versioning]]
==== Versioning
Each bulk item can include the version value using the
`version` field. It automatically follows the behavior of the
index / delete operation based on the `_version` mapping. It also
support the `version_type` (see <<index-versioning, versioning>>).
[float]
[[bulk-routing]]
==== Routing
Each bulk item can include the routing value using the
`routing` field. It automatically follows the behavior of the
index / delete operation based on the `_routing` mapping.
[float]
[[bulk-wait-for-active-shards]]
==== Wait For Active Shards
When making bulk calls, you can set the `wait_for_active_shards`
parameter to require a minimum number of shard copies to be active
before starting to process the bulk request. See
<<index-wait-for-active-shards,here>> for further details and a usage
example.
[float]
[[bulk-refresh]]
==== Refresh
Control when the changes made by this request are visible to search. See
<<docs-refresh,refresh>>.
NOTE: Only the shards that receive the bulk request will be affected by
`refresh`. Imagine a `_bulk?refresh=wait_for` request with three
documents in it that happen to be routed to different shards in an index
with five shards. The request will only wait for those three shards to
refresh. The other two shards that make up the index do not
participate in the `_bulk` request at all.
[float]
[[bulk-update]]
==== Update
===== Bulk update example
When using the `update` action, `retry_on_conflict` can be used as a field in
the action itself (not in the extra payload line), to specify how many
@ -276,13 +348,3 @@ POST _bulk
--------------------------------------------------
// TEST[continued]
[float]
[[bulk-security]]
==== Security
See <<url-access-control>>.
[float]
[[bulk-partial-responses]]
==== Partial responses
To ensure fast responses, the bulk API will respond with partial results if one or more shards fail. See <<shard-failures, Shard failures>> for more information.

View file

@ -6,6 +6,12 @@
Retrieves the specified JSON document from an index.
[source,console]
--------------------------------------------------
GET twitter/_doc/0
--------------------------------------------------
// TEST[setup:twitter]
[[docs-get-api-request]]
==== {api-request-title}
@ -150,32 +156,21 @@ deleted documents in the background as you continue to index more data.
[[docs-get-api-query-params]]
==== {api-query-parms-title}
`preference`::
(Optional, string) Specify the node or shard the operation should
be performed on (default: random).
include::{docdir}/rest-api/common-parms.asciidoc[tag=preference]
`realtime`::
(Optional, boolean) Set to `false` to disable real time GET
(default: `true`). See <<realtime>>.
include::{docdir}/rest-api/common-parms.asciidoc[tag=realtime]
include::{docdir}/rest-api/common-parms.asciidoc[tag=refresh]
include::{docdir}/rest-api/common-parms.asciidoc[tag=routing]
`stored_fields`::
(Optional, boolean) Set to `true` to retrieve the document fields stored in the
index rather than the document `_source` (default: `false`).
include::{docdir}/rest-api/common-parms.asciidoc[tag=stored_fields]
`_source`::
(Optional, list) Set to `false` to disable source retrieval (default: `true`).
You can also specify a comma-separated list of the fields
you want to retrieve.
include::{docdir}/rest-api/common-parms.asciidoc[tag=source]
`_source_excludes`::
(Optional, list) Specify the source fields you want to exclude.
include::{docdir}/rest-api/common-parms.asciidoc[tag=source_excludes]
`_source_includes`::
(Optional, list) Specify the source fields you want to retrieve.
include::{docdir}/rest-api/common-parms.asciidoc[tag=source_includes]
include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-version]

View file

@ -1,15 +1,10 @@
[[docs-multi-get]]
=== Multi Get API
=== Multi get (mget) API
++++
<titleabbrev>Multi get</titleabbrev>
++++
The Multi get API returns multiple documents based on an index and id
(and possibly routing). The response includes a `docs` array
with all the fetched documents in order corresponding to the original multi-get
request (if there was a failure for a specific get, an object containing this
error is included in place in the response instead). The structure of a
successful get is similar in structure to a document provided by the
<<docs-get,get>> API.
Here is an example:
Retrieves multiple JSON documents by ID.
[source,console]
--------------------------------------------------
@ -17,23 +12,121 @@ GET /_mget
{
"docs" : [
{
"_index" : "test",
"_index" : "twitter",
"_id" : "1"
},
{
"_index" : "test",
"_index" : "twitter",
"_id" : "2"
}
]
}
--------------------------------------------------
// TEST[setup:twitter]
The `mget` endpoint can also be used against an index (in which case it
is not required in the body):
[[docs-multi-get-api-request]]
==== {api-request-title}
`GET /_mget`
`GET /<index>/_mget`
[[docs-multi-get-api-desc]]
==== {api-description-title}
You use `mget` to retrieve multiple documents from one or more indices.
If you specify an index in the request URI, you only need to specify the document IDs in the request body.
[[mget-security]]
===== Security
See <<url-access-control>>.
[[multi-get-partial-responses]]
===== Partial responses
To ensure fast responses, the multi get API responds with partial results if one or more shards fail.
See <<shard-failures, Shard failures>> for more information.
[[docs-multi-get-api-path-params]]
==== {api-path-parms-title}
include::{docdir}/rest-api/common-parms.asciidoc[tag=index]
[[docs-multi-get-api-query-params]]
==== {api-query-parms-title}
include::{docdir}/rest-api/common-parms.asciidoc[tag=preference]
include::{docdir}/rest-api/common-parms.asciidoc[tag=realtime]
include::{docdir}/rest-api/common-parms.asciidoc[tag=refresh]
include::{docdir}/rest-api/common-parms.asciidoc[tag=routing]
include::{docdir}/rest-api/common-parms.asciidoc[tag=stored_fields]
include::{docdir}/rest-api/common-parms.asciidoc[tag=source]
include::{docdir}/rest-api/common-parms.asciidoc[tag=source_excludes]
include::{docdir}/rest-api/common-parms.asciidoc[tag=source_includes]
[[docs-multi-get-api-request-body]]
==== {api-request-body-title}
`docs`::
(Optional, array) The documents you want to retrieve.
Required if no index is specified in the request URI.
You can specify the following attributes for each
document:
+
--
`_id`::
(Required, string) The unique document ID.
`_index`::
(Optional, string)
The index that contains the document.
Required if no index is specified in the request URI.
`_routing`::
(Optional, string) The key for the primary shard the document resides on.
Required if routing is used during indexing.
`_source`::
(Optional, boolean) If `false`, excludes all `_source` fields. Defaults to `true`.
`source_include`:::
(Optional, array) The fields to extract and return from the `_source` field.
`source_exclude`:::
(Optional, array) The fields to exclude from the returned `_source` field.
`_stored_fields`::
(Optional, array) The stored fields you want to retrieve.
--
`ids`::
(Optional, array) The IDs of the documents you want to retrieve.
Allowed when the index is specified in the request URI.
[[multi-get-api-response-body]]
==== {api-response-body-title}
The response includes a `docs` array that contains the documents in the order specified in the request.
The structure of the returned documents is similar to that returned by the <<docs-get,get>> API.
If there is a failure getting a particular document, the error is included in place of the document.
[[docs-multi-get-api-example]]
==== {api-examples-title}
[[mget-ids]]
===== Get documents by ID
If you specify an index in the request URI, only the document IDs are required in the request body:
[source,console]
--------------------------------------------------
GET /test/_mget
GET /twitter/_mget
{
"docs" : [
{
@ -45,30 +138,31 @@ GET /test/_mget
]
}
--------------------------------------------------
//CONSOLE
// TEST[setup:twitter]
In which case, the `ids` element can directly be used to simplify the
request:
You can use the `ids` element to simplify the request:
[source,console]
--------------------------------------------------
GET /test/_mget
GET /twitter/_mget
{
"ids" : ["1", "2"]
}
--------------------------------------------------
// TEST[setup:twitter]
[float]
[[mget-source-filtering]]
==== Source filtering
===== Filter source fields
By default, the `_source` field will be returned for every document (if stored).
Similar to the <<get-source-filtering,get>> API, you can retrieve only parts of
the `_source` (or not at all) by using the `_source` parameter. You can also use
the url parameters `_source`, `_source_includes`, and `_source_excludes` to specify defaults,
which will be used when there are no per-document instructions.
By default, the `_source` field is returned for every document (if stored).
Use the `_source` and `source_include` or `source_exclude` attributes to
filter which fields are returned for a particular document.
You can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the
request URI to specify the defaults to use when there are no per-document instructions.
For example:
For example, the following request sets `_source` to false for document 1 to exclude the
source entirely, retrieves `field3` and `field4` from document 2, and retrieves the `user` field
from document 3 but filters out the `user.location` field.
[source,console]
--------------------------------------------------
@ -97,13 +191,16 @@ GET /_mget
}
--------------------------------------------------
[float]
[[mget-fields]]
==== Fields
===== Get stored fields
Specific stored fields can be specified to be retrieved per document to get, similar to the <<get-stored-fields,stored_fields>> parameter of the Get API.
For example:
Use the `stored_fields` attribute to specify the set of stored fields you want
to retrieve. Any requested fields that are not stored are ignored.
You can include the `stored_fields` query parameter in the request URI to specify the defaults
to use when there are no per-document instructions.
For example, the following request retrieves `field1` and `field2` from document 1, and
`field3` and `field4` from document 2:
[source,console]
--------------------------------------------------
@ -124,8 +221,9 @@ GET /_mget
}
--------------------------------------------------
Alternatively, you can specify the `stored_fields` parameter in the query string
as a default to be applied to all documents.
The following request retrieves `field1` and `field2` from all documents by default.
These default fields are returned for document 1, but
overridden to return `field3` and `field4` for document 2.
[source,console]
--------------------------------------------------
@ -133,23 +231,22 @@ GET /test/_mget?stored_fields=field1,field2
{
"docs" : [
{
"_id" : "1" <1>
"_id" : "1"
},
{
"_id" : "2",
"stored_fields" : ["field3", "field4"] <2>
"stored_fields" : ["field3", "field4"]
}
]
}
--------------------------------------------------
<1> Returns `field1` and `field2`
<2> Returns `field3` and `field4`
[float]
[[mget-routing]]
==== Routing
===== Specify document routing
You can also specify a routing value as a parameter:
If routing is used during indexing, you need to specify the routing value to retrieve documents.
For example, the following request fetches `test/_doc/2` from the shard corresponding to routing key `key1`,
and fetches `test/_doc/1` from the shard corresponding to routing key `key2`.
[source,console]
--------------------------------------------------
@ -168,17 +265,3 @@ GET /_mget?routing=key1
]
}
--------------------------------------------------
In this example, document `test/_doc/2` will be fetched from the shard corresponding to routing key `key1` but
document `test/_doc/1` will be fetched from the shard corresponding to routing key `key2`.
[float]
[[mget-security]]
==== Security
See <<url-access-control>>.
[float]
[[multi-get-partial-responses]]
==== Partial responses
To ensure fast responses, the multi get API will respond with partial results if one or more shards fail. See <<shard-failures, Shard failures>> for more information.
View file
@ -1,14 +1,10 @@
[[docs-multi-termvectors]]
=== Multi termvectors API
=== Multi term vectors API
++++
<titleabbrev>Multi term vectors</titleabbrev>
++++
Multi termvectors API allows to get multiple termvectors at once. The
documents from which to retrieve the term vectors are specified by an index and id.
But the documents could also be artificially provided in the request itself.
The response includes a `docs`
array with all the fetched termvectors, each element having the structure
provided by the <<docs-termvectors,termvectors>>
API. Here is an example:
Retrieves multiple term vectors with a single request.
[source,console]
--------------------------------------------------
@ -32,10 +28,64 @@ POST /_mtermvectors
--------------------------------------------------
// TEST[setup:twitter]
See the <<docs-termvectors,termvectors>> API for a description of possible parameters.
[[docs-multi-termvectors-api-request]]
==== {api-request-title}
The `_mtermvectors` endpoint can also be used against an index (in which case it
is not required in the body):
`POST /_mtermvectors`
`POST /<index>/_mtermvectors`
[[docs-multi-termvectors-api-desc]]
==== {api-description-title}
You can specify existing documents by index and ID or
provide artificial documents in the body of the request.
The index can be specified in the body of the request or in the request URI.
The response contains a `docs` array with all the fetched termvectors.
Each element has the structure provided by the <<docs-termvectors,termvectors>>
API.
See the <<docs-termvectors,termvectors>> API for more information about the data
that can be included in the response.
[[docs-multi-termvectors-api-path-params]]
==== {api-path-parms-title}
`<index>`::
(Optional, string) Name of the index that contains the documents.
[[docs-multi-termvectors-api-query-params]]
==== {api-query-parms-title}
include::{docdir}/rest-api/common-parms.asciidoc[tag=fields]
include::{docdir}/rest-api/common-parms.asciidoc[tag=field_statistics]
include::{docdir}/rest-api/common-parms.asciidoc[tag=offsets]
include::{docdir}/rest-api/common-parms.asciidoc[tag=payloads]
include::{docdir}/rest-api/common-parms.asciidoc[tag=positions]
include::{docdir}/rest-api/common-parms.asciidoc[tag=preference]
include::{docdir}/rest-api/common-parms.asciidoc[tag=routing]
include::{docdir}/rest-api/common-parms.asciidoc[tag=realtime]
include::{docdir}/rest-api/common-parms.asciidoc[tag=term_statistics]
include::{docdir}/rest-api/common-parms.asciidoc[tag=version]
include::{docdir}/rest-api/common-parms.asciidoc[tag=version_type]
[float]
[[docs-multi-termvectors-api-example]]
==== {api-examples-title}
If you specify an index in the request URI, the index does not need to be specified for each document
in the request body:
[source,console]
--------------------------------------------------
@ -57,7 +107,8 @@ POST /twitter/_mtermvectors
--------------------------------------------------
// TEST[setup:twitter]
If all requested documents are on same index and also the parameters are the same, the request can be simplified:
If all requested documents are in the same index and the parameters are the same, you can use the
following simplified syntax:
[source,console]
--------------------------------------------------
@ -74,9 +125,11 @@ POST /twitter/_mtermvectors
--------------------------------------------------
// TEST[setup:twitter]
Additionally, just like for the <<docs-termvectors,termvectors>>
API, term vectors could be generated for user provided documents.
The mapping used is determined by `_index`.
[[docs-multi-termvectors-artificial-doc]]
===== Artificial documents
You can also use `mtermvectors` to generate term vectors for _artificial_ documents provided
in the body of the request. The mapping used is determined by the specified `_index`.
[source,console]
--------------------------------------------------
File diff suppressed because it is too large

View file
@ -1,10 +1,10 @@
[[docs-termvectors]]
=== Term Vectors
=== Term vectors API
++++
<titleabbrev>Term vectors</titleabbrev>
++++
Returns information and statistics on terms in the fields of a particular
document. The document could be stored in the index or artificially provided
by the user. Term vectors are <<realtime,realtime>> by default, not near
realtime. This can be changed by setting `realtime` parameter to `false`.
Retrieves information and statistics for terms in the fields of a particular document.
[source,console]
--------------------------------------------------
@ -12,8 +12,19 @@ GET /twitter/_termvectors/1
--------------------------------------------------
// TEST[setup:twitter]
Optionally, you can specify the fields for which the information is
retrieved either with a parameter in the url
[[docs-termvectors-api-request]]
==== {api-request-title}
`GET /<index>/_termvectors/<_id>`
[[docs-termvectors-api-desc]]
==== {api-description-title}
You can retrieve term vectors for documents stored in the index or
for _artificial_ documents passed in the body of the request.
You can specify the fields you are interested in through the `fields` parameter,
or by adding the fields to the request body.
[source,console]
--------------------------------------------------
@ -21,18 +32,16 @@ GET /twitter/_termvectors/1?fields=message
--------------------------------------------------
// TEST[setup:twitter]
or by adding the requested fields in the request body (see
example below). Fields can also be specified with wildcards
in similar way to the <<query-dsl-multi-match-query,multi match query>>
Fields can be specified using wildcards, similar to the <<query-dsl-multi-match-query,multi match query>>.
[float]
==== Return values
Term vectors are <<realtime,real-time>> by default, not near real-time.
This can be changed by setting the `realtime` parameter to `false`.
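For example, the following request is a minimal sketch (reusing the `twitter` index from the surrounding examples) that disables real-time behavior for a single lookup:

[source,console]
----
GET /twitter/_termvectors/1?realtime=false
----
// TEST[setup:twitter]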
Three types of values can be requested: _term information_, _term statistics_
You can request three types of values: _term information_, _term statistics_
and _field statistics_. By default, all term information and field
statistics are returned for all fields but no term statistics.
statistics are returned for all fields but term statistics are excluded.
[float]
[[docs-termvectors-api-term-info]]
===== Term information
* term frequency in the field (always returned)
@ -52,7 +61,7 @@ should make sure that the string you are taking a sub-string of is also encoded
using UTF-16.
======
[float]
[[docs-termvectors-api-term-stats]]
===== Term statistics
Setting `term_statistics` to `true` (default is `false`) will
@ -65,7 +74,7 @@ return
By default these values are not returned since term statistics can
have a serious performance impact.
[float]
[[docs-termvectors-api-field-stats]]
===== Field statistics
Setting `field_statistics` to `false` (default is `true`) will
@ -77,8 +86,8 @@ omit :
* sum of total term frequencies (the sum of total term frequencies of
each term in this field)
[float]
===== Terms Filtering
[[docs-termvectors-api-terms-filtering]]
===== Terms filtering
With the `filter` parameter, the terms returned can also be filtered based
on their tf-idf scores. This can be useful in order to find out a good
@ -105,7 +114,7 @@ The following sub-parameters are supported:
`max_word_length`::
The maximum word length above which words will be ignored. Defaults to unbounded (`0`).
[float]
[[docs-termvectors-api-behavior]]
==== Behaviour
The term and field statistics are not accurate. Deleted documents
@ -116,8 +125,45 @@ whereas the absolute numbers have no meaning in this context. By default,
when requesting term vectors of artificial documents, a shard to get the statistics
from is randomly selected. Use `routing` only to hit a particular shard.
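For example, the following sketch requests term vectors for an artificial document while pinning the statistics to the shard selected by the hypothetical routing value `user1` (the document fields are illustrative only):

[source,console]
----
GET /twitter/_termvectors?routing=user1
{
  "doc": {
    "user": "user1",
    "message": "trying out term vectors"
  }
}
----
// TEST[setup:twitter]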
[float]
===== Example: Returning stored term vectors
[[docs-termvectors-api-path-params]]
==== {api-path-parms-title}
`<index>`::
(Required, string) Name of the index that contains the document.
`<_id>`::
(Optional, string) Unique identifier of the document.
[[docs-termvectors-api-query-params]]
==== {api-query-parms-title}
include::{docdir}/rest-api/common-parms.asciidoc[tag=fields]
include::{docdir}/rest-api/common-parms.asciidoc[tag=field_statistics]
include::{docdir}/rest-api/common-parms.asciidoc[tag=offsets]
include::{docdir}/rest-api/common-parms.asciidoc[tag=payloads]
include::{docdir}/rest-api/common-parms.asciidoc[tag=positions]
include::{docdir}/rest-api/common-parms.asciidoc[tag=preference]
include::{docdir}/rest-api/common-parms.asciidoc[tag=routing]
include::{docdir}/rest-api/common-parms.asciidoc[tag=realtime]
include::{docdir}/rest-api/common-parms.asciidoc[tag=term_statistics]
include::{docdir}/rest-api/common-parms.asciidoc[tag=version]
include::{docdir}/rest-api/common-parms.asciidoc[tag=version_type]
[[docs-termvectors-api-example]]
==== {api-examples-title}
[[docs-termvectors-api-stored-termvectors]]
===== Returning stored term vectors
First, we create an index that stores term vectors, payloads, etc.:
@ -259,8 +305,8 @@ Response:
// TEST[continued]
// TESTRESPONSE[s/"took": 6/"took": "$body.took"/]
[float]
===== Example: Generating term vectors on the fly
[[docs-termvectors-api-generate-termvectors]]
===== Generating term vectors on the fly
Term vectors which are not explicitly stored in the index are automatically
computed on the fly. The following request returns all information and statistics for the
@ -281,8 +327,7 @@ GET /twitter/_termvectors/1
// TEST[continued]
[[docs-termvectors-artificial-doc]]
[float]
===== Example: Artificial documents
===== Artificial documents
Term vectors can also be generated for artificial documents,
that is for documents not present in the index. For example, the following request would
@ -304,7 +349,6 @@ GET /twitter/_termvectors
// TEST[continued]
[[docs-termvectors-per-field-analyzer]]
[float]
====== Per-field analyzer
Additionally, a different analyzer than the one configured for the field can be provided
@ -369,8 +413,7 @@ Response:
[[docs-termvectors-terms-filtering]]
[float]
===== Example: Terms filtering
===== Terms filtering
Finally, the terms returned could be filtered based on their tf-idf scores. In
the example below we obtain the three most "interesting" keywords from the
View file
@ -36,7 +36,7 @@ current master node fails.
// tag::ccr-def[]
The {ccr} feature enables you to replicate indices in remote clusters to your
local cluster. For more information, see
{stack-ov}/xpack-ccr.html[{ccr-cap}].
{ref}/xpack-ccr.html[{ccr-cap}].
// end::ccr-def[]
[[glossary-ccs]] {ccs} (CCS)::
View file
@ -95,7 +95,7 @@ prevent non-administrators exfiltrating data.
+
--
The following example creates a new user `snapshot_user` in the
{stack-ov}/native-realm.html[native realm], but it is not important which
<<native-realm,native realm>>, but it is not important which
realm the user is a member of:
[source,console]
@ -202,7 +202,7 @@ Then log into one of the node hosts, navigate to {es} installation directory,
and follow these steps:
. Add a new user with the `superuser` built-in role to the
{stack-ov}/file-realm.html[file realm].
<<file-realm,file realm>>.
+
--
For example, create a user named `restore_user`:
View file
@ -22,6 +22,6 @@ It does *not* grant privileges to create repositories, restore snapshots, or
search within indices. Hence, the user can view and snapshot all indices, but cannot
access or modify any data.
For more information, see {stack-ov}/security-privileges.html[Security privileges]
and {stack-ov}/built-in-roles.html[Built-in roles].
For more information, see <<security-privileges>>
and <<built-in-roles>>.
====
View file
@ -30,7 +30,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
==== Authorization
You must have the `manage_ilm` cluster privilege to use this API.
For more information, see {stack-ov}/security-privileges.html[Security Privileges].
For more information, see <<security-privileges>>.
==== Examples
View file
@ -41,7 +41,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
You must have the `view_index_metadata` or `manage_ilm` or both privileges on the indices
being managed to use this API.
For more information, see {stack-ov}/security-privileges.html[Security Privileges].
For more information, see <<security-privileges>>.
==== Examples
View file
@ -30,7 +30,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
==== Authorization
You must have the `manage_ilm` or `read_ilm` or both cluster privileges to use this API.
For more information, see {stack-ov}/security-privileges.html[Security Privileges].
For more information, see <<security-privileges>>.
==== Examples
View file
@ -28,7 +28,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
==== Authorization
You must have the `manage_ilm` or `read_ilm` or both cluster privileges to use this API.
For more information, see {stack-ov}/security-privileges.html[Security Privileges].
For more information, see <<security-privileges>>.
==== Examples
View file
@ -39,7 +39,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
==== Authorization
You must have the `manage_ilm` privileges on the indices being managed to use this API.
For more information, see {stack-ov}/security-privileges.html[Security Privileges].
For more information, see <<security-privileges>>.
==== Examples
View file
@ -36,7 +36,7 @@ You must have the `manage_ilm` cluster privilege to use this API. You must
also have the `manage` index privilege on all indices being managed by `policy`.
All operations executed by {ilm} for a policy are executed as the user that
put the latest version of a policy.
For more information, see {stack-ov}/security-privileges.html[Security Privileges].
For more information, see <<security-privileges>>.
==== Examples
View file
@ -30,7 +30,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
==== Authorization
You must have the `manage_ilm` privileges on the indices being managed to use this API.
For more information, see {stack-ov}/security-privileges.html[Security Privileges].
For more information, see <<security-privileges>>.
==== Examples
View file
@ -30,7 +30,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
==== Authorization
You must have the `manage_ilm` privileges on the indices being managed to use this API.
For more information, see {stack-ov}/security-privileges.html[Security Privileges].
For more information, see <<security-privileges>>.
==== Examples
View file
@ -7,7 +7,9 @@ The Snapshot Lifecycle Management APIs are used to manage policies for the time
and frequency of automatic snapshots. Snapshot Lifecycle Management is related
to <<index-lifecycle-management,Index Lifecycle Management>>, however, instead
of managing a lifecycle of actions that are performed on a single index, SLM
allows configuring policies spanning multiple indices.
allows configuring policies spanning multiple indices. Snapshot Lifecycle
Management can also perform deletion of older snapshots based on a configurable
retention policy.
SLM policy management is split into three different CRUD APIs: a way to put or update
policies, a way to retrieve policies, and a way to delete unwanted policies, as
@ -43,7 +45,7 @@ You must have the `manage_slm` cluster privilege to use this API. You must also
have the `manage` index privilege on all indices being managed by `policy`. All
operations executed by {slm} for a policy are executed as the user that put the
latest version of a policy. For more information, see
{stack-ov}/security-privileges.html[Security Privileges].
<<security-privileges>>.
==== Example
@ -62,7 +64,11 @@ PUT /_slm/policy/daily-snapshots
"ignore_unavailable": false,
"include_global_state": false
},
"retention": {}
"retention": { <6>
"expire_after": "30d", <7>
"min_count": 5, <8>
"max_count": 50 <9>
}
}
--------------------------------------------------
// TEST[setup:setup-repository]
@ -72,6 +78,10 @@ PUT /_slm/policy/daily-snapshots
<3> Which repository to take the snapshot in
<4> Any extra snapshot configuration
<5> Which indices the snapshot should contain
<6> Optional retention configuration
<7> Keep snapshots for 30 days
<8> Always keep at least 5 successful snapshots, even if they're more than 30 days old
<9> Keep no more than 50 successful snapshots, even if they're less than 30 days old
The top-level keys that the policy supports are described below:
@ -139,7 +149,11 @@ The output looks similar to the following:
"ignore_unavailable": false,
"include_global_state": false
},
"retention": {}
"retention": {
"expire_after": "30d",
"min_count": 5,
"max_count": 50
}
},
"stats": {
"policy": "daily-snapshots",
@ -229,7 +243,11 @@ Which, in this case shows an error because the index did not exist:
"ignore_unavailable": false,
"include_global_state": false
},
"retention": {}
"retention": {
"expire_after": "30d",
"min_count": 5,
"max_count": 50
}
},
"stats": {
"policy": "daily-snapshots",
@ -270,6 +288,11 @@ PUT /_slm/policy/daily-snapshots
"indices": ["data-*", "important"],
"ignore_unavailable": true,
"include_global_state": false
},
"retention": {
"expire_after": "30d",
"min_count": 5,
"max_count": 50
}
}
--------------------------------------------------
@ -318,7 +341,11 @@ Which now includes the successful snapshot information:
"ignore_unavailable": true,
"include_global_state": false
},
"retention": {}
"retention": {
"expire_after": "30d",
"min_count": 5,
"max_count": 50
}
},
"stats": {
"policy": "daily-snapshots",
@ -374,22 +401,14 @@ Which returns a response similar to:
"retention_timed_out": 0,
"retention_deletion_time": "1.4s",
"retention_deletion_time_millis": 1404,
"policy_metrics": [
{
"policy": "daily-snapshots",
"snapshots_taken": 1,
"snapshots_failed": 1,
"snapshots_deleted": 0,
"snapshot_deletion_failures": 0
}
],
"policy_stats": [ ],
"total_snapshots_taken": 1,
"total_snapshots_failed": 1,
"total_snapshots_deleted": 0,
"total_snapshot_deletion_failures": 0
}
--------------------------------------------------
// TESTRESPONSE[s/runs": 13/runs": $body.retention_runs/ s/_failed": 0/_failed": $body.retention_failed/ s/_timed_out": 0/_timed_out": $body.retention_timed_out/ s/"1.4s"/$body.retention_deletion_time/ s/1404/$body.retention_deletion_time_millis/]
// TESTRESPONSE[s/runs": 13/runs": $body.retention_runs/ s/_failed": 0/_failed": $body.retention_failed/ s/_timed_out": 0/_timed_out": $body.retention_timed_out/ s/"1.4s"/$body.retention_deletion_time/ s/1404/$body.retention_deletion_time_millis/ s/total_snapshots_taken": 1/total_snapshots_taken": $body.total_snapshots_taken/ s/total_snapshots_failed": 1/total_snapshots_failed": $body.total_snapshots_failed/ s/"policy_stats": [.*]/"policy_stats": $body.policy_stats/]
[[slm-api-delete]]
=== Delete Snapshot Lifecycle Policy API
@ -410,3 +429,29 @@ any currently ongoing snapshots or remove any previously taken snapshots.
DELETE /_slm/policy/daily-snapshots
--------------------------------------------------
// TEST[continued]
[[slm-api-execute-retention]]
=== Execute Snapshot Lifecycle Retention API
While Snapshot Lifecycle Management retention is usually invoked through the global cluster settings
for its schedule, it can sometimes be useful to invoke a retention run to expunge expired snapshots
immediately. This API allows you to trigger a one-off retention run.
==== Example
To immediately start snapshot retention, use the following request:
[source,console]
--------------------------------------------------
POST /_slm/_execute_retention
--------------------------------------------------
This API returns immediately because retention runs asynchronously in the background:
[source,console-result]
--------------------------------------------------
{
"acknowledged": true
}
--------------------------------------------------
View file
@ -27,7 +27,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
==== Authorization
You must have the `manage_ilm` cluster privilege to use this API.
For more information, see {stack-ov}/security-privileges.html[Security privileges].
For more information, see <<security-privileges>>.
==== Examples
View file
@ -32,7 +32,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
==== Authorization
You must have the `manage_ilm` cluster privilege to use this API.
For more information, see {stack-ov}/security-privileges.html[Security Privileges].
For more information, see <<security-privileges>>.
==== Examples
View file
@ -6,7 +6,8 @@
Let's get started with snapshot lifecycle management (SLM) by working through a
hands-on scenario. The goal of this example is to automatically back up {es}
indices using the <<modules-snapshots,snapshots>> every day at a particular
time.
time. Once these snapshots have been created, they are kept for a configured
amount of time and then deleted according to a configured retention policy.
[float]
[[slm-and-security]]
@ -14,8 +15,9 @@ time.
Before starting, it's important to understand the privileges that are needed
when configuring SLM if you are using the security plugin. There are two
built-in cluster privileges that can be used to assist: `manage_slm` and
`read_slm`. It's also good to note that the `create_snapshot` permission
allows taking snapshots even for indices the role may not have access to.
`read_slm`. It's also good to note that the `cluster:admin/snapshot/*`
permission allows taking and deleting snapshots even for indices the role may
not have access to.
An example of configuring an administrator role for SLM follows:
@ -23,7 +25,7 @@ An example of configuring an administrator role for SLM follows:
-----------------------------------
POST /_security/role/slm-admin
{
"cluster": ["manage_slm", "create_snapshot"],
"cluster": ["manage_slm", "cluster:admin/snapshot/*"],
"indices": [
{
"names": [".slm-history-*"],
@ -82,6 +84,10 @@ snapshots, what the snapshots should be named, and which indices should be
included, among other things. We'll use the <<slm-api-put,Put Policy>> API
to create the policy.
When configuring a policy, you can also optionally configure retention. See
the <<slm-retention,SLM retention>> documentation for a full description of
how retention works.
[source,console]
--------------------------------------------------
PUT /_slm/policy/nightly-snapshots
@ -92,7 +98,11 @@ PUT /_slm/policy/nightly-snapshots
"config": { <4>
"indices": ["*"] <5>
},
"retention": {}
"retention": { <6>
"expire_after": "30d", <7>
"min_count": 5, <8>
"max_count": 50 <9>
}
}
--------------------------------------------------
// TEST[continued]
@ -105,6 +115,10 @@ PUT /_slm/policy/nightly-snapshots
<3> the repository the snapshot should be stored in
<4> the configuration to be used for the snapshot requests (see below)
<5> which indices should be included in the snapshot, in this case, every index
<6> Optional retention configuration
<7> Keep snapshots for 30 days
<8> Always keep at least 5 successful snapshots
<9> Keep no more than 50 successful snapshots, even if they're less than 30 days old
This policy will take a snapshot of every index each day at 1:30AM UTC.
Snapshots are incremental, allowing frequent snapshots to be stored efficiently,
@ -166,7 +180,11 @@ next time the policy will be executed.
"config": {
"indices": ["*"],
},
"retention": {}
"retention": {
"expire_after": "30d",
"min_count": 5,
"max_count": 50
}
},
"last_success": { <1>
"snapshot_name": "nightly-snap-2019.04.24-tmtnyjtrsxkhbrrdcgg18a", <2>
View file
@ -87,3 +87,5 @@ include::start-stop-ilm.asciidoc[]
include::ilm-with-existing-indices.asciidoc[]
include::getting-started-slm.asciidoc[]
include::slm-retention.asciidoc[]
View file
@ -0,0 +1,119 @@
[role="xpack"]
[testenv="basic"]
[[slm-retention]]
== Snapshot lifecycle management retention
Automatic deletion of older snapshots is an optional feature of snapshot lifecycle management.
Retention is run as a cluster level task that is not associated with a particular policy's schedule
(though the configuration of which snapshots to keep is done on a per-policy basis). Retention
configuration consists of two parts: a cluster-level configuration that controls when retention
runs and for how long, and a per-policy configuration that controls which snapshots are eligible
for retention.
The cluster level settings for retention are shown below, and can be changed dynamically using the
<<cluster-update-settings,cluster-update-settings>> API:
|=====================================
| Setting | Default value | Description
| `slm.retention_schedule` | `0 30 1 * * ?` | A periodic or absolute time schedule for when
retention should be run. Supports all values supported by the cron scheduler: <<schedule-cron,Cron
scheduler configuration>>. Retention can also be manually run using the
<<slm-api-execute-retention,Execute retention API>>. Defaults to daily at 1:30am in the master
node's timezone.
| `slm.retention_duration` | `"1h"` | A limit of how long SLM should spend deleting old snapshots.
|=====================================
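For example, the following request is a minimal sketch that uses the <<cluster-update-settings,cluster update settings>> API to move the retention run to 2:30am (the schedule value here is only an example):

[source,console]
----
PUT /_cluster/settings
{
  "persistent": {
    "slm.retention_schedule": "0 30 2 * * ?"
  }
}
----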
Policy level configuration for retention is done inside the `retention` object when creating or
updating a policy. All of the retention configuration options are optional.
[source,console]
--------------------------------------------------
PUT /_slm/policy/daily-snapshots
{
"schedule": "0 30 1 * * ?",
"name": "<daily-snap-{now/d}>",
"repository": "my_repository",
"retention": { <1>
"expire_after": "30d", <2>
"min_count": 5, <3>
"max_count": 50 <4>
}
}
--------------------------------------------------
// TEST[setup:setup-repository]
<1> Optional retention configuration
<2> Keep snapshots for 30 days
<3> Always keep at least 5 successful snapshots
<4> Keep no more than 50 successful snapshots
The supported retention configuration options within a policy are as follows. The default value for
each is unset unless specified by the user in the policy configuration.
NOTE: The oldest snapshots are always deleted first. In the case of a `max_count` of 5 for a policy
with 6 snapshots, the oldest snapshot will be deleted.
|=====================================
| Setting | Description
| `expire_after` | A timevalue for how old a snapshot must be in order to be eligible for deletion.
| `min_count` | A minimum number of snapshots to keep, regardless of age.
| `max_count` | The maximum number of snapshots to keep, regardless of age.
|=====================================
As an example, the retention setting in the policy configured above would read in English as:
____
Remove snapshots older than thirty days, but always keep the latest five snapshots. If there are
more than fifty snapshots, remove the oldest surplus snapshots until there are no more than fifty
successful snapshots.
____
If multiple policies are configured to snapshot to the same repository, or if manual snapshots have
been taken without using the <<slm-api-execute,Execute Policy API>>, those snapshots are treated as
not eligible for retention and do not count towards any limits. This allows multiple policies to
have differing retention configurations while using the same snapshot repository.
Statistics for snapshot retention can be retrieved using the <<slm-get-stats,Get Snapshot Lifecycle
Stats API>>:
[source,console]
--------------------------------------------------
GET /_slm/stats
--------------------------------------------------
// TEST[continued]
Which returns a response similar to:
[source,js]
--------------------------------------------------
{
"retention_runs": 13, <1>
"retention_failed": 0, <2>
"retention_timed_out": 0, <3>
"retention_deletion_time": "1.4s", <4>
"retention_deletion_time_millis": 1404,
"policy_stats": [
{
"policy": "daily-snapshots",
"snapshots_taken": 1,
"snapshots_failed": 1,
"snapshots_deleted": 0, <5>
"snapshot_deletion_failures": 0 <6>
}
],
"total_snapshots_taken": 1,
"total_snapshots_failed": 1,
"total_snapshots_deleted": 0, <7>
"total_snapshot_deletion_failures": 0 <8>
}
--------------------------------------------------
// TESTRESPONSE[skip:this is not actually running retention]
<1> Number of times retention has been run
<2> Number of times retention failed while running
<3> Number of times retention hit the `slm.retention_duration` time limit and had to stop before deleting all eligible snapshots
<4> Total time spent deleting snapshots by the retention process
<5> Number of snapshots created by the "daily-snapshots" policy that have been deleted
<6> Number of snapshots that failed to be deleted
<7> Total number of snapshots deleted across all policies
<8> Total number of snapshot deletion failures across all policies
View file
@ -4,53 +4,140 @@
<titleabbrev>Clear cache</titleabbrev>
++++
The clear cache API allows to clear either all caches or specific cached
associated with one or more indices.
Clears caches for one or more indices.
[source,console]
--------------------------------------------------
----
POST /twitter/_cache/clear
--------------------------------------------------
----
// TEST[setup:twitter]
The API, by default, will clear all caches. Specific caches can be cleaned
explicitly by setting the `query`, `fielddata` or `request` url parameter to `true`.
[[clear-cache-api-request]]
==== {api-request-title}
`POST /<index>/_cache/clear`
`POST /_cache/clear`
[[clear-cache-api-path-params]]
==== {api-path-parms-title}
include::{docdir}/rest-api/common-parms.asciidoc[tag=index]
[[clear-cache-api-query-params]]
==== {api-query-parms-title}
include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices]
include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards]
+
Defaults to `open`.
`fielddata`::
+
--
(Optional, boolean)
If `true`,
clears the fields cache.
Use the `fields` parameter
to clear the cache of specific fields only.
--
`fields`::
+
--
(Optional, string)
Comma-separated list of field names
used to limit the `fielddata` parameter.
Defaults to all fields.
NOTE: This parameter does *not* support objects
or field aliases.
--
`index`::
(Optional, string)
Comma-separated list of index names
used to limit the request.
include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable]
`query`::
(Optional, boolean)
If `true`,
clears the query cache.
`request`::
(Optional, boolean)
If `true`,
clears the request cache.
[[clear-cache-api-example]]
==== {api-examples-title}
[[clear-cache-api-specific-ex]]
===== Clear a specific cache
By default,
the clear cache API clears all caches.
You can clear only specific caches
by setting the following query parameters to `true`:
* `fielddata`
* `query`
* `request`
[source,console]
--------------------------------------------------
POST /twitter/_cache/clear?query=true <1>
POST /twitter/_cache/clear?request=true <2>
POST /twitter/_cache/clear?fielddata=true <3>
--------------------------------------------------
----
POST /twitter/_cache/clear?fielddata=true <1>
POST /twitter/_cache/clear?query=true <2>
POST /twitter/_cache/clear?request=true <3>
----
// TEST[continued]
<1> Cleans only the query cache
<2> Cleans only the request cache
<3> Cleans only the fielddata cache
<1> Clears only the fields cache
<2> Clears only the query cache
<3> Clears only the request cache
In addition to this, all caches relating to a specific field can also be
cleared by specifying `fields` url parameter with a comma delimited list of
the fields that should be cleared. Note that the provided names must refer to
concrete fields -- objects and field aliases are not supported.
[[clear-cache-api-specific-fields-ex]]
===== Clear the cache of specific fields
To only clear the cache of specific fields,
use the `fields` query parameter.
[source,console]
--------------------------------------------------
----
POST /twitter/_cache/clear?fields=foo,bar <1>
--------------------------------------------------
----
// TEST[continued]
<1> Clear the cache for the `foo` an `bar` field
<1> Clears the cache for the `foo` and `bar` fields
[float]
==== Multi Index
The clear cache API can be applied to more than one index with a single
call, or even on `_all` the indices.
[[clear-cache-api-multi-ex]]
===== Clear caches for several indices
[source,console]
--------------------------------------------------
----
POST /kimchy,elasticsearch/_cache/clear
POST /_cache/clear
--------------------------------------------------
----
// TEST[s/^/PUT kimchy\nPUT elasticsearch\n/]
[[clear-cache-api-all-ex]]
===== Clear caches for all indices
[source,console]
----
POST /_cache/clear
----
View file
@ -42,19 +42,24 @@ NOTE: To roll over an index, a condition must be met *when you call the API*.
automatically roll over indices when a condition is met, you can use {es}'s
<<index-lifecycle-management, index lifecycle management (ILM) policies>>.
The API accepts a single alias name and a list of `conditions`. The alias must point to a write index for
a Rollover request to be valid. There are two ways this can be achieved, and depending on the configuration, the
alias metadata will be updated differently. The two scenarios are as follows:
The rollover index API accepts a single alias name
and a list of `conditions`.
- The alias only points to a single index with `is_write_index` not configured (defaults to `null`).
If the specified alias points to a single index,
the rollover request:
In this scenario, the original index will have their rollover alias will be added to the newly created index, and removed
from the original (rolled-over) index.
. Creates a new index
. Adds the alias to the new index
. Removes the alias from the original index
- The alias points to one or more indices with `is_write_index` set to `true` on the index to be rolled over (the write index).
If the specified alias points to multiple indices,
one of these indices must have `is_write_index` set to `true`.
In this case,
the rollover request:
In this scenario, the write index will have its rollover alias' `is_write_index` set to `false`, while the newly created index
will now have the rollover alias pointing to it as the write index with `is_write_index` as `true`.
. Creates a new index
. Sets `is_write_index` to `true` for the new index
. Sets `is_write_index` to `false` for the original index
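As a minimal sketch of the second scenario (the `my-logs-000001` index and `my-logs` alias names are only examples), you could create an index whose alias has `is_write_index` set to `true` and then roll it over:

[source,console]
----
PUT /my-logs-000001
{
  "aliases": {
    "my-logs": { "is_write_index": true }
  }
}

POST /my-logs/_rollover
{
  "conditions": {
    "max_docs": 1000
  }
}
----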
[[rollover-wait-active-shards]]
View file
@ -17,14 +17,14 @@ This API enables you to delete licensing information.
==== Description
When your license expires, {xpack} operates in a degraded mode. For more
information, see {xpack-ref}/license-expiration.html[License Expiration].
information, see {stack-ov}/license-expiration.html[License Expiration].
[float]
==== Authorization
You must have `manage` cluster privileges to use this API.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
<<security-privileges>>.
[float]
==== Examples
View file
@ -25,8 +25,7 @@ https://www.elastic.co/subscriptions.
==== Authorization
You must have `monitor` cluster privileges to use this API.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
For more information, see <<security-privileges>>.
[float]
==== Examples
View file
@ -35,8 +35,7 @@ https://www.elastic.co/subscriptions.
==== Authorization
You must have `monitor` cluster privileges to use this API.
For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
For more information, see <<security-privileges>>.
[float]
Some files were not shown because too many files have changed in this diff