mirror of
https://github.com/elastic/elasticsearch.git
synced 2025-04-24 23:27:25 -04:00
Convert repository plugins to modules (#81870)
Closes #81652. Convert the `repository-azure`, `repository-gcs` and `repository-s3` plugins into modules, so that they are always included in the Elasticsearch distribution. Also change plugin installation, removal and syncing so that attempting to add or remove these plugins still succeeds but is now a no-op.
This commit is contained in:
parent
78509f44d1
commit
d2dbef5063
286 changed files with 628 additions and 304 deletions
|
@ -37,11 +37,6 @@ if [ -f "/etc/os-release" ] ; then
|
|||
if [[ "$ID" == "debian" || "$ID_LIKE" == "debian" ]] ; then
|
||||
# FIXME: The base image should not have rpm installed
|
||||
sudo rm -Rf /usr/bin/rpm
|
||||
# Work around incorrect lintian version
|
||||
# https://github.com/elastic/elasticsearch/issues/48573
|
||||
if [ $VERSION_ID == 10 ] ; then
|
||||
sudo apt-get install -y --allow-downgrades lintian=2.15.0
|
||||
fi
|
||||
fi
|
||||
else
|
||||
cat /etc/issue || true
|
||||
|
|
|
@ -88,7 +88,7 @@ if (providers.systemProperty('idea.active').forUseAtConfigurationTime().getOrNul
|
|||
tasks.register('buildDependencyArtifacts') {
|
||||
group = 'ide'
|
||||
description = 'Builds artifacts needed as dependency for IDE modules'
|
||||
dependsOn ':client:rest-high-level:shadowJar', ':plugins:repository-hdfs:hadoop-client-api:shadowJar', ':plugins:repository-azure:azure-storage-blob:shadowJar'
|
||||
dependsOn ':client:rest-high-level:shadowJar', ':plugins:repository-hdfs:hadoop-client-api:shadowJar', ':modules:repository-azure:azure-storage-blob:shadowJar'
|
||||
}
|
||||
|
||||
idea {
|
||||
|
|
|
@ -70,8 +70,7 @@ configurations {
|
|||
dockerSource
|
||||
log4jConfig
|
||||
tini
|
||||
repositoryPlugins
|
||||
nonRepositoryPlugins
|
||||
allPlugins
|
||||
filebeat
|
||||
metricbeat
|
||||
cloudflareZlib
|
||||
|
@ -85,8 +84,7 @@ dependencies {
|
|||
dockerSource project(path: ":distribution:archives:linux-tar", configuration: 'default')
|
||||
log4jConfig project(path: ":distribution", configuration: 'log4jConfig')
|
||||
tini "krallin:tini:0.19.0:${tiniArch}"
|
||||
repositoryPlugins project(path: ':plugins', configuration: 'repositoryPlugins')
|
||||
nonRepositoryPlugins project(path: ':plugins', configuration: 'nonRepositoryPlugins')
|
||||
allPlugins project(path: ':plugins', configuration: 'allPlugins')
|
||||
filebeat "beats:filebeat:${VersionProperties.elasticsearch}:${beatsArch}@tar.gz"
|
||||
metricbeat "beats:metricbeat:${VersionProperties.elasticsearch}:${beatsArch}@tar.gz"
|
||||
cloudflareZlib "cloudflare:zlib:${cloudflareZlibVersion}@tar.gz"
|
||||
|
@ -272,8 +270,6 @@ void addBuildDockerContextTask(Architecture architecture, DockerBase base) {
|
|||
String buildId = providers.systemProperty('build.id').forUseAtConfigurationTime().getOrNull()
|
||||
boolean includeBeats = VersionProperties.isElasticsearchSnapshot() == true || buildId != null
|
||||
|
||||
from configurations.repositoryPlugins
|
||||
|
||||
if (includeBeats) {
|
||||
from configurations.filebeat
|
||||
from configurations.metricbeat
|
||||
|
@ -407,7 +403,7 @@ void addBuildEssDockerImageTask(Architecture architecture) {
|
|||
final Path projectDir = project.projectDir.toPath()
|
||||
|
||||
into("plugins") {
|
||||
from configurations.nonRepositoryPlugins
|
||||
from configurations.allPlugins
|
||||
}
|
||||
|
||||
from(projectDir.resolve("src/docker/Dockerfile.cloud-ess")) {
|
||||
|
|
|
@ -137,18 +137,6 @@ RUN sed -i -e 's/ES_DISTRIBUTION_TYPE=tar/ES_DISTRIBUTION_TYPE=docker/' bin/elas
|
|||
find config -type f -exec chmod 0664 {} +
|
||||
|
||||
<% if (docker_base == "cloud") { %>
|
||||
# Preinstall common plugins. Note that these are installed as root, meaning the `elasticsearch` user cannot delete them.
|
||||
COPY repository-s3-${version}.zip repository-gcs-${version}.zip repository-azure-${version}.zip /tmp/
|
||||
RUN bin/elasticsearch-plugin install --batch --verbose \\
|
||||
file:/tmp/repository-s3-${version}.zip \\
|
||||
file:/tmp/repository-gcs-${version}.zip \\
|
||||
file:/tmp/repository-azure-${version}.zip
|
||||
# Generate a replacement example plugins config that reflects what is actually installed
|
||||
RUN echo "plugins:" > config/elasticsearch-plugins.example.yml && \\
|
||||
echo " - id: repository-azure" >> config/elasticsearch-plugins.example.yml && \\
|
||||
echo " - id: repository-gcs" >> config/elasticsearch-plugins.example.yml && \\
|
||||
echo " - id: repository-s3" >> config/elasticsearch-plugins.example.yml
|
||||
|
||||
COPY filebeat-${version}.tar.gz metricbeat-${version}.tar.gz /tmp/
|
||||
RUN set -eux ; \\
|
||||
for beat in filebeat metricbeat ; do \\
|
||||
|
|
|
@ -291,8 +291,10 @@ Closure commonDebConfig(String architecture) {
|
|||
return {
|
||||
configure(commonPackageConfig('deb', architecture))
|
||||
|
||||
// jdeb does not provide a way to set the License control attribute, and ospackage
|
||||
// silently ignores setting it. Instead, we set the license as "custom field"
|
||||
// jdeb does not provide a way to set the License control attribute, and
|
||||
// ospackage silently ignores setting it. This is probably because `License`
|
||||
// is not actually a field in the Debian control file. So instead, we set
|
||||
// the license as "custom field".
|
||||
customFields['License'] = 'Elastic-License'
|
||||
|
||||
archiveVersion = project.version.replace('-', '~')
|
||||
|
|
|
@ -8,6 +8,8 @@
|
|||
# $1=0 : indicates a removal
|
||||
# $1=1 : indicates an upgrade
|
||||
|
||||
set -e
|
||||
|
||||
# source the default env file
|
||||
if [ -f "@path.env@" ]; then
|
||||
. "@path.env@"
|
||||
|
|
|
@ -9,6 +9,8 @@
|
|||
# $1=0 : indicates a removal
|
||||
# $1=1 : indicates an upgrade
|
||||
|
||||
set -e
|
||||
|
||||
# source the default env file
|
||||
if [ -f "@path.env@" ]; then
|
||||
. "@path.env@"
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
set -e
|
||||
|
||||
# source the default env file
|
||||
if [ -f "@path.env@" ]; then
|
||||
. "@path.env@"
|
||||
|
|
|
@ -10,6 +10,8 @@
|
|||
# $1=1 : indicates an new install
|
||||
# $1=2 : indicates an upgrade
|
||||
|
||||
set -e
|
||||
|
||||
err_exit() {
|
||||
echo "$@" >&2
|
||||
exit 1
|
||||
|
|
|
@ -9,6 +9,8 @@
|
|||
# $1=0 : indicates a removal
|
||||
# $1=1 : indicates an upgrade
|
||||
|
||||
set -e
|
||||
|
||||
# source the default env file
|
||||
if [ -f "@path.env@" ]; then
|
||||
. "@path.env@"
|
||||
|
|
|
@ -1,10 +1,12 @@
|
|||
# we don't have a changelog, but we put our copyright file
|
||||
# under /usr/share/doc/elasticsearch, which triggers this warning
|
||||
# Note that this is renamed to `no-changelog` in newer versions of
|
||||
# lintian, but we still support Debian 8+, so we can't change this.
|
||||
changelog-file-missing-in-native-package
|
||||
|
||||
# we intentionally copy our copyright file for all deb packages
|
||||
copyright-file-contains-full-apache-2-license
|
||||
copyright-should-refer-to-common-license-file-for-apache-2
|
||||
copyright-not-using-common-license-for-apache2
|
||||
copyright-without-copyright-notice
|
||||
|
||||
# we still put all our files under /usr/share/elasticsearch even after transition to platform dependent packages
|
||||
|
@ -21,7 +23,7 @@ non-standard-file-perm etc/elasticsearch/*
|
|||
non-standard-dir-perm var/lib/elasticsearch/ 2750 != 0755
|
||||
non-standard-dir-perm var/log/elasticsearch/ 2750 != 0755
|
||||
|
||||
# this lintian tag is simply wrong; contrary to the explanation, debian systemd
|
||||
# this lintian tag is simply wrong; contrary to the explanation, Debian systemd
|
||||
# does actually look at /usr/lib/systemd/system
|
||||
systemd-service-file-outside-lib usr/lib/systemd/system/elasticsearch.service
|
||||
|
||||
|
@ -30,7 +32,6 @@ maintainer-script-calls-systemctl
|
|||
|
||||
# bundled JDK
|
||||
embedded-library
|
||||
arch-dependent-file-in-usr-share usr/share/elasticsearch/jdk/*
|
||||
unstripped-binary-or-object usr/share/elasticsearch/jdk/*
|
||||
extra-license-file usr/share/elasticsearch/jdk/legal/*
|
||||
hardening-no-pie usr/share/elasticsearch/jdk/bin/*
|
||||
|
@ -41,3 +42,12 @@ unknown-java-class-version
|
|||
|
||||
# elastic licensed modules contain elastic license
|
||||
extra-license-file usr/share/elasticsearch/modules/*
|
||||
|
||||
# This dependency appears to have a packaging flaw, and includes a
|
||||
# generated source file alongside the compiled version
|
||||
jar-contains-source usr/share/elasticsearch/modules/repository-gcs/api-common*.jar *
|
||||
|
||||
# There's no `License` field in Debian control files, but earlier versions
|
||||
# of `lintian` were more permissive. Override this warning so that we can
|
||||
# run `lintian` on different releases of Debian.
|
||||
unknown-field elasticsearch-*.deb License
|
||||
|
|
|
@ -153,6 +153,13 @@ public class InstallPluginAction implements Closeable {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* IDs of plugins that have been migrated to modules and do not require installation. This data is
|
||||
* maintained so that existing user workflows that install these plugins do not need to be updated
|
||||
* immediately.
|
||||
*/
|
||||
public static final Set<String> PLUGINS_CONVERTED_TO_MODULES = Set.of("repository-azure", "repository-gcs", "repository-s3");
|
||||
|
||||
static final Set<PosixFilePermission> BIN_DIR_PERMS;
|
||||
static final Set<PosixFilePermission> BIN_FILES_PERMS;
|
||||
static final Set<PosixFilePermission> CONFIG_DIR_PERMS;
|
||||
|
@ -219,6 +226,15 @@ public class InstallPluginAction implements Closeable {
|
|||
handleInstallXPack(buildFlavor());
|
||||
}
|
||||
|
||||
if (PLUGINS_CONVERTED_TO_MODULES.contains(pluginId)) {
|
||||
// This deliberately does not throw an exception in order to avoid failing automation that relies on installing this
|
||||
// plugin during deployment.
|
||||
terminal.errorPrintln(
|
||||
"[" + pluginId + "] is no longer a plugin but instead a module packaged with this distribution of Elasticsearch"
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
final List<Path> deleteOnFailure = new ArrayList<>();
|
||||
deleteOnFailures.put(pluginId, deleteOnFailure);
|
||||
|
||||
|
|
|
@ -8,6 +8,8 @@
|
|||
|
||||
package org.elasticsearch.plugins.cli;
|
||||
|
||||
import org.elasticsearch.common.Strings;
|
||||
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
|
@ -27,7 +29,7 @@ public class PluginDescriptor {
|
|||
* coordinates. Can be null for official plugins.
|
||||
*/
|
||||
public PluginDescriptor(String id, String location) {
|
||||
this.id = Objects.requireNonNull(id, "id cannot be null");
|
||||
this.id = Strings.requireNonBlank(id, "plugin id cannot be null or blank");
|
||||
this.location = location;
|
||||
}
|
||||
|
||||
|
@ -40,7 +42,7 @@ public class PluginDescriptor {
|
|||
}
|
||||
|
||||
public void setId(String id) {
|
||||
this.id = id;
|
||||
this.id = Strings.requireNonBlank(id, "plugin id cannot be null or blank");
|
||||
}
|
||||
|
||||
public String getLocation() {
|
||||
|
|
|
@ -8,6 +8,7 @@
|
|||
|
||||
package org.elasticsearch.plugins.cli;
|
||||
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.xcontent.ObjectParser;
|
||||
import org.elasticsearch.xcontent.ParseField;
|
||||
import org.elasticsearch.xcontent.XContent;
|
||||
|
@ -57,10 +58,12 @@ public class PluginsConfig {
|
|||
* </ul>
|
||||
*
|
||||
* @param officialPlugins the plugins that can be installed by name only
|
||||
* @param migratedPlugins plugins that were once official but have since become modules. These
|
||||
* plugin IDs can still be specified, but do nothing.
|
||||
* @throws PluginSyncException if validation problems are found
|
||||
*/
|
||||
public void validate(Set<String> officialPlugins) throws PluginSyncException {
|
||||
if (this.plugins.stream().anyMatch(each -> each == null || each.getId() == null || each.getId().isBlank())) {
|
||||
public void validate(Set<String> officialPlugins, Set<String> migratedPlugins) throws PluginSyncException {
|
||||
if (this.plugins.stream().anyMatch(each -> each == null || Strings.isNullOrBlank(each.getId()))) {
|
||||
throw new RuntimeException("Cannot have null or empty IDs in [elasticsearch-plugins.yml]");
|
||||
}
|
||||
|
||||
|
@ -72,7 +75,9 @@ public class PluginsConfig {
|
|||
}
|
||||
|
||||
for (PluginDescriptor plugin : this.plugins) {
|
||||
if (officialPlugins.contains(plugin.getId()) == false && plugin.getLocation() == null) {
|
||||
if (officialPlugins.contains(plugin.getId()) == false
|
||||
&& migratedPlugins.contains(plugin.getId()) == false
|
||||
&& plugin.getLocation() == null) {
|
||||
throw new PluginSyncException(
|
||||
"Must specify location for non-official plugin [" + plugin.getId() + "] in [elasticsearch-plugins.yml]"
|
||||
);
|
||||
|
|
|
@ -26,10 +26,10 @@ import java.util.Locale;
|
|||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.StringJoiner;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import static org.elasticsearch.cli.Terminal.Verbosity.VERBOSE;
|
||||
import static org.elasticsearch.plugins.cli.InstallPluginAction.PLUGINS_CONVERTED_TO_MODULES;
|
||||
|
||||
/**
|
||||
* An action for the plugin CLI to remove plugins from Elasticsearch.
|
||||
|
@ -116,6 +116,7 @@ public class RemovePluginAction {
|
|||
|
||||
private void checkCanRemove(PluginDescriptor plugin) throws UserException {
|
||||
String pluginId = plugin.getId();
|
||||
|
||||
final Path pluginDir = env.pluginsFile().resolve(pluginId);
|
||||
final Path pluginConfigDir = env.configFile().resolve(pluginId);
|
||||
final Path removing = env.pluginsFile().resolve(".removing-" + pluginId);
|
||||
|
@ -127,12 +128,19 @@ public class RemovePluginAction {
|
|||
*/
|
||||
if ((Files.exists(pluginDir) == false && Files.exists(pluginConfigDir) == false && Files.exists(removing) == false)
|
||||
|| (Files.exists(pluginDir) == false && Files.exists(pluginConfigDir) && this.purge == false)) {
|
||||
final String message = String.format(
|
||||
Locale.ROOT,
|
||||
"plugin [%s] not found; run 'elasticsearch-plugin list' to get list of installed plugins",
|
||||
pluginId
|
||||
);
|
||||
throw new UserException(ExitCodes.CONFIG, message);
|
||||
|
||||
if (PLUGINS_CONVERTED_TO_MODULES.contains(pluginId)) {
|
||||
terminal.errorPrintln(
|
||||
"plugin [" + pluginId + "] is no longer a plugin but instead a module packaged with this distribution of Elasticsearch"
|
||||
);
|
||||
} else {
|
||||
final String message = String.format(
|
||||
Locale.ROOT,
|
||||
"plugin [%s] not found; run 'elasticsearch-plugin list' to get list of installed plugins",
|
||||
pluginId
|
||||
);
|
||||
throw new UserException(ExitCodes.CONFIG, message);
|
||||
}
|
||||
}
|
||||
|
||||
final Path pluginBinDir = env.binFile().resolve(pluginId);
|
||||
|
@ -159,7 +167,7 @@ public class RemovePluginAction {
|
|||
*/
|
||||
if (Files.exists(pluginDir)) {
|
||||
try (Stream<Path> paths = Files.list(pluginDir)) {
|
||||
pluginPaths.addAll(paths.collect(Collectors.toList()));
|
||||
pluginPaths.addAll(paths.toList());
|
||||
}
|
||||
terminal.println(VERBOSE, "removing [" + pluginDir + "]");
|
||||
}
|
||||
|
@ -167,7 +175,7 @@ public class RemovePluginAction {
|
|||
final Path pluginBinDir = env.binFile().resolve(pluginId);
|
||||
if (Files.exists(pluginBinDir)) {
|
||||
try (Stream<Path> paths = Files.list(pluginBinDir)) {
|
||||
pluginPaths.addAll(paths.collect(Collectors.toList()));
|
||||
pluginPaths.addAll(paths.toList());
|
||||
}
|
||||
pluginPaths.add(pluginBinDir);
|
||||
terminal.println(VERBOSE, "removing [" + pluginBinDir + "]");
|
||||
|
@ -176,7 +184,7 @@ public class RemovePluginAction {
|
|||
if (Files.exists(pluginConfigDir)) {
|
||||
if (this.purge) {
|
||||
try (Stream<Path> paths = Files.list(pluginConfigDir)) {
|
||||
pluginPaths.addAll(paths.collect(Collectors.toList()));
|
||||
pluginPaths.addAll(paths.toList());
|
||||
}
|
||||
pluginPaths.add(pluginConfigDir);
|
||||
terminal.println(VERBOSE, "removing [" + pluginConfigDir + "]");
|
||||
|
|
|
@ -24,6 +24,8 @@ import java.nio.file.DirectoryStream;
|
|||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Comparator;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
|
@ -91,7 +93,7 @@ public class SyncPluginsAction implements PluginsSynchronizer {
|
|||
|
||||
// Parse descriptor file
|
||||
final PluginsConfig pluginsConfig = PluginsConfig.parseConfig(configPath, YamlXContent.yamlXContent);
|
||||
pluginsConfig.validate(InstallPluginAction.OFFICIAL_PLUGINS);
|
||||
pluginsConfig.validate(InstallPluginAction.OFFICIAL_PLUGINS, InstallPluginAction.PLUGINS_CONVERTED_TO_MODULES);
|
||||
|
||||
// Parse cached descriptor file, if it exists
|
||||
final Optional<PluginsConfig> cachedPluginsConfig = Files.exists(previousConfigPath)
|
||||
|
@ -115,7 +117,8 @@ public class SyncPluginsAction implements PluginsSynchronizer {
|
|||
PluginChanges getPluginChanges(PluginsConfig pluginsConfig, Optional<PluginsConfig> cachedPluginsConfig) throws PluginSyncException {
|
||||
final List<PluginInfo> existingPlugins = getExistingPlugins();
|
||||
|
||||
final List<PluginDescriptor> pluginsThatShouldExist = pluginsConfig.getPlugins();
|
||||
final List<PluginDescriptor> pluginsThatShouldExist = getPluginsThatShouldExist(pluginsConfig);
|
||||
|
||||
final List<PluginDescriptor> pluginsThatActuallyExist = existingPlugins.stream()
|
||||
.map(info -> new PluginDescriptor(info.getName()))
|
||||
.collect(Collectors.toList());
|
||||
|
@ -131,9 +134,42 @@ public class SyncPluginsAction implements PluginsSynchronizer {
|
|||
|
||||
final List<PluginDescriptor> pluginsToUpgrade = getPluginsToUpgrade(pluginsToMaybeUpgrade, cachedPluginsConfig, existingPlugins);
|
||||
|
||||
pluginsToRemove.sort(Comparator.comparing(PluginDescriptor::getId));
|
||||
pluginsToInstall.sort(Comparator.comparing(PluginDescriptor::getId));
|
||||
pluginsToMaybeUpgrade.sort(Comparator.comparing(PluginDescriptor::getId));
|
||||
|
||||
return new PluginChanges(pluginsToRemove, pluginsToInstall, pluginsToUpgrade);
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch the plugins that ought to be installed, according to the config file. For plugins that
|
||||
* have migrated to modules, in order to help transition it's OK to still specify these plugins
|
||||
* in the config file, but they will have no effect. Indeed, any existing plugin installation
|
||||
* will also be removed, leaving only the module.
|
||||
* <p>
|
||||
* Why don't we just leave the modularized plugins in this list and allow `InstallPluginAction`
|
||||
* to print a warning? The problem with doing that is that the sync process wouldn't remove the
|
||||
* old plugins. Instead, we remove them from the list, meaning that they will be uninstalled if
|
||||
* they are currently installed. However, this also means that we need to emit our own warning
|
||||
* that installation by plugin is deprecated.
|
||||
*/
|
||||
private List<PluginDescriptor> getPluginsThatShouldExist(PluginsConfig pluginsConfig) {
|
||||
final List<PluginDescriptor> pluginsThatShouldExist = new ArrayList<>(pluginsConfig.getPlugins());
|
||||
|
||||
final Iterator<PluginDescriptor> shouldExistIterator = pluginsThatShouldExist.iterator();
|
||||
while (shouldExistIterator.hasNext()) {
|
||||
final PluginDescriptor each = shouldExistIterator.next();
|
||||
if (InstallPluginAction.PLUGINS_CONVERTED_TO_MODULES.contains(each.getId())) {
|
||||
terminal.errorPrintln(
|
||||
"[" + each.getId() + "] is no longer a plugin but instead a module packaged with this distribution of Elasticsearch"
|
||||
);
|
||||
shouldExistIterator.remove();
|
||||
}
|
||||
}
|
||||
|
||||
return pluginsThatShouldExist;
|
||||
}
|
||||
|
||||
private void performSync(PluginsConfig pluginsConfig, PluginChanges changes) throws Exception {
|
||||
final Proxy proxy = ProxyUtils.buildProxy(pluginsConfig.getProxy());
|
||||
|
||||
|
@ -283,7 +319,7 @@ public class SyncPluginsAction implements PluginsSynchronizer {
|
|||
private void logRequiredChanges(PluginChanges changes) {
|
||||
final BiConsumer<String, List<PluginDescriptor>> printSummary = (action, plugins) -> {
|
||||
if (plugins.isEmpty() == false) {
|
||||
List<String> pluginIds = plugins.stream().map(PluginDescriptor::getId).collect(Collectors.toList());
|
||||
List<String> pluginIds = plugins.stream().map(PluginDescriptor::getId).toList();
|
||||
this.terminal.errorPrintln(String.format(Locale.ROOT, "Plugins to be %s: %s", action, pluginIds));
|
||||
}
|
||||
};
|
||||
|
|
|
@ -781,8 +781,8 @@ public class InstallPluginActionTests extends ESTestCase {
|
|||
UserException e = expectThrows(UserException.class, () -> installPlugin("analysis-smartnc"));
|
||||
assertThat(e.getMessage(), containsString("Unknown plugin analysis-smartnc, did you mean [analysis-smartcn]?"));
|
||||
|
||||
e = expectThrows(UserException.class, () -> installPlugin("repository"));
|
||||
assertThat(e.getMessage(), containsString("Unknown plugin repository, did you mean any of [repository-s3, repository-gcs]?"));
|
||||
e = expectThrows(UserException.class, () -> installPlugin("discovery-ec"));
|
||||
assertThat(e.getMessage(), containsString("Unknown plugin discovery-ec, did you mean any of [discovery-ec2, discovery-gce]?"));
|
||||
|
||||
e = expectThrows(UserException.class, () -> installPlugin("unknown_plugin"));
|
||||
assertThat(e.getMessage(), containsString("Unknown plugin unknown_plugin"));
|
||||
|
@ -1424,4 +1424,15 @@ public class InstallPluginActionTests extends ESTestCase {
|
|||
installPlugin(pluginZip);
|
||||
assertPlugin("fake-with-deps", pluginDir, env.v2());
|
||||
}
|
||||
|
||||
/**
|
||||
* Check that plugins that have been migrated to modules do not cause an error on installation, bit
|
||||
* instead simply print a message to the terminal.
|
||||
*/
|
||||
public void testInstallMigratedPlugins() throws Exception {
|
||||
for (String id : List.of("repository-azure", "repository-gcs", "repository-s3")) {
|
||||
installPlugin(id);
|
||||
assertThat(terminal.getErrorOutput(), containsString("[" + id + "] is no longer a plugin"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,118 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0 and the Server Side Public License, v 1; you may not use this file except
|
||||
* in compliance with, at your election, the Elastic License 2.0 or the Server
|
||||
* Side Public License, v 1.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.plugins.cli;
|
||||
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
||||
public class PluginsConfigTests extends ESTestCase {
|
||||
|
||||
/**
|
||||
* Check that an empty config object passes validation.
|
||||
*/
|
||||
public void test_validate_acceptsEmptyConfig() throws PluginSyncException {
|
||||
PluginsConfig config = new PluginsConfig();
|
||||
config.validate(Set.of(), Set.of());
|
||||
}
|
||||
|
||||
/**
|
||||
* Check that validation rejects a null plugin descriptor.
|
||||
*/
|
||||
public void test_validate_rejectsNullDescriptor() {
|
||||
PluginsConfig config = new PluginsConfig();
|
||||
List<PluginDescriptor> descriptors = new ArrayList<>();
|
||||
descriptors.add(null);
|
||||
config.setPlugins(descriptors);
|
||||
final Exception e = expectThrows(RuntimeException.class, () -> config.validate(Set.of(), Set.of()));
|
||||
assertThat(e.getMessage(), equalTo("Cannot have null or empty IDs in [elasticsearch-plugins.yml]"));
|
||||
}
|
||||
|
||||
/**
|
||||
* Check that validation rejects a null plugin descriptor.
|
||||
*/
|
||||
public void test_validate_rejectsDescriptorWithNullId() {
|
||||
PluginsConfig config = new PluginsConfig();
|
||||
config.setPlugins(List.of(new PluginDescriptor()));
|
||||
final Exception e = expectThrows(RuntimeException.class, () -> config.validate(Set.of(), Set.of()));
|
||||
assertThat(e.getMessage(), equalTo("Cannot have null or empty IDs in [elasticsearch-plugins.yml]"));
|
||||
}
|
||||
|
||||
/**
|
||||
* Check that validation rejects duplicate plugin IDs.
|
||||
*/
|
||||
public void test_validate_rejectsDuplicatePluginId() {
|
||||
PluginsConfig config = new PluginsConfig();
|
||||
config.setPlugins(List.of(new PluginDescriptor("foo"), new PluginDescriptor("foo")));
|
||||
final Exception e = expectThrows(PluginSyncException.class, () -> config.validate(Set.of(), Set.of()));
|
||||
assertThat(e.getMessage(), equalTo("Duplicate plugin ID [foo] found in [elasticsearch-plugins.yml]"));
|
||||
}
|
||||
|
||||
/**
|
||||
* Check that validation rejects unofficial plugins without a location
|
||||
*/
|
||||
public void test_validate_rejectsUnofficialPluginWithoutLocation() {
|
||||
PluginsConfig config = new PluginsConfig();
|
||||
config.setPlugins(List.of(new PluginDescriptor("foo")));
|
||||
final Exception e = expectThrows(PluginSyncException.class, () -> config.validate(Set.of(), Set.of()));
|
||||
assertThat(e.getMessage(), equalTo("Must specify location for non-official plugin [foo] in [elasticsearch-plugins.yml]"));
|
||||
}
|
||||
|
||||
/**
|
||||
* Check that validation rejects unofficial plugins with a blank location
|
||||
*/
|
||||
public void test_validate_rejectsUnofficialPluginWithBlankLocation() {
|
||||
PluginsConfig config = new PluginsConfig();
|
||||
config.setPlugins(List.of(new PluginDescriptor("foo", " ")));
|
||||
final Exception e = expectThrows(PluginSyncException.class, () -> config.validate(Set.of(), Set.of()));
|
||||
assertThat(e.getMessage(), equalTo("Empty location for plugin [foo]"));
|
||||
}
|
||||
|
||||
/**
|
||||
* Check that validation rejects unofficial plugins with a blank location
|
||||
*/
|
||||
public void test_validate_rejectsMalformedProxy() {
|
||||
List<String> examples = List.of("foo:bar:baz:8080", ":8080", "foo:", "foo:bar");
|
||||
|
||||
for (String example : examples) {
|
||||
PluginsConfig config = new PluginsConfig();
|
||||
config.setProxy(example);
|
||||
Exception e = expectThrows(PluginSyncException.class, () -> config.validate(Set.of(), Set.of()));
|
||||
assertThat(e.getMessage(), equalTo("Malformed [proxy], expected [host:port] in [elasticsearch-plugins.yml]"));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check that official plugin IDs are accepted.
|
||||
*/
|
||||
public void test_validate_allowsOfficialPlugin() throws PluginSyncException {
|
||||
PluginsConfig config = new PluginsConfig();
|
||||
config.setPlugins(List.of(new PluginDescriptor("analysis-icu")));
|
||||
config.validate(Set.of("analysis-icu"), Set.of());
|
||||
}
|
||||
|
||||
/**
|
||||
* Check that official plugins that have been migrated to modules are still accepted, despite
|
||||
* no longer being plugins.
|
||||
*/
|
||||
public void test_validate_allowsMigratedPlugin() throws PluginSyncException {
|
||||
final List<PluginDescriptor> descriptors = Stream.of("azure", "gcs", "s3")
|
||||
.map(each -> new PluginDescriptor("repository-" + each))
|
||||
.toList();
|
||||
PluginsConfig config = new PluginsConfig();
|
||||
config.setPlugins(descriptors);
|
||||
|
||||
config.validate(Set.of(), Set.of("repository-azure", "repository-gcs", "repository-s3"));
|
||||
}
|
||||
}
|
|
@ -35,6 +35,7 @@ import static org.hamcrest.CoreMatchers.containsString;
|
|||
import static org.hamcrest.CoreMatchers.not;
|
||||
import static org.hamcrest.CoreMatchers.nullValue;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
|
||||
@LuceneTestCase.SuppressFileSystems("*")
|
||||
public class RemovePluginActionTests extends ESTestCase {
|
||||
|
@ -271,6 +272,47 @@ public class RemovePluginActionTests extends ESTestCase {
|
|||
removePlugin("fake", home, randomBoolean());
|
||||
}
|
||||
|
||||
/**
|
||||
* Check that if a plugin exists that has since been migrated to a module, then it is still possible
|
||||
* to remove that plugin.
|
||||
*/
|
||||
public void testRemoveMigratedPluginsWhenInstalled() throws Exception {
|
||||
for (String id : List.of("repository-azure", "repository-gcs", "repository-s3")) {
|
||||
createPlugin(id);
|
||||
Files.createFile(env.pluginsFile().resolve(id).resolve("plugin.jar"));
|
||||
final MockTerminal terminal = removePlugin(id, home, randomBoolean());
|
||||
|
||||
assertThat(Files.exists(env.pluginsFile().resolve(id)), is(false));
|
||||
// This message shouldn't be printed if plugin was actually installed.
|
||||
assertThat(terminal.getErrorOutput(), not(containsString("plugin [" + id + "] is no longer a plugin")));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check that if we attempt to remove a plugin that has been migrated to a module, and that plugin is
|
||||
* not actually installed, then we print an appropriate message and exit with a success code.
|
||||
*/
|
||||
public void testRemoveMigratedPluginsWhenNotInstalled() throws Exception {
|
||||
for (String id : List.of("repository-azure", "repository-gcs", "repository-s3")) {
|
||||
final MockTerminal terminal = removePlugin(id, home, randomBoolean());
|
||||
assertThat(terminal.getErrorOutput(), containsString("plugin [" + id + "] is no longer a plugin"));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check that when removing (1) a regular, installed plugin and (2) an uninstalled plugin that has been migrated
|
||||
* to a module, then the overall removal succeeds, and a message is printed about the migrated pluging.
|
||||
*/
|
||||
public void testRemoveRegularInstalledPluginAndMigratedUninstalledPlugin() throws Exception {
|
||||
createPlugin("fake");
|
||||
Files.createFile(env.pluginsFile().resolve("fake").resolve("plugin.jar"));
|
||||
|
||||
final MockTerminal terminal = removePlugin(List.of("fake", "repository-s3"), home, randomBoolean());
|
||||
|
||||
assertThat(Files.exists(env.pluginsFile().resolve("fake")), is(false));
|
||||
assertThat(terminal.getErrorOutput(), containsString("plugin [repository-s3] is no longer a plugin"));
|
||||
}
|
||||
|
||||
private String expectedConfigDirPreservedMessage(final Path configDir) {
|
||||
return "-> preserving plugin config files [" + configDir + "] in case of upgrade; use --purge if not needed";
|
||||
}
|
||||
|
|
|
@ -28,6 +28,7 @@ import java.nio.file.Path;
|
|||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.empty;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.hasSize;
|
||||
|
@ -42,6 +43,7 @@ public class SyncPluginsActionTests extends ESTestCase {
|
|||
private Environment env;
|
||||
private SyncPluginsAction action;
|
||||
private PluginsConfig config;
|
||||
private MockTerminal terminal;
|
||||
|
||||
@Override
|
||||
@Before
|
||||
|
@ -55,7 +57,8 @@ public class SyncPluginsActionTests extends ESTestCase {
|
|||
Files.createDirectories(env.configFile());
|
||||
Files.createDirectories(env.pluginsFile());
|
||||
|
||||
action = new SyncPluginsAction(new MockTerminal(), env);
|
||||
terminal = new MockTerminal();
|
||||
action = new SyncPluginsAction(terminal, env);
|
||||
config = new PluginsConfig();
|
||||
}
|
||||
|
||||
|
@ -187,6 +190,51 @@ public class SyncPluginsActionTests extends ESTestCase {
|
|||
assertThat(pluginChanges.upgrade.get(0).getId(), equalTo("my-plugin"));
|
||||
}
|
||||
|
||||
/**
 * Check that the config file can still specify plugins that have been migrated to modules, but
 * they are ignored rather than installed.
 */
public void test_getPluginChanges_withModularisedPluginsToInstall_ignoresPlugins() throws Exception {
    // All three repository plugins listed here have been converted into bundled modules.
    config.setPlugins(
        List.of(new PluginDescriptor("repository-azure"), new PluginDescriptor("repository-gcs"), new PluginDescriptor("repository-s3"))
    );

    final PluginChanges pluginChanges = action.getPluginChanges(config, Optional.empty());

    // Nothing to install, remove or upgrade — the migrated entries are skipped...
    assertThat(pluginChanges.isEmpty(), is(true));
    // ...apart from a warning per plugin on the terminal's error output.
    for (String plugin : List.of("repository-azure", "repository-gcs", "repository-s3")) {
        assertThat(
            terminal.getErrorOutput(),
            containsString(
                "[" + plugin + "] is no longer a plugin but instead a module packaged with this distribution of Elasticsearch"
            )
        );
    }
}
|
||||
|
||||
/**
 * Check that if there are plugins already installed that have been migrated to modules, then they are removed,
 * even if they are specified in the config file.
 */
public void test_getPluginChanges_withModularisedPluginsToRemove_removesPlugins() throws Exception {
    // The migrated plugins are physically present on disk (e.g. left over from an
    // earlier Elasticsearch version)...
    createPlugin("repository-azure");
    createPlugin("repository-gcs");
    createPlugin("repository-s3");
    // ...and are still listed in the plugins config file.
    config.setPlugins(
        List.of(new PluginDescriptor("repository-azure"), new PluginDescriptor("repository-gcs"), new PluginDescriptor("repository-s3"))
    );

    final PluginChanges pluginChanges = action.getPluginChanges(config, Optional.empty());

    // The sync must uninstall all three despite the config entries, because they are
    // now modules bundled with the distribution; nothing is installed or upgraded.
    assertThat(pluginChanges.isEmpty(), is(false));
    assertThat(pluginChanges.install, empty());
    assertThat(pluginChanges.remove, hasSize(3));
    assertThat(pluginChanges.upgrade, empty());
    assertThat(pluginChanges.remove.get(0).getId(), equalTo("repository-azure"));
    assertThat(pluginChanges.remove.get(1).getId(), equalTo("repository-gcs"));
    assertThat(pluginChanges.remove.get(2).getId(), equalTo("repository-s3"));
}
|
||||
|
||||
/**
|
||||
* Check that if there are no changes to apply, then the install and remove actions are not used.
|
||||
* This is a redundant test, really, because the sync action exits early if there are no
|
||||
|
|
6
docs/changelog/81870.yaml
Normal file
6
docs/changelog/81870.yaml
Normal file
|
@ -0,0 +1,6 @@
|
|||
pr: 81870
|
||||
summary: Convert repository plugins to modules
|
||||
area: Infra/Plugins
|
||||
type: enhancement
|
||||
issues:
|
||||
- 81652
|
|
@ -126,12 +126,11 @@ sudo bin/elasticsearch-plugin install [plugin_id] [plugin_id] ... [plugin_id]
|
|||
Each `plugin_id` can be any valid form for installing a single plugin (e.g., the
|
||||
name of a core plugin, or a custom URL).
|
||||
|
||||
For instance, to install the core <<analysis-icu,ICU plugin>>, and
|
||||
<<repository-s3,S3 repository plugin>> run the following command:
|
||||
For instance, to install the core <<analysis-icu,ICU plugin>>, run the following command:
|
||||
|
||||
[source,shell]
|
||||
-----------------------------------
|
||||
sudo bin/elasticsearch-plugin install analysis-icu repository-s3
|
||||
sudo bin/elasticsearch-plugin install analysis-icu
|
||||
-----------------------------------
|
||||
|
||||
This command will install the versions of the plugins that matches your
|
||||
|
|
|
@ -18,7 +18,7 @@ Looking for a hosted solution for Elasticsearch on AWS? Check out https://www.el
|
|||
The Elasticsearch `cloud-aws` plugin has been split into two separate plugins:
|
||||
|
||||
* <<discovery-ec2>> (`discovery-ec2`)
|
||||
* <<repository-s3>> (`repository-s3`)
|
||||
* {ref}/repository-s3.html[`repository-s3`]
|
||||
|
||||
[role="exclude",id="cloud-azure"]
|
||||
=== Azure Cloud Plugin
|
||||
|
@ -26,7 +26,7 @@ The Elasticsearch `cloud-aws` plugin has been split into two separate plugins:
|
|||
The `cloud-azure` plugin has been split into two separate plugins:
|
||||
|
||||
* <<discovery-azure-classic>> (`discovery-azure-classic`)
|
||||
* <<repository-azure>> (`repository-azure`)
|
||||
* {ref}/repository-azure.html[`repository-azure`]
|
||||
|
||||
|
||||
[role="exclude",id="cloud-gce"]
|
||||
|
|
|
@ -6,27 +6,17 @@ functionality in Elasticsearch by adding repositories backed by the cloud or
|
|||
by distributed file systems:
|
||||
|
||||
[discrete]
|
||||
==== Core repository plugins
|
||||
==== Official repository plugins
|
||||
|
||||
The core repository plugins are:
|
||||
NOTE: Support for S3, GCS and Azure repositories is now bundled in {es} by
|
||||
default.
|
||||
|
||||
<<repository-s3,S3 Repository>>::
|
||||
|
||||
The S3 repository plugin adds support for using S3 as a repository.
|
||||
|
||||
<<repository-azure,Azure Repository>>::
|
||||
|
||||
The Azure repository plugin adds support for using Azure as a repository.
|
||||
The official repository plugins are:
|
||||
|
||||
<<repository-hdfs,HDFS Repository>>::
|
||||
|
||||
The Hadoop HDFS Repository plugin adds support for using HDFS as a repository.
|
||||
|
||||
<<repository-gcs,Google Cloud Storage Repository>>::
|
||||
|
||||
The GCS repository plugin adds support for using Google Cloud Storage service as a repository.
|
||||
|
||||
|
||||
[discrete]
|
||||
=== Community contributed repository plugins
|
||||
|
||||
|
@ -34,11 +24,4 @@ The following plugin has been contributed by our community:
|
|||
|
||||
* https://github.com/BigDataBoutique/elasticsearch-repository-swift[Openstack Swift] (by Wikimedia Foundation and BigData Boutique)
|
||||
|
||||
|
||||
include::repository-azure.asciidoc[]
|
||||
|
||||
include::repository-s3.asciidoc[]
|
||||
|
||||
include::repository-hdfs.asciidoc[]
|
||||
|
||||
include::repository-gcs.asciidoc[]
|
||||
|
|
|
@ -36,6 +36,7 @@ include::migrate_8_0/logging-changes.asciidoc[]
|
|||
include::migrate_8_0/mapping-changes.asciidoc[]
|
||||
include::migrate_8_0/packaging-changes.asciidoc[]
|
||||
include::migrate_8_0/painless-changes.asciidoc[]
|
||||
include::migrate_8_0/plugin-changes.asciidoc[]
|
||||
include::migrate_8_0/rest-api-changes.asciidoc[]
|
||||
include::migrate_8_0/system-req-changes.asciidoc[]
|
||||
include::migrate_8_0/transform.asciidoc[]
|
||||
|
@ -94,7 +95,7 @@ The `elasticsearch-setup-passwords` tool is deprecated in 8.0. To
|
|||
manually reset the password for built-in users (including the `elastic` user), use
|
||||
the {ref}/reset-password.html[`elasticsearch-reset-password`] tool, the {es}
|
||||
{ref}/security-api-change-password.html[change passwords API], or the
|
||||
User Management features in {kib}.
|
||||
User Management features in {kib}.
|
||||
`elasticsearch-setup-passwords` will be removed in a future release.
|
||||
|
||||
*Impact* +
|
||||
|
|
48
docs/reference/migration/migrate_8_0/plugin-changes.asciidoc
Normal file
48
docs/reference/migration/migrate_8_0/plugin-changes.asciidoc
Normal file
|
@ -0,0 +1,48 @@
|
|||
[discrete]
|
||||
[[breaking_80_plugin_changes]]
|
||||
==== Plugin changes
|
||||
|
||||
//NOTE: The notable-breaking-changes tagged regions are re-used in the
|
||||
//Installation and Upgrade Guide
|
||||
|
||||
//tag::notable-breaking-changes[]
|
||||
TIP: {ess-skip-section}
|
||||
|
||||
.The S3, GCS and Azure repository plugins are now included in Elasticsearch
|
||||
[%collapsible]
|
||||
====
|
||||
*Details* +
|
||||
In previous versions of {es}, in order to register a snapshot repository
|
||||
backed by Amazon S3, Google Cloud Storage (GCS) or Microsoft Azure Blob
|
||||
Storage, you first had to install the corresponding Elasticsearch plugin,
|
||||
for example `repository-s3`. These plugins are now included in {es} by
|
||||
default.
|
||||
|
||||
*Impact* +
|
||||
You no longer need to install the following plugins, and should not attempt
|
||||
to do so.
|
||||
|
||||
* `repository-azure`
|
||||
* `repository-gcs`
|
||||
* `repository-s3`
|
||||
|
||||
{es} and the `elasticsearch-plugin` CLI tool have been changed to tolerate
|
||||
attempted installation and removal of these plugins in order to avoid
|
||||
breaking any existing automation. In the future, attempting to install
|
||||
these plugins will be an error.
|
||||
|
||||
Specifically, the `elasticsearch-plugin` CLI tool will not fail if you
|
||||
attempt to install any of the above plugins, and will instead print a
|
||||
warning and skip the plugins. If any of these plugins are already
|
||||
installed, for example because you installed them when running an older
|
||||
version of {es}, then you can still remove them with
|
||||
`elasticsearch-plugin`. Attempting to remove them if they are not installed
|
||||
will succeed but print a warning.
|
||||
|
||||
If you run {es} using Docker and you are managing plugins using a
|
||||
{plugins}/manage-plugins-using-configuration-file.html[configuration file], then when
|
||||
{es} first starts after you upgrade it, it will remove the above plugins if
|
||||
they are already installed. If any of these plugins are specified in your
|
||||
configuration file, {es} will ignore them and emit a warning log message.
|
||||
====
|
||||
//end::notable-breaking-changes[]
|
|
@ -75,16 +75,15 @@ For more complex or time-consuming searches, you can use <<async-search>> with
|
|||
// tag::searchable-snapshot-repo-types[]
|
||||
Use any of the following repository types with searchable snapshots:
|
||||
|
||||
* {plugins}/repository-s3.html[AWS S3]
|
||||
* {plugins}/repository-gcs.html[Google Cloud Storage]
|
||||
* {plugins}/repository-azure.html[Azure Blob Storage]
|
||||
* <<repository-s3,AWS S3>>
|
||||
* <<repository-gcs,Google Cloud Storage>>
|
||||
* <<repository-azure,Azure Blob Storage>>
|
||||
* {plugins}/repository-hdfs.html[Hadoop Distributed File Store (HDFS)]
|
||||
* <<snapshots-filesystem-repository,Shared filesystems>> such as NFS
|
||||
* <<snapshots-read-only-repository,Read-only HTTP and HTTPS repositories>>
|
||||
|
||||
You can also use alternative implementations of these repository types, for
|
||||
instance
|
||||
{plugins}/repository-s3-client.html#repository-s3-compatible-services[MinIO],
|
||||
instance <<repository-s3-client,MinIO>>,
|
||||
as long as they are fully compatible. Use the <<repo-analysis-api>> API
|
||||
to analyze your repository's suitability for use with searchable snapshots.
|
||||
// end::searchable-snapshot-repo-types[]
|
||||
|
|
|
@ -55,9 +55,9 @@ call instead of reloading after each modification.
|
|||
|
||||
There are reloadable secure settings for:
|
||||
|
||||
* {plugins}/repository-azure-client-settings.html[The Azure repository plugin]
|
||||
* <<repository-azure,The Azure repository plugin>>
|
||||
* {plugins}/discovery-ec2-usage.html#_configuring_ec2_discovery[The EC2 discovery plugin]
|
||||
* {plugins}/repository-gcs-client.html[The GCS repository plugin]
|
||||
* {plugins}/repository-s3-client.html[The S3 repository plugin]
|
||||
* <<repository-gcs,The GCS repository plugin>>
|
||||
* <<repository-s3,The S3 repository plugin>>
|
||||
* <<monitoring-settings>>
|
||||
* <<notification-settings>>
|
||||
|
|
|
@ -97,13 +97,11 @@ URL repository. See <<snapshots-read-only-repository>>.
|
|||
|
||||
More repository types are available through these official plugins:
|
||||
|
||||
* {plugins}/repository-s3.html[repository-s3] for S3 repository support
|
||||
* <<repository-s3, repository-s3>> for S3 repository support
|
||||
* {plugins}/repository-hdfs.html[repository-hdfs] for HDFS repository support in
|
||||
Hadoop environments
|
||||
* {plugins}/repository-azure.html[repository-azure] for Azure storage
|
||||
repositories
|
||||
* {plugins}/repository-gcs.html[repository-gcs] for Google Cloud Storage
|
||||
repositories
|
||||
* <<repository-azure,repository-azure>> for Azure storage repositories
|
||||
* <<repository-gcs,repository-gcs>> for Google Cloud Storage repositories
|
||||
--
|
||||
|
||||
`settings`::
|
||||
|
|
|
@ -124,13 +124,11 @@ See <<snapshots-read-only-repository>>.
|
|||
More repository types are available through these official
|
||||
plugins:
|
||||
|
||||
* {plugins}/repository-s3.html[repository-s3] for S3 repository support
|
||||
* <<repository-s3,repository-s3>> for S3 repository support
|
||||
* {plugins}/repository-hdfs.html[repository-hdfs] for HDFS repository support in
|
||||
Hadoop environments
|
||||
* {plugins}/repository-azure.html[repository-azure] for Azure storage
|
||||
repositories
|
||||
* {plugins}/repository-gcs.html[repository-gcs] for Google Cloud Storage
|
||||
repositories
|
||||
* <<repository-azure,repository-azure>> for Azure storage repositories
|
||||
* <<repository-gcs,repository-gcs>> for Google Cloud Storage repositories
|
||||
--
|
||||
|
||||
[[put-snapshot-repo-api-settings-param]]
|
||||
|
|
|
@ -106,6 +106,9 @@ clusters].
|
|||
If you run the {es} on your own hardware, you can use the following built-in
|
||||
snapshot repository types:
|
||||
|
||||
* <<repository-s3,AWS S3>>
|
||||
* <<repository-gcs,Google Cloud Storage>>
|
||||
* <<repository-azure,Azure>>
|
||||
* <<snapshots-filesystem-repository,Shared file system>>
|
||||
* <<snapshots-read-only-repository>>
|
||||
* <<snapshots-source-only-repository>>
|
||||
|
@ -113,10 +116,7 @@ snapshot repository types:
|
|||
[[snapshots-repository-plugins]]
|
||||
Other repository types are available through official plugins:
|
||||
|
||||
* {plugins}/repository-s3.html[AWS S3]
|
||||
* {plugins}/repository-gcs.html[Google Cloud Storage (GCS)]
|
||||
* {plugins}/repository-hdfs.html[Hadoop Distributed File System (HDFS)]
|
||||
* {plugins}/repository-azure.html[Microsoft Azure]
|
||||
|
||||
You can also use alternative implementations of these repository types, such as
|
||||
MinIO, as long as they're compatible. To verify a repository's compatibility,
|
||||
|
@ -335,3 +335,10 @@ When restoring a repository from a backup, you must not register the repository
|
|||
with {es} until the repository contents are fully restored. If you alter the
|
||||
contents of a repository while it is registered with {es} then the repository
|
||||
may become unreadable or may silently lose some of its contents.
|
||||
|
||||
|
||||
include::repository-s3.asciidoc[]
|
||||
|
||||
include::repository-gcs.asciidoc[]
|
||||
|
||||
include::repository-azure.asciidoc[]
|
||||
|
|
|
@ -1,12 +1,9 @@
|
|||
[[repository-azure]]
|
||||
=== Azure Repository Plugin
|
||||
=== Azure Repository
|
||||
|
||||
The Azure Repository plugin adds support for using https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction[Azure Blob storage] as a repository for
|
||||
You can use https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction[Azure Blob storage] as a repository for
|
||||
{ref}/modules-snapshots.html[Snapshot/Restore].
|
||||
|
||||
:plugin_name: repository-azure
|
||||
include::install_remove.asciidoc[]
|
||||
|
||||
[[repository-azure-usage]]
|
||||
==== Azure Repository
|
||||
|
||||
|
@ -35,7 +32,7 @@ For more information about these settings, see
|
|||
[IMPORTANT]
|
||||
.Supported Azure Storage Account types
|
||||
===============================================
|
||||
The Azure Repository plugin works with all Standard storage accounts
|
||||
The Azure repository type works with all Standard storage accounts
|
||||
|
||||
* Standard Locally Redundant Storage - `Standard_LRS`
|
||||
* Standard Zone-Redundant Storage - `Standard_ZRS`
|
|
@ -1,16 +1,13 @@
|
|||
[[repository-gcs]]
|
||||
=== Google Cloud Storage Repository Plugin
|
||||
=== Google Cloud Storage Repository
|
||||
|
||||
The GCS repository plugin adds support for using the https://cloud.google.com/storage/[Google Cloud Storage]
|
||||
You can use the https://cloud.google.com/storage/[Google Cloud Storage]
|
||||
service as a repository for {ref}/modules-snapshots.html[Snapshot/Restore].
|
||||
|
||||
:plugin_name: repository-gcs
|
||||
include::install_remove.asciidoc[]
|
||||
|
||||
[[repository-gcs-usage]]
|
||||
==== Getting started
|
||||
|
||||
The plugin uses the https://github.com/GoogleCloudPlatform/google-cloud-java/tree/master/google-cloud-clients/google-cloud-storage[Google Cloud Java Client for Storage]
|
||||
This repository type uses the https://github.com/GoogleCloudPlatform/google-cloud-java/tree/master/google-cloud-clients/google-cloud-storage[Google Cloud Java Client for Storage]
|
||||
to connect to the Storage service. If you are using
|
||||
https://cloud.google.com/storage/[Google Cloud Storage] for the first time, you
|
||||
must connect to the https://console.cloud.google.com/[Google Cloud Platform Console]
|
||||
|
@ -23,8 +20,8 @@ Cloud Storage Service for your project.
|
|||
The Google Cloud Storage service uses the concept of a
|
||||
https://cloud.google.com/storage/docs/key-terms[bucket] as a container for all
|
||||
the data. Buckets are usually created using the
|
||||
https://console.cloud.google.com/[Google Cloud Platform Console]. The plugin
|
||||
does not automatically create buckets.
|
||||
https://console.cloud.google.com/[Google Cloud Platform Console]. This
|
||||
repository type does not automatically create buckets.
|
||||
|
||||
To create a new bucket:
|
||||
|
||||
|
@ -43,10 +40,10 @@ https://cloud.google.com/storage/docs/quickstart-console#create_a_bucket[Google
|
|||
[[repository-gcs-service-authentication]]
|
||||
===== Service Authentication
|
||||
|
||||
The plugin must authenticate the requests it makes to the Google Cloud Storage
|
||||
The repository must authenticate the requests it makes to the Google Cloud Storage
|
||||
service. It is common for Google client libraries to employ a strategy named https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application[application default credentials].
|
||||
However, that strategy is only **partially supported** by Elasticsearch. The
|
||||
plugin operates under the Elasticsearch process, which runs with the security
|
||||
repository operates under the Elasticsearch process, which runs with the security
|
||||
manager enabled. The security manager obstructs the "automatic" credential discovery
|
||||
when the environment variable `GOOGLE_APPLICATION_CREDENTIALS` is used to point to a
|
||||
local file on disk. It can, however, retrieve the service account that is attached to
|
||||
|
@ -62,7 +59,7 @@ You have to obtain and provide https://cloud.google.com/iam/docs/overview#servic
|
|||
manually.
|
||||
|
||||
For detailed information about generating JSON service account files, see the https://cloud.google.com/storage/docs/authentication?hl=en#service_accounts[Google Cloud documentation].
|
||||
Note that the PKCS12 format is not supported by this plugin.
|
||||
Note that the PKCS12 format is not supported by this repository type.
|
||||
|
||||
Here is a summary of the steps:
|
||||
|
||||
|
@ -93,7 +90,7 @@ A JSON service account file looks like this:
|
|||
----
|
||||
// NOTCONSOLE
|
||||
|
||||
To provide this file to the plugin, it must be stored in the {ref}/secure-settings.html[Elasticsearch keystore]. You must
|
||||
To provide this file to the repository, it must be stored in the {ref}/secure-settings.html[Elasticsearch keystore]. You must
|
||||
add a `file` setting with the name `gcs.client.NAME.credentials_file` using the `add-file` subcommand.
|
||||
`NAME` is the name of the client configuration for the repository. The implicit client
|
||||
name is `default`, but a different client name can be specified in the
|
|
@ -1,20 +1,16 @@
|
|||
[[repository-s3]]
|
||||
=== S3 Repository Plugin
|
||||
=== S3 Repository
|
||||
|
||||
The S3 repository plugin adds support for using AWS S3 as a repository for
|
||||
{ref}/modules-snapshots.html[Snapshot/Restore].
|
||||
You can use AWS S3 as a repository for {ref}/modules-snapshots.html[Snapshot/Restore].
|
||||
|
||||
*If you are looking for a hosted solution of Elasticsearch on AWS, please visit
|
||||
https://www.elastic.co/cloud/.*
|
||||
|
||||
:plugin_name: repository-s3
|
||||
include::install_remove.asciidoc[]
|
||||
|
||||
[[repository-s3-usage]]
|
||||
==== Getting Started
|
||||
|
||||
The plugin provides a repository type named `s3` which may be used when creating
|
||||
a repository. The repository defaults to using
|
||||
To register an S3 repository, specify the type as `s3` when creating
|
||||
the repository. The repository defaults to using
|
||||
https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html[ECS
|
||||
IAM Role] or
|
||||
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html[EC2
|
||||
|
@ -84,7 +80,7 @@ bin/elasticsearch-keystore remove s3.client.default.secret_key
|
|||
bin/elasticsearch-keystore remove s3.client.default.session_token
|
||||
----
|
||||
|
||||
*All* client secure settings of this plugin are
|
||||
*All* client secure settings of this repository type are
|
||||
{ref}/secure-settings.html#reloadable-secure-settings[reloadable]. After you
|
||||
reload the settings, the internal `s3` clients, used to transfer the snapshot
|
||||
contents, will utilize the latest settings from the keystore. Any existing `s3`
|
||||
|
@ -125,7 +121,7 @@ settings belong in the `elasticsearch.yml` file.
|
|||
`protocol`::
|
||||
|
||||
The protocol to use to connect to S3. Valid values are either `http` or
|
||||
`https`. Defaults to `https`. When using HTTPS, this plugin validates the
|
||||
`https`. Defaults to `https`. When using HTTPS, this repository type validates the
|
||||
repository's certificate chain using the JVM-wide truststore. Ensure that
|
||||
the root certificate authority is in this truststore using the JVM's
|
||||
`keytool` tool.
|
||||
|
@ -208,7 +204,7 @@ pattern then you should set this setting to `true` when upgrading.
|
|||
===== S3-compatible services
|
||||
|
||||
There are a number of storage systems that provide an S3-compatible API, and
|
||||
the `repository-s3` plugin allows you to use these systems in place of AWS S3.
|
||||
the `repository-s3` type allows you to use these systems in place of AWS S3.
|
||||
To do so, you should set the `s3.client.CLIENT_NAME.endpoint` setting to the
|
||||
system's endpoint. This setting accepts IP addresses and hostnames and may
|
||||
include a port. For example, the endpoint may be `172.17.0.2` or
|
||||
|
@ -216,13 +212,13 @@ include a port. For example, the endpoint may be `172.17.0.2` or
|
|||
`http` if the endpoint does not support HTTPS.
|
||||
|
||||
https://minio.io[MinIO] is an example of a storage system that provides an
|
||||
S3-compatible API. The `repository-s3` plugin allows {es} to work with
|
||||
S3-compatible API. The `repository-s3` type allows {es} to work with
|
||||
MinIO-backed repositories as well as repositories stored on AWS S3. Other
|
||||
S3-compatible storage systems may also work with {es}, but these are not
|
||||
covered by the {es} test suite.
|
||||
|
||||
Note that some storage systems claim to be S3-compatible without correctly
|
||||
supporting the full S3 API. The `repository-s3` plugin requires full
|
||||
supporting the full S3 API. The `repository-s3` type requires full
|
||||
compatibility with S3. In particular it must support the same set of API
|
||||
endpoints, return the same errors in case of failures, and offer a consistency
|
||||
model no weaker than S3's when accessed concurrently by multiple nodes.
|
||||
|
@ -328,8 +324,8 @@ include::repository-shared-settings.asciidoc[]
|
|||
storage class for newly created objects, resulting in a mixed usage of
|
||||
storage classes. Additionally, S3 Lifecycle Policies can be used to manage
|
||||
the storage class of existing objects. Due to the extra complexity with the
|
||||
Glacier class lifecycle, it is not currently supported by the plugin. For
|
||||
more information about the different classes, see
|
||||
Glacier class lifecycle, it is not currently supported by this
|
||||
repository type. For more information about the different classes, see
|
||||
https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html[AWS
|
||||
Storage Classes Guide]
|
||||
|
|
@ -0,0 +1,12 @@
|
|||
`max_restore_bytes_per_sec`::
|
||||
|
||||
Throttles per node restore rate. Defaults to unlimited.
|
||||
Note that restores are also throttled through {ref}/recovery.html[recovery settings].
|
||||
|
||||
`max_snapshot_bytes_per_sec`::
|
||||
|
||||
Throttles per node snapshot rate. Defaults to `40mb` per second.
|
||||
|
||||
`readonly`::
|
||||
|
||||
Makes repository read-only. Defaults to `false`.
|
|
@ -22,7 +22,4 @@ configure(subprojects.findAll { it.parent.path == project.path }) {
|
|||
if (project.file('src/main/bin').exists()) {
|
||||
throw new InvalidModelException("Modules cannot contain bin files")
|
||||
}
|
||||
if (project.file('src/main/config').exists()) {
|
||||
throw new InvalidModelException("Modules cannot contain config files")
|
||||
}
|
||||
}
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue