Introduce Cloud docker variant (#74980)

Closes #74795.

Introduce two Docker image variants for Cloud. The first bundles
(actually installs) the S3, Azure and GCS repository plugins. The
second bundles all official plugins, but only installs the repository
plugins.

Both images also bundle Filebeat and Metricbeat.

The testing utils have been refactored to introduce a `docker`
sub-package. This allows the static `Docker.containerId` to be
shared without needing all the code in one big class. The code for
checking file ownership / permissions has also been refactored to
a more Hamcrest style, using a custom Docker file matcher.
This commit is contained in:
Rory Hunter 2021-08-20 20:11:05 +01:00 committed by GitHub
parent 484a0780f8
commit a6f2a4df8b
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
29 changed files with 817 additions and 210 deletions

View file

@ -18,7 +18,14 @@ public enum DockerBase {
UBI("docker.elastic.co/ubi8/ubi-minimal:latest", "-ubi8"),
// The Iron Bank base image is UBI (albeit hardened), but we are required to parameterize the Docker build
IRON_BANK("${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG}", "-ironbank");
IRON_BANK("${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG}", "-ironbank"),
// Base image with extras for Cloud
CLOUD("centos:8", "-cloud"),
// Based on CLOUD above, with more extras. We don't set a base image because
// we programmatically extend from the Cloud image.
CLOUD_ESS(null, "-cloud-ess");
private final String image;
private final String suffix;

View file

@ -161,6 +161,12 @@ public class InternalDistributionDownloadPlugin implements InternalPlugin {
if (distribution.getType() == InternalElasticsearchDistributionTypes.DOCKER_IRONBANK) {
return projectName + "ironbank-docker" + archString + "-export";
}
if (distribution.getType() == InternalElasticsearchDistributionTypes.DOCKER_CLOUD) {
return projectName + "cloud-docker" + archString + "-export";
}
if (distribution.getType() == InternalElasticsearchDistributionTypes.DOCKER_CLOUD_ESS) {
return projectName + "cloud-ess-docker" + archString + "-export";
}
return projectName + distribution.getType().getName();
}

View file

@ -0,0 +1,26 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.gradle.internal.distribution;
import org.elasticsearch.gradle.ElasticsearchDistributionType;
/**
 * Distribution type for the Cloud Docker image variant. Per the commit description,
 * this image bundles and installs the S3, Azure and GCS repository plugins, plus
 * Filebeat and Metricbeat.
 */
public class DockerCloudElasticsearchDistributionType implements ElasticsearchDistributionType {
// Package-private: instances are only created via InternalElasticsearchDistributionTypes.
DockerCloudElasticsearchDistributionType() {}
@Override
public String getName() {
return "dockerCloud";
}
// Marks this distribution as a Docker image so download/build logic treats it accordingly.
@Override
public boolean isDocker() {
return true;
}
}

View file

@ -0,0 +1,26 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.gradle.internal.distribution;
import org.elasticsearch.gradle.ElasticsearchDistributionType;
/**
 * Distribution type for the Cloud ESS Docker image variant. Per the commit description,
 * this image extends the Cloud image and additionally bundles all official plugins,
 * while only installing the repository plugins.
 */
public class DockerCloudEssElasticsearchDistributionType implements ElasticsearchDistributionType {
// Package-private: instances are only created via InternalElasticsearchDistributionTypes.
DockerCloudEssElasticsearchDistributionType() {}
@Override
public String getName() {
return "dockerCloudEss";
}
// Marks this distribution as a Docker image so download/build logic treats it accordingly.
@Override
public boolean isDocker() {
return true;
}
}

View file

@ -18,6 +18,16 @@ public class InternalElasticsearchDistributionTypes {
public static ElasticsearchDistributionType DOCKER = new DockerElasticsearchDistributionType();
public static ElasticsearchDistributionType DOCKER_UBI = new DockerUbiElasticsearchDistributionType();
public static ElasticsearchDistributionType DOCKER_IRONBANK = new DockerIronBankElasticsearchDistributionType();
public static ElasticsearchDistributionType DOCKER_CLOUD = new DockerCloudElasticsearchDistributionType();
public static ElasticsearchDistributionType DOCKER_CLOUD_ESS = new DockerCloudEssElasticsearchDistributionType();
public static List<ElasticsearchDistributionType> ALL_INTERNAL = List.of(DEB, RPM, DOCKER, DOCKER_UBI, DOCKER_IRONBANK);
public static List<ElasticsearchDistributionType> ALL_INTERNAL = List.of(
DEB,
RPM,
DOCKER,
DOCKER_UBI,
DOCKER_IRONBANK,
DOCKER_CLOUD,
DOCKER_CLOUD_ESS
);
}

View file

@ -55,6 +55,8 @@ import static org.elasticsearch.gradle.distribution.ElasticsearchDistributionTyp
import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.ALL_INTERNAL;
import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DEB;
import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER;
import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER_CLOUD;
import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER_CLOUD_ESS;
import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER_IRONBANK;
import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER_UBI;
import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.RPM;
@ -231,6 +233,8 @@ public class DistroTestPlugin implements Plugin<Project> {
lifecyleTasks.put(DOCKER, project.getTasks().register(taskPrefix + ".docker"));
lifecyleTasks.put(DOCKER_UBI, project.getTasks().register(taskPrefix + ".docker-ubi"));
lifecyleTasks.put(DOCKER_IRONBANK, project.getTasks().register(taskPrefix + ".docker-ironbank"));
lifecyleTasks.put(DOCKER_CLOUD, project.getTasks().register(taskPrefix + ".docker-cloud"));
lifecyleTasks.put(DOCKER_CLOUD_ESS, project.getTasks().register(taskPrefix + ".docker-cloud-ess"));
lifecyleTasks.put(ARCHIVE, project.getTasks().register(taskPrefix + ".archives"));
lifecyleTasks.put(DEB, project.getTasks().register(taskPrefix + ".packages"));
lifecyleTasks.put(RPM, lifecyleTasks.get(DEB));

View file

@ -14,7 +14,7 @@ public interface ElasticsearchDistributionType {
default boolean isDocker() {
return false;
};
}
default boolean shouldExtract() {
return false;

View file

@ -1,13 +1,12 @@
import org.elasticsearch.gradle.Architecture
import org.elasticsearch.gradle.internal.DockerBase
import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.internal.DockerBase
import org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes
import org.elasticsearch.gradle.internal.docker.DockerBuildTask
import org.elasticsearch.gradle.internal.docker.ShellRetry
import org.elasticsearch.gradle.internal.docker.TransformLog4jConfigFilter
import org.elasticsearch.gradle.internal.info.BuildParams
import org.elasticsearch.gradle.internal.testfixtures.TestFixturesPlugin
import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER
import java.nio.file.Path
@ -16,20 +15,39 @@ apply plugin: 'elasticsearch.test.fixtures'
apply plugin: 'elasticsearch.internal-distribution-download'
apply plugin: 'elasticsearch.rest-resources'
String buildId = providers.systemProperty('build.id').forUseAtConfigurationTime().getOrNull()
boolean useLocalArtifacts = buildId != null && buildId.isBlank() == false
repositories {
// Define a repository that allows Gradle to fetch a resource from GitHub. This
// is only used to fetch the `tini` binary, when building the Iron Bank docker image
// for testing purposes.
repositories {
// for testing purposes. While in theory we could download `tini` this way for the
// other Docker variants, the need for the main image to be rebuildable by Docker Hub
// means that the Dockerfile itself has to fetch the binary.
ivy {
url 'https://github.com/'
patternLayout {
artifact '/[organisation]/[module]/releases/download/v[revision]/[ext]'
artifact '/[organisation]/[module]/releases/download/v[revision]/[module]-[classifier]'
}
metadataSources { artifact() }
content { includeGroup 'krallin' }
}
// This is required in Gradle 6.0+ as metadata file (ivy.xml)
// is mandatory. Docs linked below this code section
// Cloud builds bundle some beats
ivy {
if (useLocalArtifacts) {
url "file://${buildDir}/artifacts/"
patternLayout {
artifact '/[organisation]/[module]-[revision]-linux-[classifier].[ext]'
}
} else {
url "https://${VersionProperties.isElasticsearchSnapshot() ? 'snapshots' : 'artifacts'}-no-kpi.elastic.co/"
patternLayout {
artifact '/downloads/[organization]/[module]/[module]-[revision]-linux-[classifier].[ext]'
}
}
metadataSources { artifact() }
content { includeGroup 'beats' }
}
}
@ -40,31 +58,32 @@ configurations {
dockerSource
log4jConfig
tini
repositoryPlugins
nonRepositoryPlugins
filebeat
metricbeat
}
String beatsArch = Architecture.current() == Architecture.AARCH64 ? 'arm64' : 'x86_64'
String tiniArch = Architecture.current() == Architecture.AARCH64 ? 'arm64' : 'amd64'
dependencies {
aarch64DockerSource project(path: ":distribution:archives:linux-aarch64-tar", configuration: 'default')
dockerSource project(path: ":distribution:archives:linux-tar", configuration: 'default')
log4jConfig project(path: ":distribution", configuration: 'log4jConfig')
tini 'krallin:tini:0.19.0@tini-amd64'
tini "krallin:tini:0.19.0:${tiniArch}"
repositoryPlugins project(path: ':plugins', configuration: 'repositoryPlugins')
nonRepositoryPlugins project(path: ':plugins', configuration: 'nonRepositoryPlugins')
filebeat "beats:filebeat:${VersionProperties.elasticsearch}:${beatsArch}@tar.gz"
metricbeat "beats:metricbeat:${VersionProperties.elasticsearch}:${beatsArch}@tar.gz"
}
ext.expansions = { Architecture architecture, DockerBase base ->
String buildArgs = ''
if (base == DockerBase.IRON_BANK) {
buildArgs = """
ARG BASE_REGISTRY=registry1.dso.mil
ARG BASE_IMAGE=ironbank/redhat/ubi/ubi8
ARG BASE_TAG=8.4
"""
}
def (major,minor) = VersionProperties.elasticsearch.split("\\.")
return [
'base_image' : base.getImage(),
'base_image' : base.image,
'bin_dir' : base == DockerBase.IRON_BANK ? 'scripts' : 'bin',
'build_args' : buildArgs,
'build_date' : BuildParams.buildDate,
'config_dir' : base == DockerBase.IRON_BANK ? 'scripts' : 'config',
'git_revision' : BuildParams.gitRevision,
@ -87,10 +106,14 @@ class SquashNewlinesFilter extends FilterReader {
}
}
// Converts an identifier such as "CLOUD_ESS" to "CloudEss": splits on runs of
// non-alphanumeric characters, keeps each segment's first character as-is and
// lowercases the remainder, then joins the segments together.
// NOTE(review): an input starting with a non-alphanumeric character would produce
// an empty leading segment and make substring(0, 1) throw — assumed not to occur
// for DockerBase enum names; confirm if used more widely.
private static String toCamel(String input) {
return input.split("[^a-zA-Z0-9]").collect({ it.substring(0, 1) + it.substring(1).toLowerCase(Locale.ROOT) }).join("")
}
private static String taskName(String prefix, Architecture architecture, DockerBase base, String suffix) {
return prefix +
(architecture == Architecture.AARCH64 ? 'Aarch64' : '') +
(base == DockerBase.UBI ? 'Ubi' : (base == DockerBase.IRON_BANK ? 'IronBank' : '')) +
(base == DockerBase.CENTOS ? "" : toCamel(base.name())) +
suffix
}
@ -150,7 +173,7 @@ elasticsearch_distributions {
Architecture.values().each { eachArchitecture ->
"docker_${eachArchitecture == Architecture.AARCH64 ? '_aarch64' : ''}" {
architecture = eachArchitecture
type = DOCKER
type = InternalElasticsearchDistributionTypes.DOCKER
version = VersionProperties.getElasticsearch()
failIfUnavailable = false // This ensures we don't attempt to build images if docker is unavailable
}
@ -216,6 +239,20 @@ void addBuildDockerContextTask(Architecture architecture, DockerBase base) {
}
}
if (base == DockerBase.CLOUD) {
from configurations.repositoryPlugins
from configurations.filebeat
from configurations.metricbeat
// For some reason, the artifact name can differ depending on what repository we used.
rename ~/((?:file|metric)beat)-.*\.tar\.gz$/, "\$1-${VersionProperties.elasticsearch}.tar.gz"
into('bin') {
from(project.projectDir.toPath().resolve('src/docker/cloud')) {
expand([ version: VersionProperties.elasticsearch ])
}
}
}
onlyIf { Architecture.current() == architecture }
}
@ -226,7 +263,7 @@ void addBuildDockerContextTask(Architecture architecture, DockerBase base) {
}
}
void addUnpackDockerContextTask(Architecture architecture, DockerBase base) {
void addTransformDockerContextTask(Architecture architecture, DockerBase base) {
tasks.register(taskName("transform", architecture, base, "DockerContext"), Sync) {
TaskProvider<Tar> buildContextTask = tasks.named(taskName("build", architecture, base, "DockerContext"))
dependsOn(buildContextTask)
@ -238,7 +275,7 @@ void addUnpackDockerContextTask(Architecture architecture, DockerBase base) {
from(tarTree("${project.buildDir}/distributions/${archiveName}.tar.gz")) {
eachFile { FileCopyDetails details ->
if (details.name.equals("Dockerfile")) {
filter { it.replaceAll('^RUN curl.*artifacts-no-kpi.*$', "COPY ${distributionName} /opt/elasticsearch.tar.gz")}
filter { it.replaceAll('^RUN curl.*artifacts-no-kpi.*$', "COPY ${distributionName} /tmp/elasticsearch.tar.gz") }
}
}
}
@ -266,12 +303,20 @@ void addUnpackDockerContextTask(Architecture architecture, DockerBase base) {
}
private List<String> generateTags(DockerBase base) {
String version = VersionProperties.elasticsearch
private static List<String> generateTags(DockerBase base) {
final String version = VersionProperties.elasticsearch
String image = "elasticsearch${base.suffix}"
String namespace = 'elasticsearch'
if (base == DockerBase.CLOUD || base == DockerBase.CLOUD_ESS) {
namespace += '-ci'
}
return [
"elasticsearch${base.suffix}:test",
"elasticsearch${base.suffix}:${version}",
"docker.elastic.co/elasticsearch/elasticsearch${base.suffix}:${version}"
"${image}:test",
"${image}:${version}",
"docker.elastic.co/${namespace}/${image}:${version}"
]
}
@ -279,10 +324,10 @@ void addBuildDockerImageTask(Architecture architecture, DockerBase base) {
final TaskProvider<DockerBuildTask> buildDockerImageTask =
tasks.register(taskName("build", architecture, base, "DockerImage"), DockerBuildTask) {
TaskProvider<Tar> transformTask = tasks.named(taskName("transform", architecture, base, "DockerContext"))
TaskProvider<Sync> transformTask = tasks.named(taskName("transform", architecture, base, "DockerContext"))
dependsOn(transformTask)
dockerContext.fileProvider(transformTask.map { it.destinationDir })
dockerContext.fileProvider(transformTask.map { Sync task -> task.getDestinationDir() })
tags = generateTags(base)
@ -301,7 +346,7 @@ void addBuildDockerImageTask(Architecture architecture, DockerBase base) {
baseImages = [baseImage]
buildArgs = buildArgsMap
} else if (base == DockerBase.CENTOS) {
} else if (base == DockerBase.CENTOS || base == DockerBase.CLOUD) {
baseImages = ['alpine:3.13', base.image]
} else {
baseImages = [base.image]
@ -317,13 +362,61 @@ void addBuildDockerImageTask(Architecture architecture, DockerBase base) {
}
}
// Registers the build-context and image-build tasks for the Cloud ESS Docker variant
// for the given architecture. Unlike the other variants, the ESS image has no fixed
// base image: it is built FROM the locally-built Cloud image (see DockerBase.CLOUD_ESS,
// whose image field is null), so the image-build task depends on the Cloud image task.
void addBuildEssDockerImageTask(Architecture architecture) {
DockerBase base = DockerBase.CLOUD_ESS
String arch = architecture == Architecture.AARCH64 ? '-aarch64' : ''
String contextDir = "${project.buildDir}/docker-context/elasticsearch${base.suffix}-${VersionProperties.elasticsearch}-docker-build-context${arch}"
// Assemble the Docker build context: the non-repository plugin zips plus a
// Dockerfile rendered from the Dockerfile.cloud-ess template.
final TaskProvider<Sync> buildContextTask =
tasks.register(taskName('build', architecture, base, 'DockerContext'), Sync) {
into contextDir
final Path projectDir = project.projectDir.toPath()
into("plugins") {
from configurations.nonRepositoryPlugins
}
from(projectDir.resolve("src/docker/Dockerfile.cloud-ess")) {
// base_image points at the locally-tagged Cloud image for this version.
expand([
base_image: "elasticsearch${DockerBase.CLOUD.suffix}:${VersionProperties.elasticsearch}"
])
filter SquashNewlinesFilter
rename ~/Dockerfile\.cloud-ess$/, 'Dockerfile'
}
}
final TaskProvider<DockerBuildTask> buildDockerImageTask =
tasks.register(taskName("build", architecture, base, "DockerImage"), DockerBuildTask) {
// The ESS image extends the Cloud image, so that image must be built first.
TaskProvider<DockerBuildTask> buildCloudTask = tasks.named(taskName("build", architecture, DockerBase.CLOUD, "DockerImage"))
dependsOn(buildCloudTask)
dependsOn(buildContextTask)
dockerContext.fileProvider(buildContextTask.map { it.getDestinationDir() })
// Empty: the base image is built locally rather than pulled, so there is
// nothing for the task to pre-fetch.
baseImages = []
tags = generateTags(base)
// Docker images can only be built for the host's own architecture.
onlyIf { Architecture.current() == architecture }
}
tasks.named("assemble").configure {
dependsOn(buildDockerImageTask)
}
}
for (final Architecture architecture : Architecture.values()) {
for (final DockerBase base : DockerBase.values()) {
if (base == DockerBase.CLOUD_ESS) {
continue
}
addBuildDockerContextTask(architecture, base)
addUnpackDockerContextTask(architecture, base)
addTransformDockerContextTask(architecture, base)
addBuildDockerImageTask(architecture, base)
}
addBuildEssDockerImageTask(architecture)
}
/*
@ -340,10 +433,18 @@ subprojects { Project subProject ->
base = DockerBase.UBI
} else if (subProject.name.contains('ironbank-')) {
base = DockerBase.IRON_BANK
} else if (subProject.name.contains('cloud-ess-')) {
base = DockerBase.CLOUD_ESS
} else if (subProject.name.contains('cloud-')) {
base = DockerBase.CLOUD
}
final String arch = architecture == Architecture.AARCH64 ? '-aarch64' : ''
final String extension = base == DockerBase.UBI ? 'ubi.tar' : (base == DockerBase.IRON_BANK ? 'ironbank.tar' : 'docker.tar')
final String extension = base == DockerBase.UBI ? 'ubi.tar' :
(base == DockerBase.IRON_BANK ? 'ironbank.tar' :
(base == DockerBase.CLOUD ? 'cloud.tar' :
(base == DockerBase.CLOUD_ESS ? 'cloud-ess.tar' :
'docker.tar')))
final String artifactName = "elasticsearch${arch}${base.suffix}_test"
final String exportTaskName = taskName("export", architecture, base, 'DockerImage')

View file

@ -0,0 +1,2 @@
// This file is intentionally blank. All configuration of the
// export is done in the parent project.

View file

@ -0,0 +1,2 @@
// This file is intentionally blank. All configuration of the
// export is done in the parent project.

View file

@ -0,0 +1,2 @@
// This file is intentionally blank. All configuration of the
// export is done in the parent project.

View file

@ -0,0 +1,2 @@
// This file is intentionally blank. All configuration of the
// export is done in the parent project.

View file

@ -55,7 +55,9 @@ RUN set -eux ; \\
# Extract Elasticsearch artifact
################################################################################
${build_args}
ARG BASE_REGISTRY=registry1.dso.mil
ARG BASE_IMAGE=ironbank/redhat/ubi/ubi8
ARG BASE_TAG=8.4
FROM ${base_image} AS builder
@ -224,16 +226,16 @@ WORKDIR /usr/share/elasticsearch
<% if (docker_base == "iron_bank") {
// Iron Bank always copies the local artifact
%>
COPY elasticsearch-${version}-linux-x86_64.tar.gz /opt/elasticsearch.tar.gz
COPY elasticsearch-${version}-linux-x86_64.tar.gz /tmp/elasticsearch.tar.gz
<% } else {
// Fetch the appropriate Elasticsearch distribution for this architecture.
// Keep this command on one line - it is replaced with a `COPY` during local builds.
// It uses the `arch` command to fetch the correct distro for the build machine.
%>
RUN curl --retry 10 -S -L --output /opt/elasticsearch.tar.gz https://artifacts-no-kpi.elastic.co/downloads/elasticsearch/elasticsearch-${version}-linux-\$(arch).tar.gz
RUN curl --retry 10 -S -L --output /tmp/elasticsearch.tar.gz https://artifacts-no-kpi.elastic.co/downloads/elasticsearch/elasticsearch-${version}-linux-\$(arch).tar.gz
<% } %>
RUN tar -zxf /opt/elasticsearch.tar.gz --strip-components=1
RUN tar -zxf /tmp/elasticsearch.tar.gz --strip-components=1
# The distribution includes a `config` directory, no need to create it
COPY ${config_dir}/elasticsearch.yml config/
@ -259,6 +261,27 @@ RUN sed -i -e 's/ES_DISTRIBUTION_TYPE=tar/ES_DISTRIBUTION_TYPE=docker/' bin/elas
chmod 0775 bin config config/jvm.options.d data logs plugins && \\
find config -type f -exec chmod 0664 {} +
<% if (docker_base == "cloud") { %>
# Preinstall common plugins
COPY repository-s3-${version}.zip repository-gcs-${version}.zip repository-azure-${version}.zip /tmp/
RUN bin/elasticsearch-plugin install --batch \\
file:/tmp/repository-s3-${version}.zip \\
file:/tmp/repository-gcs-${version}.zip \\
file:/tmp/repository-azure-${version}.zip
<% /* I tried to use `ADD` here, but I couldn't force it to do what I wanted */ %>
COPY filebeat-${version}.tar.gz metricbeat-${version}.tar.gz /tmp/
RUN mkdir -p /opt/filebeat /opt/metricbeat && \\
tar xf /tmp/filebeat-${version}.tar.gz -C /opt/filebeat --strip-components=1 && \\
tar xf /tmp/metricbeat-${version}.tar.gz -C /opt/metricbeat --strip-components=1
# Add plugins infrastructure
RUN mkdir -p /opt/plugins/archive
COPY bin/plugin-wrapper.sh /opt/plugins
# These are the correct permissions for both the directories and the script
RUN chmod -R 0555 /opt/plugins
<% } %>
<% if (docker_base == "ubi" || docker_base == "iron_bank") { %>
################################################################################
@ -324,6 +347,10 @@ COPY --from=builder --chown=0:0 /usr/share/elasticsearch /usr/share/elasticsearc
COPY --from=builder --chown=0:0 /bin/tini /bin/tini
<% } %>
<% if (docker_base == 'cloud') { %>
COPY --from=builder --chown=0:0 /opt /opt
<% } %>
ENV PATH /usr/share/elasticsearch/bin:\$PATH
COPY ${bin_dir}/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
@ -387,14 +414,23 @@ RUN mkdir /licenses && cp LICENSE.txt /licenses/LICENSE
COPY LICENSE /licenses/LICENSE.addendum
<% } %>
USER elasticsearch:root
<% if (docker_base == "cloud") { %>
ENTRYPOINT ["/bin/tini", "--"]
CMD ["/app/elasticsearch.sh"]
# Generate a stub command that will be overwritten at runtime
RUN mkdir /app && \\
echo -e '#!/bin/sh\\nexec /usr/local/bin/docker-entrypoint.sh eswrapper' > /app/elasticsearch.sh && \\
chmod 0555 /app/elasticsearch.sh
<% } else { %>
# Our actual entrypoint is `tini`, a minimal but functional init program. It
# calls the entrypoint we provide, while correctly forwarding signals.
ENTRYPOINT ["/bin/tini", "--", "/usr/local/bin/docker-entrypoint.sh"]
# Dummy overridable parameter parsed by entrypoint
CMD ["eswrapper"]
<% } %>
USER elasticsearch:root
<% if (docker_base == 'iron_bank') { %>
HEALTHCHECK --interval=10s --timeout=5s --start-period=1m --retries=5 CMD curl -I -f --max-time 5 http://localhost:9200 || exit 1

View file

@ -0,0 +1,12 @@
# Template for the Cloud ESS image. ${base_image} is expanded by Gradle to the
# locally-built Cloud image (see addBuildEssDockerImageTask in the build script).
# Stage 1: stage the bundled plugin archives with root ownership and read-only perms.
FROM ${base_image} AS builder
USER root
COPY plugins/*.zip /opt/plugins/archive
RUN chown root.root /opt/plugins/archive/*
RUN chmod 0444 /opt/plugins/archive/*
# Stage 2: start again from the Cloud image and copy in only the prepared
# plugin archive directory, keeping the final image free of build artifacts.
FROM ${base_image}
COPY --from=builder /opt/plugins /opt/plugins

View file

@ -0,0 +1,34 @@
#!/bin/bash
#
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License
# 2.0 and the Server Side Public License, v 1; you may not use this file except
# in compliance with, at your election, the Elastic License 2.0 or the Server
# Side Public License, v 1.
#
# Wrapper around elasticsearch-plugin: for an `install` invocation, any argument
# naming a plugin that exists in the local archive (/opt/plugins/archive) is
# rewritten to a file: URL so it is installed offline instead of downloaded.
# NOTE: this is a Gradle template — `\$` sequences are escapes that become plain
# `$` in the generated script; bare `$version` below is expanded by Gradle.
<% /* Populated by Gradle */ %>
VERSION="$version"
plugin_name_is_next=0
declare -a args_array
# Consume all arguments, rewriting plugin names that follow the `install` keyword.
# The flag is never reset, so every argument after `install` is a candidate —
# this handles installing multiple plugins in one invocation.
while test \$# -gt 0; do
opt="\$1"
shift
if [[ \$plugin_name_is_next -eq 1 ]]; then
# Only rewrite when a matching versioned archive actually exists; otherwise
# pass the name through unchanged (e.g. flags like --batch, or remote plugins).
if [[ -f "/opt/plugins/archive/\$opt-\${VERSION}.zip" ]]; then
opt="file:/opt/plugins/archive/\$opt-\${VERSION}.zip"
fi
elif [[ "\$opt" == "install" ]]; then
plugin_name_is_next=1
fi
args_array+=("\$opt")
done
# \$@ is empty here (fully consumed above); this re-populates it from the
# rewritten argument list before handing off to the real plugin tool.
set -- "\$@" "\${args_array[@]}"
exec /usr/share/elasticsearch/bin/elasticsearch-plugin "\$@"

View file

@ -10,6 +10,18 @@ subprojects {
apply plugin: 'elasticsearch.internal-testclusters'
}
configurations {
repositoryPlugins
nonRepositoryPlugins
}
// Intentionally doesn't include `repository-hdfs`
List<String> repositoryPlugins = [
'repository-azure',
'repository-gcs',
'repository-s3'
]
// only configure immediate children of plugins dir
configure(subprojects.findAll { it.parent.path == project.path }) {
group = 'org.elasticsearch.plugin'
@ -22,4 +34,11 @@ configure(subprojects.findAll { it.parent.path == project.path }) {
licenseFile rootProject.file('licenses/SSPL-1.0+ELASTIC-LICENSE-2.0.txt')
noticeFile rootProject.file('NOTICE.txt')
}
if (repositoryPlugins.contains(project.name)) {
parent.artifacts.add('repositoryPlugins', tasks.named('bundlePlugin'))
} else {
parent.artifacts.add('nonRepositoryPlugins', tasks.named('bundlePlugin'))
}
}

View file

@ -12,13 +12,13 @@ import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.http.client.fluent.Request;
import org.elasticsearch.packaging.util.DockerRun;
import org.elasticsearch.packaging.util.Installation;
import org.elasticsearch.packaging.util.Platforms;
import org.elasticsearch.packaging.util.ProcessInfo;
import org.elasticsearch.packaging.util.ServerUtils;
import org.elasticsearch.packaging.util.Shell;
import org.elasticsearch.packaging.util.Shell.Result;
import org.elasticsearch.packaging.util.docker.DockerRun;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
@ -33,32 +33,37 @@ import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static java.nio.file.attribute.PosixFilePermissions.fromString;
import static org.elasticsearch.packaging.util.Distribution.Packaging;
import static org.elasticsearch.packaging.util.Docker.assertPermissionsAndOwnership;
import static org.elasticsearch.packaging.util.Docker.chownWithPrivilegeEscalation;
import static org.elasticsearch.packaging.util.Docker.copyFromContainer;
import static org.elasticsearch.packaging.util.Docker.existsInContainer;
import static org.elasticsearch.packaging.util.Docker.getContainerLogs;
import static org.elasticsearch.packaging.util.Docker.getImageHealthcheck;
import static org.elasticsearch.packaging.util.Docker.getImageLabels;
import static org.elasticsearch.packaging.util.Docker.getJson;
import static org.elasticsearch.packaging.util.Docker.mkDirWithPrivilegeEscalation;
import static org.elasticsearch.packaging.util.Docker.removeContainer;
import static org.elasticsearch.packaging.util.Docker.restartContainer;
import static org.elasticsearch.packaging.util.Docker.rmDirWithPrivilegeEscalation;
import static org.elasticsearch.packaging.util.Docker.runContainer;
import static org.elasticsearch.packaging.util.Docker.runContainerExpectingFailure;
import static org.elasticsearch.packaging.util.Docker.verifyContainerInstallation;
import static org.elasticsearch.packaging.util.Docker.waitForElasticsearch;
import static org.elasticsearch.packaging.util.DockerRun.builder;
import static org.elasticsearch.packaging.util.FileMatcher.Fileness.Directory;
import static org.elasticsearch.packaging.util.FileMatcher.Fileness.File;
import static org.elasticsearch.packaging.util.FileMatcher.p600;
import static org.elasticsearch.packaging.util.FileMatcher.p644;
import static org.elasticsearch.packaging.util.FileMatcher.p660;
import static org.elasticsearch.packaging.util.FileMatcher.p755;
import static org.elasticsearch.packaging.util.FileMatcher.p775;
import static org.elasticsearch.packaging.util.FileUtils.append;
import static org.elasticsearch.packaging.util.FileUtils.rm;
import static org.elasticsearch.packaging.util.docker.Docker.chownWithPrivilegeEscalation;
import static org.elasticsearch.packaging.util.docker.Docker.copyFromContainer;
import static org.elasticsearch.packaging.util.docker.Docker.existsInContainer;
import static org.elasticsearch.packaging.util.docker.Docker.getContainerLogs;
import static org.elasticsearch.packaging.util.docker.Docker.getImageHealthcheck;
import static org.elasticsearch.packaging.util.docker.Docker.getImageLabels;
import static org.elasticsearch.packaging.util.docker.Docker.getJson;
import static org.elasticsearch.packaging.util.docker.Docker.listContents;
import static org.elasticsearch.packaging.util.docker.Docker.mkDirWithPrivilegeEscalation;
import static org.elasticsearch.packaging.util.docker.Docker.removeContainer;
import static org.elasticsearch.packaging.util.docker.Docker.restartContainer;
import static org.elasticsearch.packaging.util.docker.Docker.rmDirWithPrivilegeEscalation;
import static org.elasticsearch.packaging.util.docker.Docker.runContainer;
import static org.elasticsearch.packaging.util.docker.Docker.runContainerExpectingFailure;
import static org.elasticsearch.packaging.util.docker.Docker.verifyContainerInstallation;
import static org.elasticsearch.packaging.util.docker.Docker.waitForElasticsearch;
import static org.elasticsearch.packaging.util.docker.DockerFileMatcher.file;
import static org.elasticsearch.packaging.util.docker.DockerRun.builder;
import static org.hamcrest.Matchers.arrayContaining;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsString;
@ -117,7 +122,12 @@ public class DockerTests extends PackagingTestCase {
waitForElasticsearch(installation, USERNAME, PASSWORD);
final int statusCode = ServerUtils.makeRequestAndGetStatus(Request.Get("http://localhost:9200"), USERNAME, "wrong_password", null);
assertThat(statusCode, equalTo(401));
}
/**
* Check that security can be disabled
*/
public void test012SecurityCanBeDisabled() throws Exception {
// restart container with security disabled
runContainer(distribution(), builder().envVars(Map.of("xpack.security.enabled", "false")));
waitForElasticsearch(installation);
@ -129,12 +139,59 @@ public class DockerTests extends PackagingTestCase {
* Checks that no plugins are initially active.
*/
public void test020PluginsListWithNoPlugins() {
// Consistency fix: use the distribution() accessor for both operands instead of
// mixing direct field access and the accessor in one expression.
assumeTrue(
"Only applies to non-Cloud images",
distribution().packaging != Packaging.DOCKER_CLOUD && distribution().packaging != Packaging.DOCKER_CLOUD_ESS
);
final Installation.Executables bin = installation.executables();
// Non-Cloud images ship without any pre-installed plugins.
final Result r = sh.run(bin.pluginTool + " list");
assertThat("Expected no plugins to be listed", r.stdout, emptyString());
}
/**
 * Check that Cloud images bundle a selection of plugins.
 */
public void test021PluginsListWithPlugins() {
// Fixed the assumption reason: the condition selects Cloud images, but the
// message previously said "Only applies to non-Cloud images". Also unified
// the mixed distribution.packaging / distribution().packaging access.
assumeTrue(
"Only applies to Cloud images",
distribution().packaging == Packaging.DOCKER_CLOUD || distribution().packaging == Packaging.DOCKER_CLOUD_ESS
);
final Installation.Executables bin = installation.executables();
final List<String> plugins = sh.run(bin.pluginTool + " list").stdout.lines().collect(Collectors.toList());
// The Cloud images preinstall exactly the three repository plugins.
assertThat(
"Expected standard plugins to be listed",
plugins,
equalTo(List.of("repository-azure", "repository-gcs", "repository-s3"))
);
}
/**
 * Checks that ESS images can install plugins from the local archive.
 */
public void test022InstallPluginsFromLocalArchive() {
assumeTrue("Only applies to ESS images", distribution().packaging == Packaging.DOCKER_CLOUD_ESS);
final String plugin = "analysis-icu";
final Installation.Executables bin = installation.executables();
List<String> plugins = sh.run(bin.pluginTool + " list").stdout.lines().collect(Collectors.toList());
assertThat("Expected " + plugin + " to not be installed", plugins, not(hasItems(plugin)));
// Stuff the proxy settings with garbage, so any attempt to go out to the internet would fail
sh.getEnv()
.put("ES_JAVA_OPTS", "-Dhttp.proxyHost=example.org -Dhttp.proxyPort=9999 -Dhttps.proxyHost=example.org -Dhttps.proxyPort=9999");
// Use the `plugin` variable instead of repeating the literal, so the plugin under
// test only needs changing in one place. The wrapper resolves the name against the
// local archive, so this must succeed despite the poisoned proxy settings.
sh.run("/opt/plugins/plugin-wrapper.sh install --batch " + plugin);
plugins = sh.run(bin.pluginTool + " list").stdout.lines().collect(Collectors.toList());
assertThat("Expected " + plugin + " to be installed", plugins, hasItems(plugin));
}
/**
* Check that the JDK's cacerts file is a symlink to the copy provided by the operating system.
*/
@ -160,7 +217,7 @@ public class DockerTests extends PackagingTestCase {
public void test042KeystorePermissionsAreCorrect() throws Exception {
waitForElasticsearch(installation, USERNAME, PASSWORD);
assertPermissionsAndOwnership(installation.config("elasticsearch.keystore"), "elasticsearch", "root", p660);
assertThat(installation.config("elasticsearch.keystore"), file(p660));
}
/**
@ -934,4 +991,21 @@ public class DockerTests extends PackagingTestCase {
assertFalse(labelKeys.stream().anyMatch(l -> l.startsWith("org.label-schema.")));
assertFalse(labelKeys.stream().anyMatch(l -> l.startsWith("org.opencontainers.")));
}
/**
* Check that the Cloud image contains the required Beats
*/
/**
 * Check that the Cloud image contains the required Beats (Filebeat and
 * Metricbeat), each with the expected layout, ownership and permissions.
 */
public void test400CloudImageBundlesBeats() {
    // Fix: supply an assume message, for consistency with the other tests in
    // this file - without it, a skipped run gives no hint of why.
    assumeTrue(
        "Only applies to Cloud images",
        distribution.packaging == Packaging.DOCKER_CLOUD || distribution.packaging == Packaging.DOCKER_CLOUD_ESS
    );

    final List<String> contents = listContents("/opt");
    assertThat("Expected beats in /opt", contents, hasItems("filebeat", "metricbeat"));

    // Each beat ships as a directory containing its binary plus module config dirs.
    Stream.of("filebeat", "metricbeat").forEach(beat -> {
        assertThat(Path.of("/opt/" + beat), file(Directory, "root", "root", p755));
        assertThat(Path.of("/opt/" + beat + "/" + beat), file(File, "root", "root", p755));
        assertThat(Path.of("/opt/" + beat + "/module"), file(Directory, "root", "root", p755));
        assertThat(Path.of("/opt/" + beat + "/modules.d"), file(Directory, "root", "root", p755));
    });
}
}

View file

@ -9,13 +9,14 @@
package org.elasticsearch.packaging.test;
import org.elasticsearch.packaging.util.Distribution;
import org.elasticsearch.packaging.util.Docker;
import org.elasticsearch.packaging.util.FileUtils;
import org.elasticsearch.packaging.util.Installation;
import org.elasticsearch.packaging.util.Packages;
import org.elasticsearch.packaging.util.Platforms;
import org.elasticsearch.packaging.util.ServerUtils;
import org.elasticsearch.packaging.util.Shell;
import org.elasticsearch.packaging.util.docker.Docker;
import org.elasticsearch.packaging.util.docker.DockerFileMatcher;
import java.io.IOException;
import java.nio.file.Files;
@ -27,12 +28,6 @@ import java.util.Map;
import static org.elasticsearch.packaging.util.Archives.ARCHIVE_OWNER;
import static org.elasticsearch.packaging.util.Archives.installArchive;
import static org.elasticsearch.packaging.util.Archives.verifyArchiveInstallation;
import static org.elasticsearch.packaging.util.Docker.assertPermissionsAndOwnership;
import static org.elasticsearch.packaging.util.Docker.runContainer;
import static org.elasticsearch.packaging.util.Docker.runContainerExpectingFailure;
import static org.elasticsearch.packaging.util.Docker.waitForElasticsearch;
import static org.elasticsearch.packaging.util.Docker.waitForPathToExist;
import static org.elasticsearch.packaging.util.DockerRun.builder;
import static org.elasticsearch.packaging.util.FileMatcher.Fileness.File;
import static org.elasticsearch.packaging.util.FileMatcher.file;
import static org.elasticsearch.packaging.util.FileMatcher.p600;
@ -42,6 +37,11 @@ import static org.elasticsearch.packaging.util.Packages.assertInstalled;
import static org.elasticsearch.packaging.util.Packages.assertRemoved;
import static org.elasticsearch.packaging.util.Packages.installPackage;
import static org.elasticsearch.packaging.util.Packages.verifyPackageInstallation;
import static org.elasticsearch.packaging.util.docker.Docker.runContainer;
import static org.elasticsearch.packaging.util.docker.Docker.runContainerExpectingFailure;
import static org.elasticsearch.packaging.util.docker.Docker.waitForElasticsearch;
import static org.elasticsearch.packaging.util.docker.Docker.waitForPathToExist;
import static org.elasticsearch.packaging.util.docker.DockerRun.builder;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.notNullValue;
@ -460,7 +460,9 @@ public class KeystoreManagementTests extends PackagingTestCase {
case DOCKER:
case DOCKER_UBI:
case DOCKER_IRON_BANK:
assertPermissionsAndOwnership(keystore, "elasticsearch", "root", p660);
case DOCKER_CLOUD:
case DOCKER_CLOUD_ESS:
assertThat(keystore, DockerFileMatcher.file(p660));
break;
default:
throw new IllegalStateException("Unknown Elasticsearch packaging type.");

View file

@ -23,12 +23,13 @@ import org.elasticsearch.core.CheckedRunnable;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.packaging.util.Archives;
import org.elasticsearch.packaging.util.Distribution;
import org.elasticsearch.packaging.util.Docker;
import org.elasticsearch.packaging.util.FileUtils;
import org.elasticsearch.packaging.util.Installation;
import org.elasticsearch.packaging.util.Packages;
import org.elasticsearch.packaging.util.Platforms;
import org.elasticsearch.packaging.util.Shell;
import org.elasticsearch.packaging.util.docker.Docker;
import org.elasticsearch.packaging.util.docker.DockerShell;
import org.hamcrest.CoreMatchers;
import org.hamcrest.Matcher;
import org.junit.After;
@ -61,10 +62,10 @@ import java.util.Locale;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.packaging.util.Cleanup.cleanEverything;
import static org.elasticsearch.packaging.util.Docker.ensureImageIsLoaded;
import static org.elasticsearch.packaging.util.Docker.removeContainer;
import static org.elasticsearch.packaging.util.FileExistenceMatchers.fileExists;
import static org.elasticsearch.packaging.util.FileUtils.append;
import static org.elasticsearch.packaging.util.docker.Docker.ensureImageIsLoaded;
import static org.elasticsearch.packaging.util.docker.Docker.removeContainer;
import static org.hamcrest.CoreMatchers.anyOf;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.equalTo;
@ -146,7 +147,7 @@ public abstract class PackagingTestCase extends Assert {
// create shell
if (distribution().isDocker()) {
ensureImageIsLoaded(distribution);
sh = new Docker.DockerShell();
sh = new DockerShell();
} else {
sh = new Shell();
}
@ -221,6 +222,8 @@ public abstract class PackagingTestCase extends Assert {
case DOCKER:
case DOCKER_UBI:
case DOCKER_IRON_BANK:
case DOCKER_CLOUD:
case DOCKER_CLOUD_ESS:
installation = Docker.runContainer(distribution);
Docker.verifyContainerInstallation(installation);
break;
@ -303,6 +306,8 @@ public abstract class PackagingTestCase extends Assert {
case DOCKER:
case DOCKER_UBI:
case DOCKER_IRON_BANK:
case DOCKER_CLOUD:
case DOCKER_CLOUD_ESS:
// nothing, "installing" docker image is running it
return Shell.NO_OP;
default:
@ -323,6 +328,8 @@ public abstract class PackagingTestCase extends Assert {
case DOCKER:
case DOCKER_UBI:
case DOCKER_IRON_BANK:
case DOCKER_CLOUD:
case DOCKER_CLOUD_ESS:
// nothing, "installing" docker image is running it
break;
default:
@ -344,6 +351,8 @@ public abstract class PackagingTestCase extends Assert {
case DOCKER:
case DOCKER_UBI:
case DOCKER_IRON_BANK:
case DOCKER_CLOUD:
case DOCKER_CLOUD_ESS:
Docker.waitForElasticsearchToStart();
break;
default:

View file

@ -32,6 +32,10 @@ public class Distribution {
this.packaging = Packaging.DOCKER_UBI;
} else if (filename.endsWith(".ironbank.tar")) {
this.packaging = Packaging.DOCKER_IRON_BANK;
} else if (filename.endsWith(".cloud.tar")) {
this.packaging = Packaging.DOCKER_CLOUD;
} else if (filename.endsWith(".cloud-ess.tar")) {
this.packaging = Packaging.DOCKER_CLOUD_ESS;
} else {
int lastDot = filename.lastIndexOf('.');
this.packaging = Packaging.valueOf(filename.substring(lastDot + 1).toUpperCase(Locale.ROOT));
@ -63,6 +67,8 @@ public class Distribution {
case DOCKER:
case DOCKER_UBI:
case DOCKER_IRON_BANK:
case DOCKER_CLOUD:
case DOCKER_CLOUD_ESS:
return true;
}
return false;
@ -76,7 +82,9 @@ public class Distribution {
RPM(".rpm", Platforms.isRPM()),
DOCKER(".docker.tar", Platforms.isDocker()),
DOCKER_UBI(".ubi.tar", Platforms.isDocker()),
DOCKER_IRON_BANK(".ironbank.tar", Platforms.isDocker());
DOCKER_IRON_BANK(".ironbank.tar", Platforms.isDocker()),
DOCKER_CLOUD(".cloud.tar", Platforms.isDocker()),
DOCKER_CLOUD_ESS(".cloud-ess.tar", Platforms.isDocker());
/** The extension of this distribution's file */
public final String extension;

View file

@ -48,12 +48,12 @@ public class FileMatcher extends TypeSafeMatcher<Path> {
public static final Set<PosixFilePermission> p770 = fromString("rwxrwx---");
public static final Set<PosixFilePermission> p775 = fromString("rwxrwxr-x");
private final Fileness fileness;
private final String owner;
private final String group;
private final Set<PosixFilePermission> posixPermissions;
protected final Fileness fileness;
protected final String owner;
protected final String group;
protected final Set<PosixFilePermission> posixPermissions;
private String mismatch;
protected String mismatch;
public FileMatcher(Fileness fileness, String owner, String group, Set<PosixFilePermission> posixPermissions) {
this.fileness = Objects.requireNonNull(fileness);

View file

@ -50,7 +50,7 @@ import javax.net.ssl.TrustManagerFactory;
import static java.nio.file.StandardOpenOption.APPEND;
import static java.nio.file.StandardOpenOption.CREATE;
import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING;
import static org.elasticsearch.packaging.util.Docker.sh;
import static org.elasticsearch.packaging.util.docker.Docker.dockerShell;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.containsString;
@ -74,7 +74,7 @@ public class ServerUtils {
String configFile = Files.readString(configFilePath, StandardCharsets.UTF_8);
securityEnabled = configFile.contains(SECURITY_DISABLED) == false;
} else {
final Optional<String> commandLine = sh.run("bash -c 'COLUMNS=2000 ps ax'").stdout.lines()
final Optional<String> commandLine = dockerShell.run("bash -c 'COLUMNS=2000 ps ax'").stdout.lines()
.filter(line -> line.contains("org.elasticsearch.bootstrap.Elasticsearch"))
.findFirst();
if (commandLine.isPresent() == false) {

View file

@ -36,7 +36,7 @@ public class Shell {
public static final Result NO_OP = new Shell.Result(0, "", "");
protected final Logger logger = LogManager.getLogger(getClass());
final Map<String, String> env = new HashMap<>();
protected final Map<String, String> env = new HashMap<>();
String umask;
Path workingDirectory;

View file

@ -6,7 +6,7 @@
* Side Public License, v 1.
*/
package org.elasticsearch.packaging.util;
package org.elasticsearch.packaging.util.docker;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
@ -15,31 +15,43 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.http.client.fluent.Request;
import org.elasticsearch.core.CheckedRunnable;
import org.elasticsearch.packaging.util.Distribution;
import org.elasticsearch.packaging.util.Distribution.Packaging;
import org.elasticsearch.packaging.util.FileUtils;
import org.elasticsearch.packaging.util.Installation;
import org.elasticsearch.packaging.util.ServerUtils;
import org.elasticsearch.packaging.util.Shell;
import java.io.FileNotFoundException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.PosixFileAttributes;
import java.nio.file.attribute.PosixFilePermission;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static java.nio.file.attribute.PosixFilePermissions.fromString;
import static org.elasticsearch.packaging.util.DockerRun.getImageName;
import static org.elasticsearch.packaging.util.FileMatcher.Fileness.Directory;
import static org.elasticsearch.packaging.util.FileMatcher.p444;
import static org.elasticsearch.packaging.util.FileMatcher.p555;
import static org.elasticsearch.packaging.util.FileMatcher.p664;
import static org.elasticsearch.packaging.util.FileMatcher.p770;
import static org.elasticsearch.packaging.util.FileMatcher.p775;
import static org.elasticsearch.packaging.util.ServerUtils.makeRequest;
import static org.elasticsearch.packaging.util.docker.DockerFileMatcher.file;
import static org.elasticsearch.packaging.util.docker.DockerRun.getImageName;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.junit.Assert.assertEquals;
import static org.hamcrest.Matchers.not;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@ -49,8 +61,8 @@ import static org.junit.Assert.fail;
public class Docker {
private static final Log logger = LogFactory.getLog(Docker.class);
static final Shell sh = new Shell();
private static final DockerShell dockerShell = new DockerShell();
public static final Shell sh = new Shell();
public static final DockerShell dockerShell = new DockerShell();
public static final int STARTUP_SLEEP_INTERVAL_MILLISECONDS = 1000;
public static final int STARTUP_ATTEMPTS_MAX = 10;
@ -59,7 +71,7 @@ public class Docker {
* but that appeared to cause problems with repeatedly destroying and recreating containers with
* the same name.
*/
private static String containerId = null;
static String containerId = null;
/**
* Checks whether the required Docker image exists. If not, the image is loaded from disk. No check is made
@ -236,58 +248,6 @@ public class Docker {
sh.run(script);
}
/**
* Extends {@link Shell} so that executed commands happen in the currently running Docker container.
*/
public static class DockerShell extends Shell {
// Wraps the script in `docker exec` so it runs inside the current container.
// The container must already have been started (containerId set).
@Override
protected String[] getScriptCommand(String script) {
assert containerId != null;

List<String> cmd = new ArrayList<>();
cmd.add("docker");
cmd.add("exec");
cmd.add("--tty");

// Forward any configured environment variables into the container's shell.
env.forEach((key, value) -> cmd.add("--env " + key + "=\"" + value + "\""));

cmd.add(containerId);
cmd.add(script);

return super.getScriptCommand(String.join(" ", cmd));
}

/**
 * Overrides {@link Shell#run(String)} to attempt to collect Docker container
 * logs when a command fails to execute successfully.
 * @param script the command to run
 * @return the command's output
 */
@Override
public Result run(String script) {
try {
return super.run(script);
} catch (ShellException e) {
try {
// Best-effort: dump the container's stdout/stderr to aid diagnosis.
final Shell.Result dockerLogs = getContainerLogs();
logger.error(
"Command [{}] failed.\n\nContainer stdout: [{}]\n\nContainer stderr: [{}]",
script,
dockerLogs.stdout,
dockerLogs.stderr
);
} catch (ShellException shellException) {
// Fetching logs can itself fail (e.g. container already gone) - log and move on.
logger.error(
"Command [{}] failed.\n\nTried to dump container logs but that failed too: [{}]",
script,
shellException.getMessage()
);
}
// Always rethrow the original failure, regardless of log collection.
throw e;
}
}
}
/**
* Checks whether a path exists in the Docker container.
* @param path the path that ought to exist
@ -397,49 +357,6 @@ public class Docker {
executePrivilegeEscalatedShellCmd(command, localPath, containerPath);
}
/**
* Checks that the specified path's permissions and ownership match those specified.
* <p>
* The implementation supports multiple files being matched by the path, via bash expansion, although
* it is expected that only the final part of the path will contain expansions.
*
* @param path the path to check, possibly with e.g. a wildcard (<code>*</code>)
* @param expectedUser the file's expected user
* @param expectedGroup the file's expected group
* @param expectedPermissions the unix permissions that the path ought to have
*/
public static void assertPermissionsAndOwnership(
Path path,
String expectedUser,
String expectedGroup,
Set<PosixFilePermission> expectedPermissions
) {
logger.debug("Checking permissions and ownership of [" + path + "]");

// Run stat inside the container via bash so that any glob in `path` is
// expanded; stat then prints one "<name> <user> <group> <mode>" line per file.
final Shell.Result result = dockerShell.run("bash -c 'stat -c \"%n %U %G %A\" " + path + "'");

final Path parent = path.getParent();

result.stdout.lines().forEach(line -> {
final String[] components = line.split("\\s+");

final String filename = components[0];
final String username = components[1];
final String group = components[2];
final String permissions = components[3];

// The final substring() is because we don't check the directory bit, and we
// also don't want any SELinux security context indicator.
Set<PosixFilePermission> actualPermissions = fromString(permissions.substring(1, 10));

// Names stat reports without a leading '/' are resolved against the
// original path's parent, purely so failure messages show a full path.
String fullPath = filename.startsWith("/") ? filename : parent + "/" + filename;

assertEquals("Permissions of " + fullPath + " are wrong", expectedPermissions, actualPermissions);
assertThat("File owner of " + fullPath + " is wrong", username, equalTo(expectedUser));
assertThat("File group of " + fullPath + " is wrong", group, equalTo(expectedGroup));
});
}
/**
* Waits for up to 20 seconds for a path to exist in the container.
* @param path the path to await
@ -472,24 +389,30 @@ public class Docker {
final String homeDir = passwdResult.stdout.trim().split(":")[5];
assertThat("elasticsearch user's home directory is incorrect", homeDir, equalTo("/usr/share/elasticsearch"));
assertPermissionsAndOwnership(es.home, "root", "root", p775);
assertThat(es.home, file(Directory, "root", "root", p775));
Stream.of(es.bundledJdk, es.lib, es.modules).forEach(dir -> assertPermissionsAndOwnership(dir, "root", "root", p555));
Stream.of(es.bundledJdk, es.lib, es.modules).forEach(dir -> assertThat(dir, file(Directory, "root", "root", p555)));
// You can't install plugins that include configuration when running as `elasticsearch` and the `config`
// You couldn't install plugins that include configuration when running as `elasticsearch` if the `config`
// dir is owned by `root`, because the installer tries to manipulate the permissions on the plugin's
// config directory.
Stream.of(es.bin, es.config, es.logs, es.config.resolve("jvm.options.d"), es.data, es.plugins)
.forEach(dir -> assertPermissionsAndOwnership(dir, "elasticsearch", "root", p775));
.forEach(dir -> assertThat(dir, file(Directory, "elasticsearch", "root", p775)));
Stream.of(es.bin, es.bundledJdk.resolve("bin"), es.modules.resolve("x-pack-ml/platform/linux-*/bin"))
.forEach(binariesPath -> assertPermissionsAndOwnership(binariesPath.resolve("*"), "root", "root", p555));
final String arch = dockerShell.run("arch").stdout.trim();
Stream.of(es.bin, es.bundledJdk.resolve("bin"), es.modules.resolve("x-pack-ml/platform/linux-" + arch + "/bin"))
.forEach(
binariesPath -> listContents(binariesPath).forEach(
binFile -> assertThat(binariesPath.resolve(binFile), file("root", "root", p555))
)
);
Stream.of("elasticsearch.yml", "jvm.options", "log4j2.properties", "role_mapping.yml", "roles.yml", "users", "users_roles")
.forEach(configFile -> assertPermissionsAndOwnership(es.config(configFile), "root", "root", p664));
.forEach(configFile -> assertThat(es.config(configFile), file("root", "root", p664)));
Stream.of("LICENSE.txt", "NOTICE.txt", "README.asciidoc")
.forEach(doc -> assertPermissionsAndOwnership(es.home.resolve(doc), "root", "root", p444));
.forEach(doc -> assertThat(es.home.resolve(doc), file("root", "root", p444)));
assertThat(dockerShell.run(es.bin("elasticsearch-keystore") + " list").stdout, containsString("keystore.seed"));
@ -502,6 +425,31 @@ public class Docker {
dockerShell.runIgnoreExitCode("bash -c 'hash " + cliBinary + "'").isSuccess()
)
);
if (es.distribution.packaging == Packaging.DOCKER_CLOUD || es.distribution.packaging == Packaging.DOCKER_CLOUD_ESS) {
verifyCloudContainerInstallation(es);
}
}
/**
 * Checks the Cloud-specific additions to the image: the plugin wrapper script
 * and the plugin archive under /opt/plugins. The ESS variant bundles plugin
 * archives (excluding the repository plugins, which are already installed),
 * while the plain Cloud variant bundles none.
 */
private static void verifyCloudContainerInstallation(Installation es) {
assertThat(Path.of("/opt/plugins/plugin-wrapper.sh"), file("root", "root", p555));

final String pluginArchive = "/opt/plugins/archive";
final List<String> plugins = listContents(pluginArchive);

if (es.distribution.packaging == Packaging.DOCKER_CLOUD_ESS) {
assertThat("ESS image should come with plugins in " + pluginArchive, plugins, not(empty()));

final List<String> repositoryPlugins = plugins.stream().filter(p -> p.startsWith("repository")).collect(Collectors.toList());
// Assert on equality so that the error reports the unexpected values.
assertThat(
"ESS image should not have repository plugins in " + pluginArchive,
repositoryPlugins,
equalTo(Collections.emptyList())
);
} else {
assertThat("Cloud image should not have any plugins in " + pluginArchive, plugins, empty());
}
}
public static void waitForElasticsearch(Installation installation) throws Exception {
@ -634,4 +582,42 @@ public class Docker {
public static void restartContainer() {
sh.run("docker restart " + containerId);
}
/**
 * Reads a path's ownership, permissions and file type from inside the Docker
 * container, by running `stat` there.
 *
 * @param path the container path to inspect
 * @return the path's attributes (timestamps/size are not populated)
 * @throws FileNotFoundException if the path does not exist in the container
 */
public static PosixFileAttributes getAttributes(Path path) throws FileNotFoundException {
    final Shell.Result statResult = dockerShell.runIgnoreExitCode("stat -c \"%U %G %A\" " + path);
    if (statResult.isSuccess() == false) {
        throw new FileNotFoundException(path + " does not exist");
    }

    // stat prints a single "<user> <group> <mode>" line for the path.
    final String[] fields = statResult.stdout.split("\\s+");
    final String mode = fields[2];
    final String typeIndicator = mode.substring(0, 1);

    final DockerFileAttributes attrs = new DockerFileAttributes();
    attrs.owner = fields[0];
    attrs.group = fields[1];
    // Skip the leading file-type character, and stop before any trailing
    // SELinux security-context indicator - keep just the 9 permission bits.
    attrs.permissions = fromString(mode.substring(1, 10));
    attrs.isDirectory = typeIndicator.equals("d");
    attrs.isSymbolicLink = typeIndicator.equals("l");

    return attrs;
}
/**
 * Lists the names of the entries under the supplied path inside the Docker
 * container, by running `ls -1` there.
 * @param path the container path to list
 * @return the entry names, one per line of the ls output
 */
public static List<String> listContents(String path) {
return dockerShell.run("ls -1 --color=never " + path).stdout.lines().collect(Collectors.toList());
}

/**
 * Lists the names of the entries under the supplied path inside the Docker
 * container.
 * @param path the container path to list
 * @return the entry names
 */
public static List<String> listContents(Path path) {
return listContents(path.toString());
}
}

View file

@ -0,0 +1,84 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.packaging.util.docker;
import java.nio.file.attribute.FileTime;
import java.nio.file.attribute.GroupPrincipal;
import java.nio.file.attribute.PosixFileAttributes;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.UserPrincipal;
import java.util.Set;
/**
 * A {@link PosixFileAttributes} implementation describing a file inside a Docker
 * container, populated from `stat` output by {@code Docker#getAttributes(Path)}.
 * Only ownership, permissions and file type are available; timestamps, size and
 * file key are not populated and return {@code null} / zero.
 */
class DockerFileAttributes implements PosixFileAttributes {
    // Populated directly by Docker#getAttributes(Path).
    String owner;
    String group;
    Set<PosixFilePermission> permissions;
    boolean isDirectory;
    boolean isSymbolicLink;

    @Override
    public UserPrincipal owner() {
        return () -> owner;
    }

    @Override
    public GroupPrincipal group() {
        return () -> group;
    }

    @Override
    public Set<PosixFilePermission> permissions() {
        return permissions;
    }

    @Override
    public FileTime lastModifiedTime() {
        // Not captured by the stat format string used to build this object.
        return null;
    }

    @Override
    public FileTime lastAccessTime() {
        return null;
    }

    @Override
    public FileTime creationTime() {
        return null;
    }

    @Override
    public boolean isRegularFile() {
        return isDirectory == false && isSymbolicLink == false;
    }

    @Override
    public boolean isDirectory() {
        return isDirectory;
    }

    @Override
    public boolean isSymbolicLink() {
        // Fix: previously returned isDirectory, so symlinks were misreported
        // and directories were also reported as symbolic links.
        return isSymbolicLink;
    }

    @Override
    public boolean isOther() {
        return false;
    }

    @Override
    public long size() {
        return 0;
    }

    @Override
    public Object fileKey() {
        return null;
    }
}

View file

@ -0,0 +1,73 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.packaging.util.docker;
import org.elasticsearch.packaging.util.FileMatcher;
import java.io.FileNotFoundException;
import java.nio.file.Path;
import java.nio.file.attribute.PosixFileAttributes;
import java.nio.file.attribute.PosixFilePermission;
import java.util.Set;
/**
 * A Hamcrest matcher for asserting the type, ownership and permissions of a
 * file inside a Docker container. Attributes are fetched via
 * {@link Docker#getAttributes(Path)}; any {@code null} expectation is skipped.
 */
public class DockerFileMatcher extends FileMatcher {

    public DockerFileMatcher(Fileness fileness, String owner, String group, Set<PosixFilePermission> posixPermissions) {
        super(fileness, owner, group, posixPermissions);
    }

    @Override
    protected boolean matchesSafely(Path path) {
        final PosixFileAttributes attrs;
        try {
            attrs = Docker.getAttributes(path);
        } catch (FileNotFoundException e) {
            mismatch = "Does not exist";
            return false;
        }

        // Check fileness first, then owner, group and permissions, recording
        // the first mismatch found.
        final boolean shouldBeDirectory = fileness.equals(Fileness.Directory);
        if (shouldBeDirectory != attrs.isDirectory()) {
            mismatch = "Is " + (attrs.isDirectory() ? "a directory" : "a file");
            return false;
        }

        if (owner != null && owner.equals(attrs.owner().getName()) == false) {
            mismatch = "Owned by " + attrs.owner().getName();
            return false;
        }

        if (group != null && group.equals(attrs.group().getName()) == false) {
            mismatch = "Owned by group " + attrs.group().getName();
            return false;
        }

        if (posixPermissions != null && posixPermissions.equals(attrs.permissions()) == false) {
            mismatch = "Has permissions " + attrs.permissions();
            return false;
        }

        return true;
    }

    /** Matches a regular file owned by elasticsearch:root with the given permissions. */
    public static DockerFileMatcher file(Set<PosixFilePermission> permissions) {
        return new DockerFileMatcher(Fileness.File, "elasticsearch", "root", permissions);
    }

    /** Matches a file or directory owned by elasticsearch:root with the given permissions. */
    public static DockerFileMatcher file(Fileness fileness, Set<PosixFilePermission> permissions) {
        return new DockerFileMatcher(fileness, "elasticsearch", "root", permissions);
    }

    /** Matches a regular file with the given owner, group and permissions. */
    public static DockerFileMatcher file(String owner, String group, Set<PosixFilePermission> permissions) {
        return new DockerFileMatcher(Fileness.File, owner, group, permissions);
    }

    /** Matches a file or directory with the given owner, group and permissions. */
    public static DockerFileMatcher file(Fileness fileness, String owner, String group, Set<PosixFilePermission> permissions) {
        return new DockerFileMatcher(fileness, owner, group, permissions);
    }
}

View file

@ -6,7 +6,10 @@
* Side Public License, v 1.
*/
package org.elasticsearch.packaging.util;
package org.elasticsearch.packaging.util.docker;
import org.elasticsearch.packaging.util.Distribution;
import org.elasticsearch.packaging.util.Platforms;
import java.nio.file.Path;
import java.util.ArrayList;
@ -150,6 +153,14 @@ public class DockerRun {
suffix = "-ironbank";
break;
case DOCKER_CLOUD:
suffix = "-cloud";
break;
case DOCKER_CLOUD_ESS:
suffix = "-cloud-ess";
break;
default:
throw new IllegalStateException("Unexpected distribution packaging type: " + distribution.packaging);
}

View file

@ -0,0 +1,67 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.packaging.util.docker;
import org.elasticsearch.packaging.util.Shell;
import java.util.ArrayList;
import java.util.List;
/**
* Extends {@link Shell} so that executed commands happen in the currently running Docker container.
*/
/**
 * Extends {@link Shell} so that executed commands happen in the currently running Docker container.
 */
public class DockerShell extends Shell {

    /**
     * Wraps the supplied script in a `docker exec` invocation targeting the
     * current container, forwarding any configured environment variables.
     */
    @Override
    protected String[] getScriptCommand(String script) {
        assert Docker.containerId != null;

        final List<String> dockerExec = new ArrayList<>();
        dockerExec.add("docker");
        dockerExec.add("exec");
        dockerExec.add("--tty");

        env.forEach((key, value) -> dockerExec.add("--env " + key + "=\"" + value + "\""));

        dockerExec.add(Docker.containerId);
        dockerExec.add(script);

        return super.getScriptCommand(String.join(" ", dockerExec));
    }

    /**
     * Overrides {@link Shell#run(String)} to attempt to collect Docker container
     * logs when a command fails to execute successfully.
     *
     * @param script the command to run
     * @return the command's output
     */
    @Override
    public Result run(String script) {
        try {
            return super.run(script);
        } catch (ShellException e) {
            dumpContainerLogs(script);
            // Always rethrow the original failure.
            throw e;
        }
    }

    /** Best-effort dump of the container's output after {@code script} failed. */
    private void dumpContainerLogs(String script) {
        try {
            final Result dockerLogs = Docker.getContainerLogs();
            logger.error(
                "Command [{}] failed.\n\nContainer stdout: [{}]\n\nContainer stderr: [{}]",
                script,
                dockerLogs.stdout,
                dockerLogs.stderr
            );
        } catch (ShellException shellException) {
            logger.error(
                "Command [{}] failed.\n\nTried to dump container logs but that failed too: [{}]",
                script,
                shellException.getMessage()
            );
        }
    }
}

View file

@ -31,6 +31,10 @@ List projects = [
'distribution:archives:linux-tar',
'distribution:archives:no-jdk-linux-tar',
'distribution:docker',
'distribution:docker:cloud-docker-export',
'distribution:docker:cloud-docker-aarch64-export',
'distribution:docker:cloud-ess-docker-export',
'distribution:docker:cloud-ess-docker-aarch64-export',
'distribution:docker:docker-aarch64-export',
'distribution:docker:docker-export',
'distribution:docker:ironbank-docker-aarch64-export',