Use almalinux as the Docker base image (#80524)

Closes #76681. Our approach of building Docker
images from `scratch` has caused problems at Docker Hub. Fix this by
removing the `scratch`-based build process entirely and instead basing
the default distribution on `almalinux:8.4-minimal`. AlmaLinux is
binary-compatible with RHEL, and therefore very similar to UBI.
Rory Hunter 2021-11-10 13:04:56 +00:00 committed by GitHub
parent 3fa33b7460
commit ba87234f51
6 changed files with 53 additions and 238 deletions
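
For illustration, the new default image is roughly of the following shape. This is a minimal sketch, assuming the package set that the Dockerfile template below installs (see the Iron Bank branch in the template diff for the exact list):

[source,sh]
--------------------------------------------
FROM almalinux:8.4-minimal

# The -minimal variant ships `microdnf` in place of full `yum`.
RUN microdnf update --setopt=tsflags=nodocs -y && \
    microdnf install --setopt=tsflags=nodocs -y \
        nc shadow-utils zip findutils unzip procps-ng && \
    microdnf clean all
--------------------------------------------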


@@ -12,7 +12,7 @@ package org.elasticsearch.gradle.internal;
* This class models the different Docker base images that are used to build Docker distributions of Elasticsearch.
*/
public enum DockerBase {
CENTOS("centos:8", ""),
DEFAULT("almalinux:8.4-minimal", ""),
// "latest" here is intentional, since the image name specifies "8"
UBI("docker.elastic.co/ubi8/ubi-minimal:latest", "-ubi8"),
@@ -21,7 +21,7 @@ public enum DockerBase {
IRON_BANK("${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG}", "-ironbank"),
// Base image with extras for Cloud
CLOUD("centos:8", "-cloud"),
CLOUD("almalinux:8.4-minimal", "-cloud"),
// Based on CLOUD above, with more extras. We don't set a base image because
// we programmatically extend from the Cloud image.


@@ -96,7 +96,7 @@ ext.expansions = { Architecture architecture, DockerBase base ->
'config_dir' : base == DockerBase.IRON_BANK ? 'scripts' : 'config',
'git_revision' : BuildParams.gitRevision,
'license' : base == DockerBase.IRON_BANK ? 'Elastic License 2.0' : 'Elastic-License-2.0',
'package_manager' : base == DockerBase.UBI ? 'microdnf' : 'yum',
'package_manager' : base == DockerBase.IRON_BANK ? 'yum' : 'microdnf',
'docker_base' : base.name().toLowerCase(),
'version' : VersionProperties.elasticsearch,
'major_minor_version': "${major}.${minor}",
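In effect, every image except Iron Bank now installs packages with `microdnf`: both `almalinux:8.4-minimal` and the minimal UBI base ship `microdnf` in place of full `yum`, while the Iron Bank hardened base keeps `yum`.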
@@ -121,7 +121,7 @@ private static String toCamel(String input) {
private static String taskName(String prefix, Architecture architecture, DockerBase base, String suffix) {
return prefix +
(architecture == Architecture.AARCH64 ? 'Aarch64' : '') +
(base == DockerBase.CENTOS ? "" : toCamel(base.name())) +
(base == DockerBase.DEFAULT ? "" : toCamel(base.name())) +
suffix
}
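As a worked example of the naming scheme, assuming `toCamel` renders `UBI` as `Ubi`: `taskName('build', Architecture.AARCH64, DockerBase.UBI, 'DockerImage')` yields `buildAarch64UbiDockerImage`, while the default base contributes no infix, so unqualified names such as `buildAarch64DockerImage` now refer to `DEFAULT` rather than `CENTOS`.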
@@ -367,7 +367,7 @@ void addBuildDockerImageTask(Architecture architecture, DockerBase base) {
baseImages = [baseImage]
buildArgs = buildArgsMap
} else if (base == DockerBase.CENTOS || base == DockerBase.CLOUD) {
} else if (base == DockerBase.DEFAULT || base == DockerBase.CLOUD) {
baseImages = ['alpine:3.13', base.image]
} else {
baseImages = [base.image]
@@ -449,7 +449,7 @@ subprojects { Project subProject ->
apply plugin: 'distribution'
final Architecture architecture = subProject.name.contains('aarch64-') ? Architecture.AARCH64 : Architecture.X64
DockerBase base = DockerBase.CENTOS
DockerBase base = DockerBase.DEFAULT
if (subProject.name.contains('ubi-')) {
base = DockerBase.UBI
} else if (subProject.name.contains('ironbank-')) {


@@ -19,7 +19,7 @@
add as many newlines here as necessary to improve legibility.
*/ %>
<% if (docker_base == "ubi") { %>
<% if (docker_base == 'default' || docker_base == "ubi") { %>
################################################################################
# Build stage 0 `builder`:
# Extract Elasticsearch artifact
@@ -66,158 +66,6 @@ FROM ${base_image} AS builder
COPY tini /bin/tini
RUN chmod 0555 /bin/tini
<% } else { %>
<% /* CentOS builds are actually a custom base image with a minimal set of dependencies */ %>
################################################################################
# Stage 1. Build curl statically. Installing it from RPM on CentOS pulls in too
# many dependencies.
################################################################################
FROM alpine:3.13 AS curl
ENV VERSION 7.71.0
ENV TARBALL_URL https://curl.haxx.se/download/curl-\${VERSION}.tar.xz
ENV TARBALL_PATH curl-\${VERSION}.tar.xz
# Install dependencies
RUN <%= retry.loop('apk', 'apk add gnupg gcc make musl-dev openssl-dev openssl-libs-static file') %>
RUN mkdir /work
WORKDIR /work
# Fetch curl sources and files for validation. Note that alpine's `wget` doesn't have retry options.
RUN function retry_wget() { \\
local URL="\$1" ; \\
local DEST="\$2" ; \\
<%= retry.loop('wget', 'wget "\$URL\" -O "\$DEST"', 6, 'return') %> ; \\
} ; \\
retry_wget "https://daniel.haxx.se/mykey.asc" "curl-gpg.pub" && \\
retry_wget "\${TARBALL_URL}.asc" "\${TARBALL_PATH}.asc" && \\
retry_wget "\${TARBALL_URL}" "\${TARBALL_PATH}"
# Validate source
RUN gpg --import --always-trust "curl-gpg.pub" && \\
gpg --verify "\${TARBALL_PATH}.asc" "\${TARBALL_PATH}"
# Unpack and build
RUN set -e ; \\
tar xfJ "\${TARBALL_PATH}" ; \\
cd "curl-\${VERSION}" ; \\
if ! ./configure --disable-shared --with-ca-fallback --with-ca-bundle=/etc/pki/tls/certs/ca-bundle.crt ; then \\
[[ -e config.log ]] && cat config.log ; \\
exit 1 ; \\
fi ; \\
make curl_LDFLAGS="-all-static" ; \\
cp src/curl /work/curl ; \\
strip /work/curl
################################################################################
# Step 2. Create a minimal root filesystem directory. This will form the basis
# for our image.
################################################################################
FROM ${base_image} AS rootfs
ENV TINI_VERSION 0.19.0
# Start off with an up-to-date system
RUN ${package_manager} update --setopt=tsflags=nodocs -y
# Create a directory into which we will install files
RUN mkdir /rootfs
# Create required devices
RUN mkdir -m 755 /rootfs/dev && \\
mknod -m 600 /rootfs/dev/console c 5 1 && \\
mknod -m 600 /rootfs/dev/initctl p && \\
mknod -m 666 /rootfs/dev/full c 1 7 && \\
mknod -m 666 /rootfs/dev/null c 1 3 && \\
mknod -m 666 /rootfs/dev/ptmx c 5 2 && \\
mknod -m 666 /rootfs/dev/random c 1 8 && \\
mknod -m 666 /rootfs/dev/tty c 5 0 && \\
mknod -m 666 /rootfs/dev/tty0 c 4 0 && \\
mknod -m 666 /rootfs/dev/urandom c 1 9 && \\
mknod -m 666 /rootfs/dev/zero c 1 5
# Install a minimal set of dependencies, and some for Elasticsearch
RUN ${package_manager} --installroot=/rootfs --releasever=/ --setopt=tsflags=nodocs \\
--setopt=group_package_types=mandatory -y \\
--skip-broken \\
install basesystem bash zip zlib
# `tini` is a tiny but valid init for containers. This is used to cleanly
# control how ES and any child processes are shut down.
#
# The tini GitHub page gives instructions for verifying the binary using
# gpg, but the keyservers are slow to return the key and this can fail the
# build. Instead, we check the binary against the published checksum.
#
# Also, we use busybox instead of installing utility RPMs, which pulls in
# all kinds of stuff we don't want.
RUN set -e ; \\
TINI_BIN="" ; \\
BUSYBOX_COMMIT="" ; \\
case "\$(arch)" in \\
aarch64) \\
BUSYBOX_COMMIT='8a500845daeaeb926b25f73089c0668cac676e97' ; \\
TINI_BIN='tini-arm64' ; \\
;; \\
x86_64) \\
BUSYBOX_COMMIT='cc81bf8a3c979f596af2d811a3910aeaa230e8ef' ; \\
TINI_BIN='tini-amd64' ; \\
;; \\
*) echo >&2 "Unsupported architecture \$(arch)" ; exit 1 ;; \\
esac ; \\
curl --retry 10 -S -L -O "https://github.com/krallin/tini/releases/download/v0.19.0/\${TINI_BIN}" ; \\
curl --retry 10 -S -L -O "https://github.com/krallin/tini/releases/download/v0.19.0/\${TINI_BIN}.sha256sum" ; \\
sha256sum -c "\${TINI_BIN}.sha256sum" ; \\
rm "\${TINI_BIN}.sha256sum" ; \\
mv "\${TINI_BIN}" /rootfs/bin/tini ; \\
chmod 0555 /rootfs/bin/tini ; \\
curl --retry 10 -L -O \\
# Here we're fetching the same binaries used for the official busybox docker image from their GitHub repository
"https://github.com/docker-library/busybox/raw/\${BUSYBOX_COMMIT}/stable/musl/busybox.tar.xz" ; \\
tar -xf busybox.tar.xz -C /rootfs/bin --strip=2 ./bin ; \\
rm busybox.tar.xz ;
# Curl needs files under here. More importantly, we change Elasticsearch's
# bundled JDK to use /etc/pki/ca-trust/extracted/java/cacerts instead of
# the bundled cacerts.
RUN mkdir -p /rootfs/etc && \\
cp -a /etc/pki /rootfs/etc/
# Cleanup the filesystem
RUN ${package_manager} --installroot=/rootfs -y clean all && \\
cd /rootfs && \\
rm -rf \\
etc/{X11,centos-release*,csh*,profile*,skel*,yum*} \\
sbin/sln \\
usr/bin/rpm \\
{usr,var}/games \\
usr/lib/{dracut,systemd,udev} \\
usr/lib64/X11 \\
usr/local \\
usr/share/{awk,centos-release,cracklib,desktop-directories,gcc-*,i18n,icons,licenses,xsessions,zoneinfo} \\
usr/share/{man,doc,info,games,gdb,ghostscript,gnome,groff,icons,pixmaps,sounds,backgrounds,themes,X11} \\
usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} \\
var/cache/yum \\
var/lib/{rpm,yum} \\
var/log/yum.log
# ldconfig
RUN rm -rf /rootfs/etc/ld.so.cache /rootfs/var/cache/ldconfig && \\
mkdir -p --mode=0755 /rootfs/var/cache/ldconfig
COPY --from=curl /work/curl /rootfs/usr/bin/curl
# Ensure that there are no files with setuid or setgid, in order to mitigate "stackclash" attacks.
RUN find /rootfs -xdev -perm -4000 -exec chmod ug-s {} +
################################################################################
# Step 3. Fetch the Elasticsearch distribution and configure it for Docker
################################################################################
FROM ${base_image} AS builder
<% } %>
RUN mkdir /usr/share/elasticsearch
@@ -282,8 +130,6 @@ COPY bin/plugin-wrapper.sh /opt/plugins
RUN chmod -R 0555 /opt/plugins
<% } %>
<% if (docker_base == "ubi" || docker_base == "iron_bank") { %>
################################################################################
# Build stage 1 (the actual Elasticsearch image):
#
@@ -293,7 +139,17 @@ RUN chmod -R 0555 /opt/plugins
FROM ${base_image}
<% if (docker_base == "ubi") { %>
<% if (docker_base == "iron_bank") { %>
<%
/* Reviews of the Iron Bank Dockerfile said that they preferred simpler */
/* scripting so this version doesn't have the retry loop featured below. */
%>
RUN ${package_manager} update --setopt=tsflags=nodocs -y && \\
${package_manager} install --setopt=tsflags=nodocs -y \\
nc shadow-utils zip findutils unzip procps-ng && \\
${package_manager} clean all
<% } else { %>
RUN <%= retry.loop(
package_manager,
@@ -303,49 +159,18 @@ RUN <%= retry.loop(
" ${package_manager} clean all"
) %>
<% } else { %>
<%
/* Reviews of the Iron Bank Dockerfile said that they preferred simpler */
/* scripting so this version doesn't have the retry loop featured above. */
%>
RUN ${package_manager} update --setopt=tsflags=nodocs -y && \\
${package_manager} install --setopt=tsflags=nodocs -y \\
nc shadow-utils zip findutils unzip procps-ng && \\
${package_manager} clean all
<% } %>
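For context, `retry.loop` is a template helper in the build tooling that wraps a command in a bounded retry loop to ride out transient package-mirror failures. The generated shell is roughly of the following shape; this is an illustrative sketch only, since the loop count, delay, and exact quoting come from the helper, and the package list is assumed to mirror the Iron Bank branch above:

[source,sh]
--------------------------------------------
RUN exit_code=0 ; \
    for iter in 1 2 3 4 5 6 7 8 9 10 ; do \
        microdnf update --setopt=tsflags=nodocs -y && \
        microdnf install --setopt=tsflags=nodocs -y \
            nc shadow-utils zip findutils unzip procps-ng && \
        microdnf clean all && exit_code=0 && break || \
        { exit_code=$? ; echo "microdnf error: retry $iter in 10s" ; sleep 10 ; } ; \
    done ; \
    exit $exit_code
--------------------------------------------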
RUN groupadd -g 1000 elasticsearch && \\
adduser -u 1000 -g 1000 -G 0 -d /usr/share/elasticsearch elasticsearch && \\
chown -R 0:0 /usr/share/elasticsearch
<% } else { %>
################################################################################
# Stage 4. Build the final image, using the rootfs above as the basis, and
# copying in the Elasticsearch distribution
################################################################################
FROM scratch
# Setup the initial filesystem.
COPY --from=rootfs /rootfs /
RUN addgroup -g 1000 elasticsearch && \\
adduser -D -u 1000 -G elasticsearch -g elasticsearch -h /usr/share/elasticsearch elasticsearch && \\
addgroup elasticsearch root && \\
chown -R 0:0 /usr/share/elasticsearch
<% } %>
ENV ELASTIC_CONTAINER true
WORKDIR /usr/share/elasticsearch
COPY --from=builder --chown=0:0 /usr/share/elasticsearch /usr/share/elasticsearch
<% if (docker_base == "ubi" || docker_base == "iron_bank") { %>
COPY --from=builder --chown=0:0 /bin/tini /bin/tini
<% } %>
<% if (docker_base == 'cloud') { %>
COPY --from=builder --chown=0:0 /opt /opt


@@ -0,0 +1,6 @@
pr: 80524
summary: Use almalinux as the Docker base image
area: Packaging
type: enhancement
issues:
- 76681


@@ -1,13 +1,10 @@
[[docker]]
=== Install {es} with Docker
{es} is also available as Docker images. Starting with version 8.0.0, these
are based upon a tiny core of essential files. Prior versions used
https://hub.docker.com/_/centos/[centos:8] as the base image.
A list of all published Docker images and tags is available at
https://www.docker.elastic.co[www.docker.elastic.co]. The source files
are in
{es} is also available as Docker images. A list of all published Docker
images and tags is available at
https://www.docker.elastic.co[www.docker.elastic.co]. The source files are
in
https://github.com/elastic/elasticsearch/blob/{branch}/distribution/docker[GitHub].
include::license.asciidoc[]
@@ -46,7 +43,7 @@ docker pull {docker-repo}:{version}
endif::[]
Now that you have the {es} Docker image, you can start a
<<docker-cli-run-dev-mode,single-node>> or <<docker-compose-file,multi-node>>
cluster.
@@ -73,7 +70,7 @@ for the transport and HTTP layers.
You can then {kibana-ref}/docker.html[start {kib}] and enter the enrollment
token, which is valid for 30 minutes. This token automatically applies the
security settings from your {es} cluster, authenticates to {es} with the
`kibana_system` user, and writes the security configuration to `kibana.yml`.
The following command starts a single-node {es} cluster for development or
testing.
@@ -160,7 +157,7 @@ the nodes in your cluster.
When defining multiple nodes in a `docker-compose.yml` file, you'll need to
explicitly enable and configure security so that {es} doesn't try to generate a
password for the `elastic` user on every node.
===== Prepare the environment
@@ -380,7 +377,7 @@ docker-compose down -v
----
WARNING: Deleting data volumes will remove the generated security certificates
for your nodes. You will need to run `docker-compose` and
<<docker-generate-certificates,regenerate the security certificates>> before
starting your cluster.
@@ -505,7 +502,7 @@ To check the Docker daemon defaults for ulimits, run:
[source,sh]
--------------------------------------------
docker run --rm centos:8 /bin/bash -c 'ulimit -Hn && ulimit -Sn && ulimit -Hu && ulimit -Su'
docker run --rm docker.elastic.co/elasticsearch/elasticsearch:{version} /bin/bash -c 'ulimit -Hn && ulimit -Sn && ulimit -Hu && ulimit -Su'
--------------------------------------------
If needed, adjust them in the Daemon or override them per container.
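For example, a per-container override can be passed using Docker's standard `--ulimit` flag; the limits shown here are illustrative values, not recommendations:

[source,sh]
--------------------------------------------
docker run --rm --ulimit nofile=65535:65535 --ulimit nproc=4096:4096 \
  docker.elastic.co/elasticsearch/elasticsearch:{version} \
  /bin/bash -c 'ulimit -Hn && ulimit -Sn && ulimit -Hu && ulimit -Su'
--------------------------------------------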
@@ -741,25 +738,6 @@ You must explicitly accept them either by:
See {plugins}/_other_command_line_parameters.html[Plugin management]
for more information.
The {es} Docker image only includes what is required to run {es}, and does
not provide a package manager. It is possible to add additional utilities
with a multi-phase Docker build. You must also copy any dependencies, for
example shared libraries.
[source,sh,subs="attributes"]
--------------------------------------------
FROM centos:8 AS builder
RUN yum install -y some-package
FROM docker.elastic.co/elasticsearch/elasticsearch:{version}
COPY --from=builder /usr/bin/some-utility /usr/bin/
COPY --from=builder /usr/lib/some-lib.so /usr/lib/
--------------------------------------------
You should use `centos:8` as a base in order to avoid incompatibilities.
Use http://man7.org/linux/man-pages/man1/ldd.1.html[`ldd`] to list the
shared libraries required by a utility.
[discrete]
[[troubleshoot-docker-errors]]
==== Troubleshoot Docker errors for {es}
@@ -803,4 +781,4 @@ To resolve this error:
. Update the `-v` or `--volume` flag to point to the `config` directory
path rather than the keystore file's path. For an example, see
<<docker-keystore-bind-mount>>.
. Retry the command.


@@ -77,6 +77,7 @@ import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.matchesPattern;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.startsWith;
import static org.junit.Assume.assumeFalse;
@@ -337,6 +338,7 @@ public class DockerTests extends PackagingTestCase {
copyFromContainer(installation.config("elasticsearch.keystore"), tempEsConfigDir);
copyFromContainer(installation.config("log4j2.properties"), tempEsConfigDir);
final Path autoConfigurationDir = findInContainer(installation.config, "d", "\"tls_auto_config_*\"");
assertThat(autoConfigurationDir, notNullValue());
final String autoConfigurationDirName = autoConfigurationDir.getFileName().toString();
copyFromContainer(autoConfigurationDir, tempEsConfigDir.resolve(autoConfigurationDirName));
@@ -344,20 +346,24 @@ public class DockerTests extends PackagingTestCase {
chownWithPrivilegeEscalation(tempEsDataDir, "501:501");
chownWithPrivilegeEscalation(tempEsLogsDir, "501:501");
// Restart the container
runContainer(
distribution(),
builder().envVar("ELASTIC_PASSWORD", PASSWORD)
.uid(501, 501)
.volume(tempEsDataDir.toAbsolutePath(), installation.data)
.volume(tempEsConfigDir.toAbsolutePath(), installation.config)
.volume(tempEsLogsDir.toAbsolutePath(), installation.logs)
);
try {
// Restart the container
runContainer(
distribution(),
builder().envVar("ELASTIC_PASSWORD", PASSWORD)
.uid(501, 501)
.volume(tempEsDataDir.toAbsolutePath(), installation.data)
.volume(tempEsConfigDir.toAbsolutePath(), installation.config)
.volume(tempEsLogsDir.toAbsolutePath(), installation.logs)
);
waitForElasticsearch(installation, "elastic", PASSWORD);
rmDirWithPrivilegeEscalation(tempEsConfigDir);
rmDirWithPrivilegeEscalation(tempEsDataDir);
rmDirWithPrivilegeEscalation(tempEsLogsDir);
waitForElasticsearch(installation, "elastic", PASSWORD);
removeContainer();
} finally {
rmDirWithPrivilegeEscalation(tempEsConfigDir);
rmDirWithPrivilegeEscalation(tempEsDataDir);
rmDirWithPrivilegeEscalation(tempEsLogsDir);
}
}
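The try/finally introduced above guarantees that the temporary directories, whose ownership was changed via privilege escalation, are removed with the same escalation even when the container fails to start, so a failing run no longer leaks root-owned directories into subsequent tests.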
/**