[cloud-first-testing] Automated deployments (#122102)

* [ci/cloud] Deploy docker image to cloud

* add git commit

* fix template path

* Split cloud build and deploy into separate step

* Remove file

* update field

* Add bootstrap

* WIP purge old cloud deployments

* Fix variable reference

* rm apm

* update path

* fix apm edit

* s/kibana/elasticsearch

* Fix purge bug, remove commit hash from deployment name, track deployment, grab logs and credentials from output

* Use n2-2 agent for cloud build/deploy, fix output flag

* Fix file path

* Use a tmp file for deployment json

* update if kibana exists

* Fix syntax error and skip build for now

* Don't skip build

* grab endpoints

* cleanup

* vault write

* redirect stderr

* refresh vault token

* fix id

* fix annotation

* cleanup

* Update .buildkite/scripts/steps/cloud/build_and_deploy.sh

Co-authored-by: Tyler Smalley <tylersmalley@gmail.com>

Co-authored-by: Brian Seeders <brian.seeders@elastic.co>
Co-authored-by: Kibana Machine <42973632+kibanamachine@users.noreply.github.com>
Co-authored-by: Tyler Smalley <tylersmalley@gmail.com>
Jonathan Budzenski authored on 2022-02-02 15:52:51 -06:00 (committed by GitHub)
commit f046541315, parent 3b63ca1f59
9 changed files with 322 additions and 24 deletions

.buildkite/pipelines/pull_request/deploy_cloud.yml (new file):

@@ -0,0 +1,7 @@
steps:
  - command: .buildkite/scripts/steps/cloud/build_and_deploy.sh
    label: 'Build and Deploy to Cloud'
    agents:
      queue: n2-2
    depends_on: build
    timeout_in_minutes: 30
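
This step only runs for pull requests carrying the ci:deploy-cloud label (see the pipeline generator change below), so opting in is just a label change. As a sketch, a PR can be opted in from the GitHub CLI:

# Hypothetical example: opt a PR into automated cloud deploys
# (12345 is a placeholder PR number).
gh pr edit 12345 --add-label "ci:deploy-cloud"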

Purge pipeline definition (new file):

@@ -0,0 +1,4 @@
steps:
  - command: .buildkite/scripts/steps/cloud/purge.sh
    label: Purge old cloud deployments
    timeout_in_minutes: 10

Kibana build script (the Docker cloud-image build and push moves from here into the new build_and_deploy.sh):

@@ -13,30 +13,6 @@ else
   node scripts/build
 fi

-if [[ "${GITHUB_PR_LABELS:-}" == *"ci:deploy-cloud"* ]]; then
-  echo "--- Build and push Kibana Cloud Distribution"
-
-  echo "$KIBANA_DOCKER_PASSWORD" | docker login -u "$KIBANA_DOCKER_USERNAME" --password-stdin docker.elastic.co
-  trap 'docker logout docker.elastic.co' EXIT
-
-  node scripts/build \
-    --skip-initialize \
-    --skip-generic-folders \
-    --skip-platform-folders \
-    --skip-archives \
-    --docker-images \
-    --docker-tag-qualifier="$GIT_COMMIT" \
-    --docker-push \
-    --skip-docker-ubi \
-    --skip-docker-ubuntu \
-    --skip-docker-contexts
-
-  CLOUD_IMAGE=$(docker images --format "{{.Repository}}:{{.Tag}}" docker.elastic.co/kibana-ci/kibana-cloud)
-
-  cat << EOF | buildkite-agent annotate --style "info" --context cloud-image
-Cloud image: $CLOUD_IMAGE
-EOF
-fi

 echo "--- Archive Kibana Distribution"

 linuxBuild="$(find "$KIBANA_DIR/target" -name 'kibana-*-linux-x86_64.tar.gz')"
 installDir="$KIBANA_DIR/install/kibana"

CI environment setup (adds the Elastic Cloud deploy API key to the build environment):

@@ -92,6 +92,9 @@ export KIBANA_DOCKER_USERNAME
 KIBANA_DOCKER_PASSWORD="$(retry 5 5 vault read -field=password secret/kibana-issues/dev/container-registry)"
 export KIBANA_DOCKER_PASSWORD

+EC_API_KEY="$(retry 5 5 vault read -field=pr_deploy_api_key secret/kibana-issues/dev/kibana-ci-cloud-deploy)"
+export EC_API_KEY
+
 SYNTHETICS_SERVICE_USERNAME="$(retry 5 5 vault read -field=username secret/kibana-issues/dev/kibana-ci-synthetics-credentials)"
 export SYNTHETICS_SERVICE_USERNAME

Pull-request pipeline generator (adds the cloud deploy pipeline when the PR carries the ci:deploy-cloud label):

@@ -103,6 +103,10 @@ const uploadPipeline = (pipelineContent) => {
   pipeline.push(getPipeline('.buildkite/pipelines/pull_request/uptime.yml'));
 }

+if (process.env.GITHUB_PR_LABELS.includes('ci:deploy-cloud')) {
+  pipeline.push(getPipeline('.buildkite/pipelines/pull_request/deploy_cloud.yml'));
+}
+
 pipeline.push(getPipeline('.buildkite/pipelines/pull_request/post_build.yml'));

 uploadPipeline(pipeline.join('\n'));

.buildkite/scripts/steps/cloud/build_and_deploy.sh (new file):

@@ -0,0 +1,80 @@
#!/usr/bin/env bash

set -euo pipefail

source .buildkite/scripts/common/util.sh

.buildkite/scripts/bootstrap.sh

export KBN_NP_PLUGINS_BUILT=true

VERSION="$(jq -r '.version' package.json)-SNAPSHOT"

echo "--- Download kibana distribution"

mkdir -p ./target
buildkite-agent artifact download "kibana-$VERSION-linux-x86_64.tar.gz" ./target --build "${KIBANA_BUILD_ID:-$BUILDKITE_BUILD_ID}"

echo "--- Build and push Kibana Cloud Distribution"

echo "$KIBANA_DOCKER_PASSWORD" | docker login -u "$KIBANA_DOCKER_USERNAME" --password-stdin docker.elastic.co
trap 'docker logout docker.elastic.co' EXIT

node scripts/build \
  --skip-initialize \
  --skip-generic-folders \
  --skip-platform-folders \
  --skip-archives \
  --docker-images \
  --docker-tag-qualifier="$GIT_COMMIT" \
  --docker-push \
  --skip-docker-ubi \
  --skip-docker-ubuntu \
  --skip-docker-contexts

CLOUD_IMAGE=$(docker images --format "{{.Repository}}:{{.Tag}}" docker.elastic.co/kibana-ci/kibana-cloud)

# Fill the deployment template with this PR's image, deployment name, and version
CLOUD_DEPLOYMENT_NAME="kibana-pr-$BUILDKITE_PULL_REQUEST"
jq '
  .resources.kibana[0].plan.kibana.docker_image = "'$CLOUD_IMAGE'" |
  .name = "'$CLOUD_DEPLOYMENT_NAME'" |
  .resources.kibana[0].plan.kibana.version = "'$VERSION'" |
  .resources.elasticsearch[0].plan.elasticsearch.version = "'$VERSION'"
' .buildkite/scripts/steps/cloud/deploy.json > /tmp/deploy.json

# Create the deployment on the first run for a PR; update it on subsequent runs
CLOUD_DEPLOYMENT_ID=$(ecctl deployment list --output json | jq -r '.deployments[] | select(.name == "'$CLOUD_DEPLOYMENT_NAME'") | .id')
JSON_FILE=$(mktemp --suffix ".json")
if [ -z "${CLOUD_DEPLOYMENT_ID}" ]; then
  ecctl deployment create --track --output json --file /tmp/deploy.json &> "$JSON_FILE"

  CLOUD_DEPLOYMENT_USERNAME=$(jq --slurp '.[]|select(.resources).resources[] | select(.credentials).credentials.username' "$JSON_FILE")
  CLOUD_DEPLOYMENT_PASSWORD=$(jq --slurp '.[]|select(.resources).resources[] | select(.credentials).credentials.password' "$JSON_FILE")
  CLOUD_DEPLOYMENT_ID=$(jq -r --slurp '.[0].id' "$JSON_FILE")
  CLOUD_DEPLOYMENT_STATUS_MESSAGES=$(jq --slurp '[.[]|select(.resources == null)]' "$JSON_FILE")

  # Refresh vault token
  VAULT_ROLE_ID="$(retry 5 15 gcloud secrets versions access latest --secret=kibana-buildkite-vault-role-id)"
  VAULT_SECRET_ID="$(retry 5 15 gcloud secrets versions access latest --secret=kibana-buildkite-vault-secret-id)"
  VAULT_TOKEN=$(retry 5 30 vault write -field=token auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID")
  retry 5 30 vault login -no-print "$VAULT_TOKEN"

  # Store the new deployment's credentials where developers can read them
  retry 5 5 vault write "secret/kibana-issues/dev/cloud-deploy/$CLOUD_DEPLOYMENT_NAME" username="$CLOUD_DEPLOYMENT_USERNAME" password="$CLOUD_DEPLOYMENT_PASSWORD"
else
  ecctl deployment update "$CLOUD_DEPLOYMENT_ID" --track --output json --file /tmp/deploy.json &> "$JSON_FILE"
fi

CLOUD_DEPLOYMENT_KIBANA_URL=$(ecctl deployment show "$CLOUD_DEPLOYMENT_ID" | jq -r '.resources.kibana[0].info.metadata.aliased_url')
CLOUD_DEPLOYMENT_ELASTICSEARCH_URL=$(ecctl deployment show "$CLOUD_DEPLOYMENT_ID" | jq -r '.resources.elasticsearch[0].info.metadata.aliased_url')

cat << EOF | buildkite-agent annotate --style "info" --context cloud
### Cloud Deployment
Kibana: $CLOUD_DEPLOYMENT_KIBANA_URL
Elasticsearch: $CLOUD_DEPLOYMENT_ELASTICSEARCH_URL
Credentials: \`vault read secret/kibana-issues/dev/cloud-deploy/$CLOUD_DEPLOYMENT_NAME\`
Image: $CLOUD_IMAGE
EOF

buildkite-agent meta-data set pr_comment:deploy_cloud:head "* [Cloud Deployment](${CLOUD_DEPLOYMENT_KIBANA_URL})"
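
One note on the jq templating above: shell variables are spliced directly into the filter string, which works for these values but would break if one ever contained a quote or other special character. A minimal alternative sketch (not part of this commit) passes them via jq's --arg so jq handles the quoting:

jq \
  --arg image "$CLOUD_IMAGE" \
  --arg name "$CLOUD_DEPLOYMENT_NAME" \
  --arg version "$VERSION" \
  '.resources.kibana[0].plan.kibana.docker_image = $image |
   .name = $name |
   .resources.kibana[0].plan.kibana.version = $version |
   .resources.elasticsearch[0].plan.elasticsearch.version = $version' \
  .buildkite/scripts/steps/cloud/deploy.json > /tmp/deploy.json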

.buildkite/scripts/steps/cloud/deploy.json (new file, the deployment template):

@@ -0,0 +1,163 @@
{
"resources": {
"elasticsearch": [
{
"region": "gcp-us-west2",
"settings": {
"dedicated_masters_threshold": 6
},
"plan": {
"autoscaling_enabled": false,
"cluster_topology": [
{
"zone_count": 2,
"instance_configuration_id": "gcp.coordinating.1",
"node_roles": ["ingest", "remote_cluster_client"],
"id": "coordinating",
"size": {
"resource": "memory",
"value": 0
},
"elasticsearch": {
"enabled_built_in_plugins": []
}
},
{
"zone_count": 1,
"elasticsearch": {
"node_attributes": {
"data": "hot"
},
"enabled_built_in_plugins": []
},
"instance_configuration_id": "gcp.data.highio.1",
"node_roles": [
"master",
"ingest",
"transform",
"data_hot",
"remote_cluster_client",
"data_content"
],
"id": "hot_content",
"size": {
"value": 1024,
"resource": "memory"
}
},
{
"zone_count": 2,
"elasticsearch": {
"node_attributes": {
"data": "warm"
},
"enabled_built_in_plugins": []
},
"instance_configuration_id": "gcp.data.highstorage.1",
"node_roles": ["data_warm", "remote_cluster_client"],
"id": "warm",
"size": {
"resource": "memory",
"value": 0
}
},
{
"zone_count": 1,
"elasticsearch": {
"node_attributes": {
"data": "cold"
},
"enabled_built_in_plugins": []
},
"instance_configuration_id": "gcp.data.highstorage.1",
"node_roles": ["data_cold", "remote_cluster_client"],
"id": "cold",
"size": {
"resource": "memory",
"value": 0
}
},
{
"zone_count": 1,
"elasticsearch": {
"node_attributes": {
"data": "frozen"
},
"enabled_built_in_plugins": []
},
"instance_configuration_id": "gcp.es.datafrozen.n1.64x10x95",
"node_roles": ["data_frozen"],
"id": "frozen",
"size": {
"resource": "memory",
"value": 0
}
},
{
"zone_count": 3,
"instance_configuration_id": "gcp.master.1",
"node_roles": ["master", "remote_cluster_client"],
"id": "master",
"size": {
"resource": "memory",
"value": 0
},
"elasticsearch": {
"enabled_built_in_plugins": []
}
},
{
"zone_count": 1,
"instance_configuration_id": "gcp.ml.1",
"node_roles": ["ml", "remote_cluster_client"],
"id": "ml",
"size": {
"resource": "memory",
"value": 0
},
"elasticsearch": {
"enabled_built_in_plugins": []
}
}
],
"elasticsearch": {
"version": null
},
"deployment_template": {
"id": "gcp-io-optimized-v2"
}
},
"ref_id": "main-elasticsearch"
}
],
"enterprise_search": [],
"kibana": [
{
"elasticsearch_cluster_ref_id": "main-elasticsearch",
"region": "gcp-us-west2",
"plan": {
"cluster_topology": [
{
"instance_configuration_id": "gcp.kibana.1",
"zone_count": 1,
"size": {
"resource": "memory",
"value": 1024
}
}
],
"kibana": {
"version": null,
"docker_image": null
}
},
"ref_id": "main-kibana"
}
],
"apm": []
},
"name": null,
"metadata": {
"system_owned": false
}
}

.buildkite/scripts/steps/cloud/purge.js (new file):

@@ -0,0 +1,56 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
const { execSync } = require('child_process');

const deploymentsListJson = execSync('ecctl deployment list --output json').toString();
const { deployments } = JSON.parse(deploymentsListJson);

const prDeployments = deployments.filter((deployment) => deployment.name.startsWith('kibana-pr-'));

const deploymentsToPurge = [];

const NOW = new Date().getTime() / 1000;

for (const deployment of prDeployments) {
  try {
    const prNumber = deployment.name.match(/^kibana-pr-([0-9]+)$/)[1];
    const prJson = execSync(`gh pr view '${prNumber}' --json state,labels,commits`).toString();
    const pullRequest = JSON.parse(prJson);

    const lastCommit = pullRequest.commits.slice(-1)[0];
    const lastCommitTimestamp = new Date(lastCommit.committedDate).getTime() / 1000;

    if (pullRequest.state !== 'open') {
      console.log(`Pull Request #${prNumber} is no longer open, will delete associated deployment`);
      deploymentsToPurge.push(deployment);
    } else if (!pullRequest.labels.some((label) => label.name === 'ci:deploy-cloud')) {
      console.log(
        `Pull Request #${prNumber} no longer has the ci:deploy-cloud label, will delete associated deployment`
      );
      deploymentsToPurge.push(deployment);
    } else if (lastCommitTimestamp < NOW - 60 * 60 * 24 * 7) {
      console.log(
        `Pull Request #${prNumber} has not been updated in more than 7 days, will delete associated deployment`
      );
      deploymentsToPurge.push(deployment);
    }
  } catch (ex) {
    console.error(ex.toString());
    // deploymentsToPurge.push(deployment); // TODO should we delete on error?
  }
}

for (const deployment of deploymentsToPurge) {
  console.log(`Scheduling deployment for deletion: ${deployment.name} / ${deployment.id}`);
  try {
    execSync(`ecctl deployment shutdown --force '${deployment.id}'`, { stdio: 'inherit' });
  } catch (ex) {
    console.error(ex.toString());
  }
}
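
purge.js assumes a particular shape from gh pr view: state as a string, labels as objects with a name field, and commits with a committedDate. A quick way to inspect exactly those fields for a single PR (the PR number is a placeholder):

gh pr view 12345 --json state,labels,commits \
  | jq '{state, labels: [.labels[].name], lastCommit: .commits[-1].committedDate}'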

.buildkite/scripts/steps/cloud/purge.sh (new file):

@@ -0,0 +1,5 @@
#!/usr/bin/env bash

set -euo pipefail

node .buildkite/scripts/steps/cloud/purge.js