Mirror of https://github.com/elastic/logstash.git (synced 2025-04-21 13:18:16 -04:00)
Compare commits: 288 commits
1953 changed files with 28410 additions and 59559 deletions
|
@ -35,48 +35,71 @@ steps:
|
|||
automatic:
|
||||
- limit: 3
|
||||
|
||||
- label: ":lab_coat: Integration Tests / part 1"
|
||||
key: "integration-tests-part-1"
|
||||
- label: ":lab_coat: Integration Tests / part 1-of-3"
|
||||
key: "integration-tests-part-1-of-3"
|
||||
command: |
|
||||
set -euo pipefail
|
||||
|
||||
source .buildkite/scripts/common/vm-agent.sh
|
||||
ci/integration_tests.sh split 0
|
||||
ci/integration_tests.sh split 0 3
|
||||
retry:
|
||||
automatic:
|
||||
- limit: 3
|
||||
|
||||
- label: ":lab_coat: Integration Tests / part 2"
|
||||
key: "integration-tests-part-2"
|
||||
- label: ":lab_coat: Integration Tests / part 2-of-3"
|
||||
key: "integration-tests-part-2-of-3"
|
||||
command: |
|
||||
set -euo pipefail
|
||||
|
||||
source .buildkite/scripts/common/vm-agent.sh
|
||||
ci/integration_tests.sh split 1
|
||||
ci/integration_tests.sh split 1 3
|
||||
retry:
|
||||
automatic:
|
||||
- limit: 3
|
||||
|
||||
- label: ":lab_coat: IT Persistent Queues / part 1"
|
||||
key: "integration-tests-qa-part-1"
|
||||
- label: ":lab_coat: Integration Tests / part 3-of-3"
|
||||
key: "integration-tests-part-3-of-3"
|
||||
command: |
|
||||
set -euo pipefail
|
||||
|
||||
source .buildkite/scripts/common/vm-agent.sh
|
||||
ci/integration_tests.sh split 2 3
|
||||
retry:
|
||||
automatic:
|
||||
- limit: 3
|
||||
|
||||
- label: ":lab_coat: IT Persistent Queues / part 1-of-3"
|
||||
key: "integration-tests-qa-part-1-of-3"
|
||||
command: |
|
||||
set -euo pipefail
|
||||
|
||||
source .buildkite/scripts/common/vm-agent.sh
|
||||
export FEATURE_FLAG=persistent_queues
|
||||
ci/integration_tests.sh split 0
|
||||
ci/integration_tests.sh split 0 3
|
||||
retry:
|
||||
automatic:
|
||||
- limit: 3
|
||||
|
||||
- label: ":lab_coat: IT Persistent Queues / part 2"
|
||||
key: "integration-tests-qa-part-2"
|
||||
- label: ":lab_coat: IT Persistent Queues / part 2-of-3"
|
||||
key: "integration-tests-qa-part-2-of-3"
|
||||
command: |
|
||||
set -euo pipefail
|
||||
|
||||
source .buildkite/scripts/common/vm-agent.sh
|
||||
export FEATURE_FLAG=persistent_queues
|
||||
ci/integration_tests.sh split 1
|
||||
ci/integration_tests.sh split 1 3
|
||||
retry:
|
||||
automatic:
|
||||
- limit: 3
|
||||
|
||||
- label: ":lab_coat: IT Persistent Queues / part 3-of-3"
|
||||
key: "integration-tests-qa-part-3-of-3"
|
||||
command: |
|
||||
set -euo pipefail
|
||||
|
||||
source .buildkite/scripts/common/vm-agent.sh
|
||||
export FEATURE_FLAG=persistent_queues
|
||||
ci/integration_tests.sh split 2 3
|
||||
retry:
|
||||
automatic:
|
||||
- limit: 3
|
||||
|
|
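The change above replaces the old two-way `split <index>` calls with an explicit `split <index> <total>` convention and adds a third part. As a rough illustration of what such an index/total pair usually does, the sketch below partitions a spec list by index modulo total; it is hypothetical, is not taken from `ci/integration_tests.sh`, and the `qa/integration/specs` path is an assumption.

```bash
#!/usr/bin/env bash
# Hypothetical sketch of a "split <index> <total>" partition scheme.
# The real ci/integration_tests.sh may select its subset differently.
set -euo pipefail

INDEX="${1:?0-based split index required}"
TOTAL="${2:?total number of parts required}"

# Collect candidate spec files in a stable order, then keep every TOTAL-th
# entry starting at INDEX so that each CI job runs a disjoint subset.
mapfile -t specs < <(find qa/integration/specs -name '*_spec.rb' | sort)
selected=()
for i in "${!specs[@]}"; do
  if (( i % TOTAL == INDEX )); then
    selected+=("${specs[$i]}")
  fi
done

printf 'part %s of %s would run %s specs\n' "$INDEX" "$TOTAL" "${#selected[@]}"
```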
.buildkite/benchmark_marathon_pipeline.yml (new file, 11 lines)
|
@ -0,0 +1,11 @@
|
|||
agents:
|
||||
provider: gcp
|
||||
imageProject: elastic-images-prod
|
||||
image: family/platform-ingest-logstash-ubuntu-2204
|
||||
machineType: "n2-standard-16"
|
||||
diskSizeGb: 100
|
||||
diskType: pd-ssd
|
||||
|
||||
steps:
|
||||
- label: "Benchmark Marathon"
|
||||
command: .buildkite/scripts/benchmark/marathon.sh
|
|
@ -4,8 +4,12 @@ steps:
|
|||
- label: ":pipeline: Generate steps"
|
||||
command: |
|
||||
set -euo pipefail
|
||||
|
||||
echo "--- Building [${WORKFLOW_TYPE}] artifacts"
|
||||
|
||||
echo "--- Building [$${WORKFLOW_TYPE}] artifacts"
|
||||
python3 -m pip install pyyaml
|
||||
echo "--- Building dynamic pipeline steps"
|
||||
python3 .buildkite/scripts/dra/generatesteps.py | buildkite-agent pipeline upload
|
||||
python3 .buildkite/scripts/dra/generatesteps.py > steps.yml
|
||||
echo "--- Printing dynamic pipeline steps"
|
||||
cat steps.yml
|
||||
echo "--- Uploading dynamic pipeline steps"
|
||||
cat steps.yml | buildkite-agent pipeline upload
|
||||
|
|
.buildkite/health_report_tests_pipeline.yml (new file, 20 lines)
|
@ -0,0 +1,20 @@
|
|||
# yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json
|
||||
|
||||
agents:
|
||||
provider: gcp
|
||||
imageProject: elastic-images-prod
|
||||
image: family/platform-ingest-logstash-ubuntu-2204
|
||||
machineType: "n2-standard-4"
|
||||
diskSizeGb: 64
|
||||
|
||||
steps:
|
||||
- group: ":logstash: Health API integration tests"
|
||||
key: "testing-phase"
|
||||
steps:
|
||||
- label: "main branch"
|
||||
key: "integ-tests-on-main-branch"
|
||||
command:
|
||||
- .buildkite/scripts/health-report-tests/main.sh
|
||||
retry:
|
||||
automatic:
|
||||
- limit: 3
|
.buildkite/jdk_availability_check_pipeline.yml (new file, 14 lines)
|
@ -0,0 +1,14 @@
|
|||
steps:
|
||||
- label: "JDK Availability check"
|
||||
key: "jdk-availability-check"
|
||||
agents:
|
||||
image: "docker.elastic.co/ci-agent-images/platform-ingest/buildkite-agent-logstash-ci"
|
||||
cpu: "4"
|
||||
memory: "6Gi"
|
||||
ephemeralStorage: "100Gi"
|
||||
command: |
|
||||
set -euo pipefail
|
||||
|
||||
source .buildkite/scripts/common/container-agent.sh
|
||||
export GRADLE_OPTS="-Xmx2g -Dorg.gradle.daemon=false -Dorg.gradle.logging.level=info"
|
||||
ci/check_jdk_version_availability.sh
|
|
@ -5,7 +5,7 @@
|
|||
|
||||
env:
|
||||
DEFAULT_MATRIX_OS: "ubuntu-2204"
|
||||
DEFAULT_MATRIX_JDK: "adoptiumjdk_17"
|
||||
DEFAULT_MATRIX_JDK: "adoptiumjdk_21"
|
||||
|
||||
steps:
|
||||
- input: "Test Parameters"
|
||||
|
@ -18,6 +18,8 @@ steps:
|
|||
multiple: true
|
||||
default: "${DEFAULT_MATRIX_OS}"
|
||||
options:
|
||||
- label: "Ubuntu 24.04"
|
||||
value: "ubuntu-2404"
|
||||
- label: "Ubuntu 22.04"
|
||||
value: "ubuntu-2204"
|
||||
- label: "Ubuntu 20.04"
|
||||
|
@ -26,14 +28,10 @@ steps:
|
|||
value: "debian-12"
|
||||
- label: "Debian 11"
|
||||
value: "debian-11"
|
||||
- label: "Debian 10"
|
||||
value: "debian-10"
|
||||
- label: "RHEL 9"
|
||||
value: "rhel-9"
|
||||
- label: "RHEL 8"
|
||||
value: "rhel-8"
|
||||
- label: "CentOS 7"
|
||||
value: "centos-7"
|
||||
- label: "Oracle Linux 8"
|
||||
value: "oraclelinux-8"
|
||||
- label: "Oracle Linux 7"
|
||||
|
@ -62,20 +60,12 @@ steps:
|
|||
value: "adoptiumjdk_21"
|
||||
- label: "Adoptium JDK 17 (Eclipse Temurin)"
|
||||
value: "adoptiumjdk_17"
|
||||
- label: "Adoptium JDK 11 (Eclipse Temurin)"
|
||||
value: "adoptiumjdk_11"
|
||||
- label: "OpenJDK 21"
|
||||
value: "openjdk_21"
|
||||
- label: "OpenJDK 17"
|
||||
value: "openjdk_17"
|
||||
- label: "OpenJDK 11"
|
||||
value: "openjdk_11"
|
||||
- label: "Zulu 21"
|
||||
value: "zulu_21"
|
||||
- label: "Zulu 17"
|
||||
value: "zulu_17"
|
||||
- label: "Zulu 11"
|
||||
value: "zulu_11"
|
||||
|
||||
- wait: ~
|
||||
if: build.source != "schedule" && build.source != "trigger_job"
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
"pipeline_slug": "logstash-pull-request-pipeline",
|
||||
"allow_org_users": true,
|
||||
"allowed_repo_permissions": ["admin", "write"],
|
||||
"allowed_list": ["dependabot[bot]", "mergify[bot]", "github-actions[bot]"],
|
||||
"allowed_list": ["dependabot[bot]", "mergify[bot]", "github-actions[bot]", "elastic-vault-github-plugin-prod[bot]"],
|
||||
"set_commit_status": true,
|
||||
"build_on_commit": true,
|
||||
"build_on_comment": true,
|
||||
|
@ -14,7 +14,11 @@
|
|||
"skip_ci_labels": [ ],
|
||||
"skip_target_branches": [ ],
|
||||
"skip_ci_on_only_changed": [
|
||||
"^docs/"
|
||||
"^.github/",
|
||||
"^docs/",
|
||||
"^.mergify.yml$",
|
||||
"^.pre-commit-config.yaml",
|
||||
"\\.md$"
|
||||
],
|
||||
"always_require_ci_on_changed": [ ]
|
||||
}
|
||||
|
|
|
@ -22,10 +22,12 @@ steps:
|
|||
- label: ":rspec: Ruby unit tests"
|
||||
key: "ruby-unit-tests"
|
||||
agents:
|
||||
image: "docker.elastic.co/ci-agent-images/platform-ingest/buildkite-agent-logstash-ci"
|
||||
image: "docker.elastic.co/ci-agent-images/platform-ingest/buildkite-agent-logstash-ci-no-root"
|
||||
cpu: "4"
|
||||
memory: "8Gi"
|
||||
ephemeralStorage: "100Gi"
|
||||
# Run as a non-root user
|
||||
imageUID: "1002"
|
||||
retry:
|
||||
automatic:
|
||||
- limit: 3
|
||||
|
@ -79,8 +81,8 @@ steps:
|
|||
manual:
|
||||
allowed: true
|
||||
|
||||
- label: ":lab_coat: Integration Tests / part 1"
|
||||
key: "integration-tests-part-1"
|
||||
- label: ":lab_coat: Integration Tests / part 1-of-3"
|
||||
key: "integration-tests-part-1-of-3"
|
||||
agents:
|
||||
image: "docker.elastic.co/ci-agent-images/platform-ingest/buildkite-agent-logstash-ci-no-root"
|
||||
cpu: "8"
|
||||
|
@ -95,10 +97,10 @@ steps:
|
|||
set -euo pipefail
|
||||
|
||||
source .buildkite/scripts/common/container-agent.sh
|
||||
ci/integration_tests.sh split 0
|
||||
ci/integration_tests.sh split 0 3
|
||||
|
||||
- label: ":lab_coat: Integration Tests / part 2"
|
||||
key: "integration-tests-part-2"
|
||||
- label: ":lab_coat: Integration Tests / part 2-of-3"
|
||||
key: "integration-tests-part-2-of-3"
|
||||
agents:
|
||||
image: "docker.elastic.co/ci-agent-images/platform-ingest/buildkite-agent-logstash-ci-no-root"
|
||||
cpu: "8"
|
||||
|
@ -113,10 +115,28 @@ steps:
|
|||
set -euo pipefail
|
||||
|
||||
source .buildkite/scripts/common/container-agent.sh
|
||||
ci/integration_tests.sh split 1
|
||||
ci/integration_tests.sh split 1 3
|
||||
|
||||
- label: ":lab_coat: IT Persistent Queues / part 1"
|
||||
key: "integration-tests-qa-part-1"
|
||||
- label: ":lab_coat: Integration Tests / part 3-of-3"
|
||||
key: "integration-tests-part-3-of-3"
|
||||
agents:
|
||||
image: "docker.elastic.co/ci-agent-images/platform-ingest/buildkite-agent-logstash-ci-no-root"
|
||||
cpu: "8"
|
||||
memory: "16Gi"
|
||||
ephemeralStorage: "100Gi"
|
||||
# Run as a non-root user
|
||||
imageUID: "1002"
|
||||
retry:
|
||||
automatic:
|
||||
- limit: 3
|
||||
command: |
|
||||
set -euo pipefail
|
||||
|
||||
source .buildkite/scripts/common/container-agent.sh
|
||||
ci/integration_tests.sh split 2 3
|
||||
|
||||
- label: ":lab_coat: IT Persistent Queues / part 1-of-3"
|
||||
key: "integration-tests-qa-part-1-of-3"
|
||||
agents:
|
||||
image: "docker.elastic.co/ci-agent-images/platform-ingest/buildkite-agent-logstash-ci-no-root"
|
||||
cpu: "8"
|
||||
|
@ -132,10 +152,10 @@ steps:
|
|||
|
||||
source .buildkite/scripts/common/container-agent.sh
|
||||
export FEATURE_FLAG=persistent_queues
|
||||
ci/integration_tests.sh split 0
|
||||
ci/integration_tests.sh split 0 3
|
||||
|
||||
- label: ":lab_coat: IT Persistent Queues / part 2"
|
||||
key: "integration-tests-qa-part-2"
|
||||
- label: ":lab_coat: IT Persistent Queues / part 2-of-3"
|
||||
key: "integration-tests-qa-part-2-of-3"
|
||||
agents:
|
||||
image: "docker.elastic.co/ci-agent-images/platform-ingest/buildkite-agent-logstash-ci-no-root"
|
||||
cpu: "8"
|
||||
|
@ -151,7 +171,26 @@ steps:
|
|||
|
||||
source .buildkite/scripts/common/container-agent.sh
|
||||
export FEATURE_FLAG=persistent_queues
|
||||
ci/integration_tests.sh split 1
|
||||
ci/integration_tests.sh split 1 3
|
||||
|
||||
- label: ":lab_coat: IT Persistent Queues / part 3-of-3"
|
||||
key: "integration-tests-qa-part-3-of-3"
|
||||
agents:
|
||||
image: "docker.elastic.co/ci-agent-images/platform-ingest/buildkite-agent-logstash-ci-no-root"
|
||||
cpu: "8"
|
||||
memory: "16Gi"
|
||||
ephemeralStorage: "100Gi"
|
||||
# Run as the non-root (logstash) user. The UID is hardcoded in the image.
|
||||
imageUID: "1002"
|
||||
retry:
|
||||
automatic:
|
||||
- limit: 3
|
||||
command: |
|
||||
set -euo pipefail
|
||||
|
||||
source .buildkite/scripts/common/container-agent.sh
|
||||
export FEATURE_FLAG=persistent_queues
|
||||
ci/integration_tests.sh split 2 3
|
||||
|
||||
- label: ":lab_coat: x-pack unit tests"
|
||||
key: "x-pack-unit-tests"
|
||||
|
|
.buildkite/scripts/benchmark/README.md (new file, 22 lines)
|
@ -0,0 +1,22 @@
|
|||
## Steps to set up a GCP instance to run the benchmark script
|
||||
- Create an "n2-standard-16" instance with an Ubuntu image
|
||||
- Install docker
|
||||
- `sudo snap install docker`
|
||||
- `sudo usermod -a -G docker $USER`
|
||||
- Install jq
|
||||
- Install vault
|
||||
- `sudo snap install vault`
|
||||
- `vault login --method github`
|
||||
- `vault kv get -format json secret/ci/elastic-logstash/benchmark`
|
||||
- Set up the Elasticsearch index mapping and alias with `setup/*`
|
||||
- Import the Kibana dashboards with `save-objects/*`
|
||||
- Run the benchmark script
|
||||
- To send data to your own Elasticsearch cluster, customise `VAULT_PATH="secret/ci/elastic-logstash/your/path"`
|
||||
- Run the script `main.sh`
|
||||
- or run it in the background: `nohup bash -x main.sh > log.log 2>&1 &`
|
||||
|
||||
## Notes
|
||||
- Benchmarks should only be compared using the same hardware setup.
|
||||
- Please do not send the test metrics to the benchmark cluster. You can set `VAULT_PATH` to send data and metrics to your own server.
|
||||
- Run `all.sh` as a calibration pass; it gives you a performance baseline across versions.
|
||||
- [#16586](https://github.com/elastic/logstash/pull/16586) allows legacy monitoring using the configuration `xpack.monitoring.allow_legacy_collection: true`, which is not recognized in version 8. To run benchmarks against version 8, use the script from the corresponding branch (e.g. `8.16`) instead of `main` in Buildkite.
|
|
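As a concrete illustration of the run steps above, an invocation on the benchmark VM could look like the sketch below; the vault path, version, and positional arguments (FB_CNT, QTYPE, CPU, MEM) are example values, not required settings.

```bash
# Example run, assuming docker, jq and vault are already set up as described above.
export VAULT_PATH="secret/ci/elastic-logstash/your/path"  # send data/metrics to your own cluster
export LS_VERSION="8.15.0"                                # optional: pin a released Logstash version

# 4 filebeats, persisted queue, 4 CPUs and 4 GB of memory for the Logstash container,
# run in the background so the SSH session can be closed
nohup bash -x main.sh 4 persisted 4 4 > log.log 2>&1 &
tail -f log.log
```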
@ -3,6 +3,7 @@ pipeline.workers: ${WORKER}
|
|||
pipeline.batch.size: ${BATCH_SIZE}
|
||||
queue.type: ${QTYPE}
|
||||
|
||||
xpack.monitoring.allow_legacy_collection: true
|
||||
xpack.monitoring.enabled: true
|
||||
xpack.monitoring.elasticsearch.username: ${MONITOR_ES_USER}
|
||||
xpack.monitoring.elasticsearch.password: ${MONITOR_ES_PW}
|
||||
|
|
.buildkite/scripts/benchmark/config/uuid (new file, 1 line)
|
@ -0,0 +1 @@
|
|||
f74f1a28-25e9-494f-ba41-ca9f13d4446d
|
.buildkite/scripts/benchmark/core.sh (new executable file, 315 lines)
|
@ -0,0 +1,315 @@
|
|||
#!/usr/bin/env bash
|
||||
set -eo pipefail
|
||||
|
||||
SCRIPT_PATH="$(dirname "${BASH_SOURCE[0]}")"
|
||||
CONFIG_PATH="$SCRIPT_PATH/config"
|
||||
source "$SCRIPT_PATH/util.sh"
|
||||
|
||||
usage() {
|
||||
echo "Usage: $0 [FB_CNT] [QTYPE] [CPU] [MEM]"
|
||||
echo "Example: $0 4 {persisted|memory|all} 2 2"
|
||||
exit 1
|
||||
}
|
||||
|
||||
parse_args() {
|
||||
while [[ "$#" -gt 0 ]]; do
|
||||
if [ -z "$FB_CNT" ]; then
|
||||
FB_CNT=$1
|
||||
elif [ -z "$QTYPE" ]; then
|
||||
case $1 in
|
||||
all | persisted | memory)
|
||||
QTYPE=$1
|
||||
;;
|
||||
*)
|
||||
echo "Error: wrong queue type $1"
|
||||
usage
|
||||
;;
|
||||
esac
|
||||
elif [ -z "$CPU" ]; then
|
||||
CPU=$1
|
||||
elif [ -z "$MEM" ]; then
|
||||
MEM=$1
|
||||
else
|
||||
echo "Error: Too many arguments"
|
||||
usage
|
||||
fi
|
||||
shift
|
||||
done
|
||||
|
||||
# set default value
|
||||
# number of filebeat
|
||||
FB_CNT=${FB_CNT:-4}
|
||||
# all | persisted | memory
|
||||
QTYPE=${QTYPE:-all}
|
||||
CPU=${CPU:-4}
|
||||
MEM=${MEM:-4}
|
||||
XMX=$((MEM / 2))
|
||||
|
||||
IFS=','
|
||||
# worker multiplier: 1,2,4
|
||||
MULTIPLIERS="${MULTIPLIERS:-1,2,4}"
|
||||
read -ra MULTIPLIERS <<< "$MULTIPLIERS"
|
||||
BATCH_SIZES="${BATCH_SIZES:-500}"
|
||||
read -ra BATCH_SIZES <<< "$BATCH_SIZES"
|
||||
# tags to json array
|
||||
read -ra TAG_ARRAY <<< "$TAGS"
|
||||
JSON_TAGS=$(printf '"%s",' "${TAG_ARRAY[@]}" | sed 's/,$//')
|
||||
JSON_TAGS="[$JSON_TAGS]"
|
||||
|
||||
IFS=' '
|
||||
echo "filebeats: $FB_CNT, cpu: $CPU, mem: $MEM, Queue: $QTYPE, worker multiplier: ${MULTIPLIERS[@]}, batch size: ${BATCH_SIZES[@]}"
|
||||
}
|
||||
|
||||
get_secret() {
|
||||
VAULT_PATH=${VAULT_PATH:-secret/ci/elastic-logstash/benchmark}
|
||||
VAULT_DATA=$(vault kv get -format json $VAULT_PATH)
|
||||
BENCHMARK_ES_HOST=$(echo $VAULT_DATA | jq -r '.data.es_host')
|
||||
BENCHMARK_ES_USER=$(echo $VAULT_DATA | jq -r '.data.es_user')
|
||||
BENCHMARK_ES_PW=$(echo $VAULT_DATA | jq -r '.data.es_pw')
|
||||
|
||||
MONITOR_ES_HOST=$(echo $VAULT_DATA | jq -r '.data.monitor_es_host')
|
||||
MONITOR_ES_USER=$(echo $VAULT_DATA | jq -r '.data.monitor_es_user')
|
||||
MONITOR_ES_PW=$(echo $VAULT_DATA | jq -r '.data.monitor_es_pw')
|
||||
}
|
||||
|
||||
pull_images() {
|
||||
echo "--- Pull docker images"
|
||||
|
||||
if [[ -n "$LS_VERSION" ]]; then
|
||||
# pull image if it doesn't exist in local
|
||||
[[ -z $(docker images -q docker.elastic.co/logstash/logstash:$LS_VERSION) ]] && docker pull "docker.elastic.co/logstash/logstash:$LS_VERSION"
|
||||
else
|
||||
# pull the latest snapshot logstash image
|
||||
# select the SNAPSHOT artifact with the highest semantic version number
|
||||
LS_VERSION=$( curl --retry-all-errors --retry 5 --retry-delay 1 -s "https://storage.googleapis.com/artifacts-api/snapshots/main.json" | jq -r '.version' )
|
||||
BUILD_ID=$(curl --retry-all-errors --retry 5 --retry-delay 1 -s "https://storage.googleapis.com/artifacts-api/snapshots/main.json" | jq -r '.build_id')
|
||||
ARCH=$(arch)
|
||||
IMAGE_URL="https://snapshots.elastic.co/${BUILD_ID}/downloads/logstash/logstash-$LS_VERSION-docker-image-$ARCH.tar.gz"
|
||||
IMAGE_FILENAME="$LS_VERSION.tar.gz"
|
||||
|
||||
echo "Download $LS_VERSION from $IMAGE_URL"
|
||||
[[ ! -e $IMAGE_FILENAME ]] && curl -fsSL --retry-max-time 60 --retry 3 --retry-delay 5 -o "$IMAGE_FILENAME" "$IMAGE_URL"
|
||||
[[ -z $(docker images -q docker.elastic.co/logstash/logstash:$LS_VERSION) ]] && docker load -i "$IMAGE_FILENAME"
|
||||
fi
|
||||
|
||||
# pull filebeat image
|
||||
FB_DEFAULT_VERSION="8.13.4"
|
||||
FB_VERSION=${FB_VERSION:-$FB_DEFAULT_VERSION}
|
||||
docker pull "docker.elastic.co/beats/filebeat:$FB_VERSION"
|
||||
}
|
||||
|
||||
generate_logs() {
|
||||
FLOG_FILE_CNT=${FLOG_FILE_CNT:-4}
|
||||
SINGLE_SIZE=524288000
|
||||
TOTAL_SIZE="$((FLOG_FILE_CNT * SINGLE_SIZE))"
|
||||
FLOG_PATH="$SCRIPT_PATH/flog"
|
||||
mkdir -p $FLOG_PATH
|
||||
|
||||
if [[ ! -e "$FLOG_PATH/log${FLOG_FILE_CNT}.log" ]]; then
|
||||
echo "--- Generate logs in background. log: ${FLOG_FILE_CNT}, each size: 500mb"
|
||||
docker run -d --name=flog --rm -v $FLOG_PATH:/go/src/data mingrammer/flog -t log -w -o "/go/src/data/log.log" -b $TOTAL_SIZE -p $SINGLE_SIZE
|
||||
fi
|
||||
}
|
||||
|
||||
check_logs() {
|
||||
echo "--- Check log generation"
|
||||
|
||||
local cnt=0
|
||||
until [[ -e "$FLOG_PATH/log${FLOG_FILE_CNT}.log" || $cnt -gt 600 ]]; do
|
||||
echo "wait 30s" && sleep 30
|
||||
cnt=$((cnt + 30))
|
||||
done
|
||||
|
||||
ls -lah $FLOG_PATH
|
||||
}
|
||||
|
||||
start_logstash() {
|
||||
LS_CONFIG_PATH=$SCRIPT_PATH/ls/config
|
||||
mkdir -p $LS_CONFIG_PATH
|
||||
|
||||
cp $CONFIG_PATH/pipelines.yml $LS_CONFIG_PATH/pipelines.yml
|
||||
cp $CONFIG_PATH/logstash.yml $LS_CONFIG_PATH/logstash.yml
|
||||
cp $CONFIG_PATH/uuid $LS_CONFIG_PATH/uuid
|
||||
|
||||
LS_JAVA_OPTS=${LS_JAVA_OPTS:--Xmx${XMX}g}
|
||||
docker run -d --name=ls --net=host --cpus=$CPU --memory=${MEM}g -e LS_JAVA_OPTS="$LS_JAVA_OPTS" \
|
||||
-e QTYPE="$QTYPE" -e WORKER="$WORKER" -e BATCH_SIZE="$BATCH_SIZE" \
|
||||
-e BENCHMARK_ES_HOST="$BENCHMARK_ES_HOST" -e BENCHMARK_ES_USER="$BENCHMARK_ES_USER" -e BENCHMARK_ES_PW="$BENCHMARK_ES_PW" \
|
||||
-e MONITOR_ES_HOST="$MONITOR_ES_HOST" -e MONITOR_ES_USER="$MONITOR_ES_USER" -e MONITOR_ES_PW="$MONITOR_ES_PW" \
|
||||
-v $LS_CONFIG_PATH/logstash.yml:/usr/share/logstash/config/logstash.yml:ro \
|
||||
-v $LS_CONFIG_PATH/pipelines.yml:/usr/share/logstash/config/pipelines.yml:ro \
|
||||
-v $LS_CONFIG_PATH/uuid:/usr/share/logstash/data/uuid:ro \
|
||||
docker.elastic.co/logstash/logstash:$LS_VERSION
|
||||
}
|
||||
|
||||
start_filebeat() {
|
||||
for ((i = 0; i < FB_CNT; i++)); do
|
||||
FB_PATH="$SCRIPT_PATH/fb${i}"
|
||||
mkdir -p $FB_PATH
|
||||
|
||||
cp $CONFIG_PATH/filebeat.yml $FB_PATH/filebeat.yml
|
||||
|
||||
docker run -d --name=fb$i --net=host --user=root \
|
||||
-v $FB_PATH/filebeat.yml:/usr/share/filebeat/filebeat.yml \
|
||||
-v $SCRIPT_PATH/flog:/usr/share/filebeat/flog \
|
||||
docker.elastic.co/beats/filebeat:$FB_VERSION filebeat -e --strict.perms=false
|
||||
done
|
||||
}
|
||||
|
||||
capture_stats() {
|
||||
CURRENT=$(jq -r '.flow.output_throughput.current' $NS_JSON)
|
||||
local eps_1m=$(jq -r '.flow.output_throughput.last_1_minute' $NS_JSON)
|
||||
local eps_5m=$(jq -r '.flow.output_throughput.last_5_minutes' $NS_JSON)
|
||||
local worker_util=$(jq -r '.pipelines.main.flow.worker_utilization.last_1_minute' $NS_JSON)
|
||||
local worker_concurr=$(jq -r '.pipelines.main.flow.worker_concurrency.last_1_minute' $NS_JSON)
|
||||
local cpu_percent=$(jq -r '.process.cpu.percent' $NS_JSON)
|
||||
local heap=$(jq -r '.jvm.mem.heap_used_in_bytes' $NS_JSON)
|
||||
local non_heap=$(jq -r '.jvm.mem.non_heap_used_in_bytes' $NS_JSON)
|
||||
local q_event_cnt=$(jq -r '.pipelines.main.queue.events_count' $NS_JSON)
|
||||
local q_size=$(jq -r '.pipelines.main.queue.queue_size_in_bytes' $NS_JSON)
|
||||
TOTAL_EVENTS_OUT=$(jq -r '.pipelines.main.events.out' $NS_JSON)
|
||||
printf "current: %s, 1m: %s, 5m: %s, worker_utilization: %s, worker_concurrency: %s, cpu: %s, heap: %s, non-heap: %s, q_events: %s, q_size: %s, total_events_out: %s\n" \
|
||||
$CURRENT $eps_1m $eps_5m $worker_util $worker_concurr $cpu_percent $heap $non_heap $q_event_cnt $q_size $TOTAL_EVENTS_OUT
|
||||
}
|
||||
|
||||
aggregate_stats() {
|
||||
local file_glob="$SCRIPT_PATH/$NS_DIR/${QTYPE:0:1}_w${WORKER}b${BATCH_SIZE}_*.json"
|
||||
MAX_EPS_1M=$( jqmax '.flow.output_throughput.last_1_minute' "$file_glob" )
|
||||
MAX_EPS_5M=$( jqmax '.flow.output_throughput.last_5_minutes' "$file_glob" )
|
||||
MAX_WORKER_UTIL=$( jqmax '.pipelines.main.flow.worker_utilization.last_1_minute' "$file_glob" )
|
||||
MAX_WORKER_CONCURR=$( jqmax '.pipelines.main.flow.worker_concurrency.last_1_minute' "$file_glob" )
|
||||
MAX_Q_EVENT_CNT=$( jqmax '.pipelines.main.queue.events_count' "$file_glob" )
|
||||
MAX_Q_SIZE=$( jqmax '.pipelines.main.queue.queue_size_in_bytes' "$file_glob" )
|
||||
|
||||
AVG_CPU_PERCENT=$( jqavg '.process.cpu.percent' "$file_glob" )
|
||||
AVG_VIRTUAL_MEM=$( jqavg '.process.mem.total_virtual_in_bytes' "$file_glob" )
|
||||
AVG_HEAP=$( jqavg '.jvm.mem.heap_used_in_bytes' "$file_glob" )
|
||||
AVG_NON_HEAP=$( jqavg '.jvm.mem.non_heap_used_in_bytes' "$file_glob" )
|
||||
}
|
||||
|
||||
send_summary() {
|
||||
echo "--- Send summary to Elasticsearch"
|
||||
|
||||
# build json
|
||||
local timestamp
|
||||
timestamp=$(date -u +"%Y-%m-%dT%H:%M:%S")
|
||||
SUMMARY="{\"timestamp\": \"$timestamp\", \"version\": \"$LS_VERSION\", \"cpu\": \"$CPU\", \"mem\": \"$MEM\", \"workers\": \"$WORKER\", \"batch_size\": \"$BATCH_SIZE\", \"queue_type\": \"$QTYPE\""
|
||||
not_empty "$TOTAL_EVENTS_OUT" && SUMMARY="$SUMMARY, \"total_events_out\": \"$TOTAL_EVENTS_OUT\""
|
||||
not_empty "$MAX_EPS_1M" && SUMMARY="$SUMMARY, \"max_eps_1m\": \"$MAX_EPS_1M\""
|
||||
not_empty "$MAX_EPS_5M" && SUMMARY="$SUMMARY, \"max_eps_5m\": \"$MAX_EPS_5M\""
|
||||
not_empty "$MAX_WORKER_UTIL" && SUMMARY="$SUMMARY, \"max_worker_utilization\": \"$MAX_WORKER_UTIL\""
|
||||
not_empty "$MAX_WORKER_CONCURR" && SUMMARY="$SUMMARY, \"max_worker_concurrency\": \"$MAX_WORKER_CONCURR\""
|
||||
not_empty "$AVG_CPU_PERCENT" && SUMMARY="$SUMMARY, \"avg_cpu_percentage\": \"$AVG_CPU_PERCENT\""
|
||||
not_empty "$AVG_HEAP" && SUMMARY="$SUMMARY, \"avg_heap\": \"$AVG_HEAP\""
|
||||
not_empty "$AVG_NON_HEAP" && SUMMARY="$SUMMARY, \"avg_non_heap\": \"$AVG_NON_HEAP\""
|
||||
not_empty "$AVG_VIRTUAL_MEM" && SUMMARY="$SUMMARY, \"avg_virtual_memory\": \"$AVG_VIRTUAL_MEM\""
|
||||
not_empty "$MAX_Q_EVENT_CNT" && SUMMARY="$SUMMARY, \"max_queue_events\": \"$MAX_Q_EVENT_CNT\""
|
||||
not_empty "$MAX_Q_SIZE" && SUMMARY="$SUMMARY, \"max_queue_bytes_size\": \"$MAX_Q_SIZE\""
|
||||
not_empty "$TAGS" && SUMMARY="$SUMMARY, \"tags\": $JSON_TAGS"
|
||||
SUMMARY="$SUMMARY}"
|
||||
|
||||
tee summary.json << EOF
|
||||
{"index": {}}
|
||||
$SUMMARY
|
||||
EOF
|
||||
|
||||
# send to ES
|
||||
local resp
|
||||
local err_status
|
||||
resp=$(curl -s -X POST -u "$BENCHMARK_ES_USER:$BENCHMARK_ES_PW" "$BENCHMARK_ES_HOST/benchmark_summary/_bulk" -H 'Content-Type: application/json' --data-binary @"summary.json")
|
||||
echo "$resp"
|
||||
err_status=$(echo "$resp" | jq -r ".errors")
|
||||
if [[ "$err_status" == "true" ]]; then
|
||||
echo "Failed to send summary"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# $1: snapshot index
|
||||
node_stats() {
|
||||
NS_JSON="$SCRIPT_PATH/$NS_DIR/${QTYPE:0:1}_w${WORKER}b${BATCH_SIZE}_$1.json" # m_w8b1000_0.json
|
||||
|
||||
# curl inside container because docker on mac cannot resolve localhost to host network interface
|
||||
docker exec -i ls curl localhost:9600/_node/stats > "$NS_JSON" 2> /dev/null
|
||||
}
|
||||
|
||||
# $1: index
|
||||
snapshot() {
|
||||
node_stats $1
|
||||
capture_stats
|
||||
}
|
||||
|
||||
create_directory() {
|
||||
NS_DIR="fb${FB_CNT}c${CPU}m${MEM}" # fb4c4m4
|
||||
mkdir -p "$SCRIPT_PATH/$NS_DIR"
|
||||
}
|
||||
|
||||
queue() {
|
||||
for QTYPE in "persisted" "memory"; do
|
||||
worker
|
||||
done
|
||||
}
|
||||
|
||||
worker() {
|
||||
for m in "${MULTIPLIERS[@]}"; do
|
||||
WORKER=$((CPU * m))
|
||||
batch
|
||||
done
|
||||
}
|
||||
|
||||
batch() {
|
||||
for BATCH_SIZE in "${BATCH_SIZES[@]}"; do
|
||||
run_pipeline
|
||||
stop_pipeline
|
||||
done
|
||||
}
|
||||
|
||||
run_pipeline() {
|
||||
echo "--- Run pipeline. queue type: $QTYPE, worker: $WORKER, batch size: $BATCH_SIZE"
|
||||
|
||||
start_logstash
|
||||
start_filebeat
|
||||
docker ps
|
||||
|
||||
echo "(0) sleep 3m" && sleep 180
|
||||
snapshot "0"
|
||||
|
||||
for i in {1..8}; do
|
||||
echo "($i) sleep 30s" && sleep 30
|
||||
snapshot "$i"
|
||||
|
||||
# print docker log when ingestion rate is zero
|
||||
# remove '.' in number and return max val
|
||||
[[ $(max -g "${CURRENT/./}" "0") -eq 0 ]] &&
|
||||
docker logs fb0 &&
|
||||
docker logs ls
|
||||
done
|
||||
|
||||
aggregate_stats
|
||||
send_summary
|
||||
}
|
||||
|
||||
stop_pipeline() {
|
||||
echo "--- Stop Pipeline"
|
||||
|
||||
for ((i = 0; i < FB_CNT; i++)); do
|
||||
docker stop fb$i
|
||||
docker rm fb$i
|
||||
done
|
||||
|
||||
docker stop ls
|
||||
docker rm ls
|
||||
|
||||
curl -u "$BENCHMARK_ES_USER:$BENCHMARK_ES_PW" -X DELETE $BENCHMARK_ES_HOST/_data_stream/logs-generic-default
|
||||
echo " data stream deleted "
|
||||
|
||||
# TODO: clean page caches, reduce memory fragmentation
|
||||
# https://github.com/elastic/logstash/pull/16191#discussion_r1647050216
|
||||
}
|
||||
|
||||
clean_up() {
|
||||
# stop log generation if it has not finished yet
|
||||
[[ -n $(docker ps | grep flog) ]] && docker stop flog || true
|
||||
# remove image
|
||||
docker image rm docker.elastic.co/logstash/logstash:$LS_VERSION
|
||||
}
|
|
@ -15,9 +15,8 @@ set -eo pipefail
|
|||
# - The script sends a summary of EPS and resource usage to index `benchmark_summary`
|
||||
# *******************************************************
|
||||
|
||||
SCRIPT_PATH="$(cd "$(dirname "$0")"; pwd)"
|
||||
CONFIG_PATH="$SCRIPT_PATH/config"
|
||||
source "$SCRIPT_PATH/util.sh"
|
||||
SCRIPT_PATH="$(dirname "${BASH_SOURCE[0]}")"
|
||||
source "$SCRIPT_PATH/core.sh"
|
||||
|
||||
## usage:
|
||||
## main.sh FB_CNT QTYPE CPU MEM
|
||||
|
@ -36,271 +35,9 @@ source "$SCRIPT_PATH/util.sh"
|
|||
## MEM=4 # number of GB for Logstash container
|
||||
## QTYPE=memory # queue type to test {persisted|memory|all}
|
||||
## FB_CNT=4 # number of filebeats to use in benchmark
|
||||
usage() {
|
||||
echo "Usage: $0 [FB_CNT] [QTYPE] [CPU] [MEM]"
|
||||
echo "Example: $0 4 {persisted|memory|all} 2 2"
|
||||
exit 1
|
||||
}
|
||||
|
||||
parse_args() {
|
||||
while [[ "$#" -gt 0 ]]; do
|
||||
if [ -z "$FB_CNT" ]; then
|
||||
FB_CNT=$1
|
||||
elif [ -z "$QTYPE" ]; then
|
||||
case $1 in
|
||||
all | persisted | memory)
|
||||
QTYPE=$1
|
||||
;;
|
||||
*)
|
||||
echo "Error: wrong queue type $1"
|
||||
usage
|
||||
;;
|
||||
esac
|
||||
elif [ -z "$CPU" ]; then
|
||||
CPU=$1
|
||||
elif [ -z "$MEM" ]; then
|
||||
MEM=$1
|
||||
else
|
||||
echo "Error: Too many arguments"
|
||||
usage
|
||||
fi
|
||||
shift
|
||||
done
|
||||
|
||||
# set default value
|
||||
# number of filebeat
|
||||
FB_CNT=${FB_CNT:-4}
|
||||
# all | persisted | memory
|
||||
QTYPE=${QTYPE:-all}
|
||||
CPU=${CPU:-4}
|
||||
MEM=${MEM:-4}
|
||||
XMX=$((MEM / 2))
|
||||
|
||||
IFS=','
|
||||
# worker multiplier: 1,2,4
|
||||
MULTIPLIERS="${MULTIPLIERS:-1,2,4}"
|
||||
read -ra MULTIPLIERS <<< "$MULTIPLIERS"
|
||||
BATCH_SIZES="${BATCH_SIZES:-500}"
|
||||
read -ra BATCH_SIZES <<< "$BATCH_SIZES"
|
||||
|
||||
IFS=' '
|
||||
echo "filebeats: $FB_CNT, cpu: $CPU, mem: $MEM, Queue: $QTYPE, worker multiplier: ${MULTIPLIERS[@]}, batch size: ${BATCH_SIZES[@]}"
|
||||
}
|
||||
|
||||
get_secret() {
|
||||
VAULT_PATH=secret/ci/elastic-logstash/benchmark
|
||||
VAULT_DATA=$(vault kv get -format json $VAULT_PATH)
|
||||
BENCHMARK_ES_HOST=$(echo $VAULT_DATA | jq -r '.data.es_host')
|
||||
BENCHMARK_ES_USER=$(echo $VAULT_DATA | jq -r '.data.es_user')
|
||||
BENCHMARK_ES_PW=$(echo $VAULT_DATA | jq -r '.data.es_pw')
|
||||
|
||||
MONITOR_ES_HOST=$(echo $VAULT_DATA | jq -r '.data.monitor_es_host')
|
||||
MONITOR_ES_USER=$(echo $VAULT_DATA | jq -r '.data.monitor_es_user')
|
||||
MONITOR_ES_PW=$(echo $VAULT_DATA | jq -r '.data.monitor_es_pw')
|
||||
}
|
||||
|
||||
pull_images() {
|
||||
echo "--- Pull docker images"
|
||||
|
||||
# pull the latest snapshot logstash image
|
||||
if [[ -n "$LS_VERSION" ]]; then
|
||||
docker pull "docker.elastic.co/logstash/logstash:$LS_VERSION"
|
||||
else
|
||||
LS_VERSION=$( curl --retry-all-errors --retry 5 --retry-delay 1 -s https://artifacts-api.elastic.co/v1/versions | jq -r ".versions[-1]" )
|
||||
BUILD_ID=$( curl --retry-all-errors --retry 5 --retry-delay 1 -s https://artifacts-api.elastic.co/v1/branches/master/builds | jq -r ".builds[0]" )
|
||||
ARCH=$(arch)
|
||||
IMAGE_URL="https://snapshots.elastic.co/${BUILD_ID}/downloads/logstash/logstash-$LS_VERSION-docker-image-$ARCH.tar.gz"
|
||||
IMAGE_FILENAME="$LS_VERSION.tar.gz"
|
||||
|
||||
echo "Download $LS_VERSION from $IMAGE_URL"
|
||||
[[ ! -e $IMAGE_FILENAME ]] && curl -fsSL --retry-max-time 60 --retry 3 --retry-delay 5 -o "$IMAGE_FILENAME" "$IMAGE_URL"
|
||||
[[ -z $(docker images -q docker.elastic.co/logstash/logstash:$LS_VERSION) ]] && docker load -i "$IMAGE_FILENAME"
|
||||
fi
|
||||
|
||||
# pull filebeat image
|
||||
FB_DEFAULT_VERSION="8.13.4"
|
||||
FB_VERSION=${FB_VERSION:-$FB_DEFAULT_VERSION}
|
||||
docker pull "docker.elastic.co/beats/filebeat:$FB_VERSION"
|
||||
}
|
||||
|
||||
generate_logs() {
|
||||
FLOG_PATH="$SCRIPT_PATH/flog"
|
||||
mkdir -p $FLOG_PATH
|
||||
|
||||
if [[ ! -e "$FLOG_PATH/log4.log" ]]; then
|
||||
echo "--- Generate logs in background. log: 5, size: 500mb"
|
||||
docker run -d --name=flog --rm -v $FLOG_PATH:/go/src/data mingrammer/flog -t log -w -o "/go/src/data/log.log" -b 2621440000 -p 524288000
|
||||
fi
|
||||
}
|
||||
|
||||
check_logs() {
|
||||
echo "--- Check log generation"
|
||||
|
||||
local cnt=0
|
||||
until [[ -e "$FLOG_PATH/log4.log" || $cnt -gt 600 ]]; do
|
||||
echo "wait 30s" && sleep 30
|
||||
cnt=$((cnt + 30))
|
||||
done
|
||||
|
||||
ls -lah $FLOG_PATH
|
||||
}
|
||||
|
||||
start_logstash() {
|
||||
LS_CONFIG_PATH=$SCRIPT_PATH/ls/config
|
||||
mkdir -p $LS_CONFIG_PATH
|
||||
|
||||
cp $CONFIG_PATH/pipelines.yml $LS_CONFIG_PATH/pipelines.yml
|
||||
cp $CONFIG_PATH/logstash.yml $LS_CONFIG_PATH/logstash.yml
|
||||
|
||||
LS_JAVA_OPTS=${LS_JAVA_OPTS:--Xmx${XMX}g}
|
||||
docker run -d --name=ls --net=host --cpus=$CPU --memory=${MEM}g -e LS_JAVA_OPTS="$LS_JAVA_OPTS" \
|
||||
-e QTYPE="$QTYPE" -e WORKER="$WORKER" -e BATCH_SIZE="$BATCH_SIZE" \
|
||||
-e BENCHMARK_ES_HOST="$BENCHMARK_ES_HOST" -e BENCHMARK_ES_USER="$BENCHMARK_ES_USER" -e BENCHMARK_ES_PW="$BENCHMARK_ES_PW" \
|
||||
-e MONITOR_ES_HOST="$MONITOR_ES_HOST" -e MONITOR_ES_USER="$MONITOR_ES_USER" -e MONITOR_ES_PW="$MONITOR_ES_PW" \
|
||||
-v $LS_CONFIG_PATH/logstash.yml:/usr/share/logstash/config/logstash.yml:ro \
|
||||
-v $LS_CONFIG_PATH/pipelines.yml:/usr/share/logstash/config/pipelines.yml:ro \
|
||||
docker.elastic.co/logstash/logstash:$LS_VERSION
|
||||
}
|
||||
|
||||
start_filebeat() {
|
||||
for ((i = 0; i < FB_CNT; i++)); do
|
||||
FB_PATH="$SCRIPT_PATH/fb${i}"
|
||||
mkdir -p $FB_PATH
|
||||
|
||||
cp $CONFIG_PATH/filebeat.yml $FB_PATH/filebeat.yml
|
||||
|
||||
docker run -d --name=fb$i --net=host --user=root \
|
||||
-v $FB_PATH/filebeat.yml:/usr/share/filebeat/filebeat.yml \
|
||||
-v $SCRIPT_PATH/flog:/usr/share/filebeat/flog \
|
||||
docker.elastic.co/beats/filebeat:$FB_VERSION filebeat -e --strict.perms=false
|
||||
done
|
||||
}
|
||||
|
||||
capture_stats() {
|
||||
CURRENT=$(jq -r '.flow.output_throughput.current' $NS_JSON)
|
||||
local eps_1m=$(jq -r '.flow.output_throughput.last_1_minute' $NS_JSON)
|
||||
local eps_5m=$(jq -r '.flow.output_throughput.last_5_minutes' $NS_JSON)
|
||||
local worker_util=$(jq -r '.pipelines.main.flow.worker_utilization.last_1_minute' $NS_JSON)
|
||||
local worker_concurr=$(jq -r '.pipelines.main.flow.worker_concurrency.last_1_minute' $NS_JSON)
|
||||
local cpu_percent=$(jq -r '.process.cpu.percent' $NS_JSON)
|
||||
local heap=$(jq -r '.jvm.mem.heap_used_in_bytes' $NS_JSON)
|
||||
local non_heap=$(jq -r '.jvm.mem.non_heap_used_in_bytes' $NS_JSON)
|
||||
local q_event_cnt=$(jq -r '.pipelines.main.queue.events_count' $NS_JSON)
|
||||
local q_size=$(jq -r '.pipelines.main.queue.queue_size_in_bytes' $NS_JSON)
|
||||
TOTAL_EVENTS_OUT=$(jq -r '.pipelines.main.events.out' $NS_JSON)
|
||||
printf "current: %s, 1m: %s, 5m: %s, worker_utilization: %s, worker_concurrency: %s, cpu: %s, heap: %s, non-heap: %s, q_events: %s, q_size: %s, total_events_out: %s\n" \
|
||||
$CURRENT $eps_1m $eps_5m $worker_util $worker_concurr $cpu_percent $heap $non_heap $q_event_cnt $q_size $TOTAL_EVENTS_OUT
|
||||
}
|
||||
|
||||
aggregate_stats() {
|
||||
local file_glob="$SCRIPT_PATH/$NS_DIR/${QTYPE:0:1}_w${WORKER}b${BATCH_SIZE}_*.json"
|
||||
MAX_EPS_1M=$( jqmax '.flow.output_throughput.last_1_minute' "$file_glob" )
|
||||
MAX_EPS_5M=$( jqmax '.flow.output_throughput.last_5_minutes' "$file_glob" )
|
||||
MAX_WORKER_UTIL=$( jqmax '.pipelines.main.flow.worker_utilization.last_1_minute' "$file_glob" )
|
||||
MAX_WORKER_CONCURR=$( jqmax '.pipelines.main.flow.worker_concurrency.last_1_minute' "$file_glob" )
|
||||
MAX_Q_EVENT_CNT=$( jqmax '.pipelines.main.queue.events_count' "$file_glob" )
|
||||
MAX_Q_SIZE=$( jqmax '.pipelines.main.queue.queue_size_in_bytes' "$file_glob" )
|
||||
|
||||
AVG_CPU_PERCENT=$( jqavg '.process.cpu.percent' "$file_glob" )
|
||||
AVG_VIRTUAL_MEM=$( jqavg '.process.mem.total_virtual_in_bytes' "$file_glob" )
|
||||
AVG_HEAP=$( jqavg '.jvm.mem.heap_used_in_bytes' "$file_glob" )
|
||||
AVG_NON_HEAP=$( jqavg '.jvm.mem.non_heap_used_in_bytes' "$file_glob" )
|
||||
}
|
||||
|
||||
send_summary() {
|
||||
echo "--- Send summary to Elasticsearch"
|
||||
|
||||
timestamp=$(date -u +"%Y-%m-%dT%H:%M:%S")
|
||||
tee summary.json << EOF
|
||||
{"index": {}}
|
||||
{"timestamp": "$timestamp", "version": "$LS_VERSION", "cpu": "$CPU", "mem": "$MEM", "workers": "$WORKER", "batch_size": "$BATCH_SIZE", "queue_type": "$QTYPE", "total_events_out": "$TOTAL_EVENTS_OUT", "max_eps_1m": "$MAX_EPS_1M", "max_eps_5m": "$MAX_EPS_5M", "max_worker_utilization": "$MAX_WORKER_UTIL", "max_worker_concurrency": "$MAX_WORKER_CONCURR", "avg_cpu_percentage": "$AVG_CPU_PERCENT", "avg_heap": "$AVG_HEAP", "avg_non_heap": "$AVG_NON_HEAP", "avg_virtual_memory": "$AVG_VIRTUAL_MEM", "max_queue_events": "$MAX_Q_EVENT_CNT", "max_queue_bytes_size": "$MAX_Q_SIZE"}
|
||||
EOF
|
||||
curl -X POST -u "$BENCHMARK_ES_USER:$BENCHMARK_ES_PW" "$BENCHMARK_ES_HOST/benchmark_summary/_bulk" -H 'Content-Type: application/json' --data-binary @"summary.json"
|
||||
echo ""
|
||||
}
|
||||
|
||||
# $1: snapshot index
|
||||
node_stats() {
|
||||
NS_JSON="$SCRIPT_PATH/$NS_DIR/${QTYPE:0:1}_w${WORKER}b${BATCH_SIZE}_$1.json" # m_w8b1000_0.json
|
||||
|
||||
# curl inside container because docker on mac cannot resolve localhost to host network interface
|
||||
docker exec -it ls curl localhost:9600/_node/stats > "$NS_JSON" 2> /dev/null
|
||||
}
|
||||
|
||||
# $1: index
|
||||
snapshot() {
|
||||
node_stats $1
|
||||
capture_stats
|
||||
}
|
||||
|
||||
create_directory() {
|
||||
NS_DIR="fb${FB_CNT}c${CPU}m${MEM}" # fb4c4m4
|
||||
mkdir -p "$SCRIPT_PATH/$NS_DIR"
|
||||
}
|
||||
|
||||
queue() {
|
||||
for QTYPE in "persisted" "memory"; do
|
||||
worker
|
||||
done
|
||||
}
|
||||
|
||||
worker() {
|
||||
for m in "${MULTIPLIERS[@]}"; do
|
||||
WORKER=$((CPU * m))
|
||||
batch
|
||||
done
|
||||
}
|
||||
|
||||
batch() {
|
||||
for BATCH_SIZE in "${BATCH_SIZES[@]}"; do
|
||||
run_pipeline
|
||||
stop_pipeline
|
||||
done
|
||||
}
|
||||
|
||||
run_pipeline() {
|
||||
echo "--- Run pipeline. queue type: $QTYPE, worker: $WORKER, batch size: $BATCH_SIZE"
|
||||
|
||||
start_logstash
|
||||
start_filebeat
|
||||
docker ps
|
||||
|
||||
echo "(0) sleep 3m" && sleep 180
|
||||
snapshot "0"
|
||||
|
||||
for i in {1..8}; do
|
||||
echo "($i) sleep 30s" && sleep 30
|
||||
snapshot "$i"
|
||||
|
||||
# print docker log when ingestion rate is zero
|
||||
# remove '.' in number and return max val
|
||||
[[ $(max -g "${CURRENT/./}" "0") -eq 0 ]] &&
|
||||
docker logs fb0 &&
|
||||
docker logs ls
|
||||
done
|
||||
|
||||
aggregate_stats
|
||||
send_summary
|
||||
}
|
||||
|
||||
stop_pipeline() {
|
||||
echo "--- Stop Pipeline"
|
||||
|
||||
for ((i = 0; i < FB_CNT; i++)); do
|
||||
docker stop fb$i
|
||||
docker rm fb$i
|
||||
done
|
||||
|
||||
docker stop ls
|
||||
docker rm ls
|
||||
|
||||
curl -u "$BENCHMARK_ES_USER:$BENCHMARK_ES_PW" -X DELETE $BENCHMARK_ES_HOST/_data_stream/logs-generic-default
|
||||
echo " data stream deleted "
|
||||
|
||||
# TODO: clean page caches, reduce memory fragmentation
|
||||
# https://github.com/elastic/logstash/pull/16191#discussion_r1647050216
|
||||
}
|
||||
|
||||
## FLOG_FILE_CNT=4 # number of files to generate for ingestion
|
||||
## VAULT_PATH=secret/path # vault path point to Elasticsearch credentials. The default value points to benchmark cluster.
|
||||
## TAGS=test,other # tags with "," separator.
|
||||
main() {
|
||||
parse_args "$@"
|
||||
get_secret
|
||||
|
@ -316,8 +53,7 @@ main() {
|
|||
worker
|
||||
fi
|
||||
|
||||
# stop log generation if it has not done yet
|
||||
[[ -n $(docker ps | grep flog) ]] && docker stop flog || true
|
||||
clean_up
|
||||
}
|
||||
|
||||
main "$@"
|
||||
main "$@"
|
.buildkite/scripts/benchmark/marathon.sh (new executable file, 44 lines)
|
@ -0,0 +1,44 @@
|
|||
#!/usr/bin/env bash
|
||||
set -eo pipefail
|
||||
|
||||
# *******************************************************
|
||||
# Run benchmark for versions that have flow metrics
|
||||
# When the hardware changes, run the marathon task to establish a new baseline.
|
||||
# Usage:
|
||||
# nohup bash -x all.sh > log.log 2>&1 &
|
||||
# Accept env vars:
|
||||
# STACK_VERSIONS=8.15.0,8.15.1,8.16.0-SNAPSHOT # versions to test, as a comma-separated string
|
||||
# *******************************************************
|
||||
|
||||
SCRIPT_PATH="$(dirname "${BASH_SOURCE[0]}")"
|
||||
source "$SCRIPT_PATH/core.sh"
|
||||
|
||||
parse_stack_versions() {
|
||||
IFS=','
|
||||
STACK_VERSIONS="${STACK_VERSIONS:-8.6.0,8.7.0,8.8.0,8.9.0,8.10.0,8.11.0,8.12.0,8.13.0,8.14.0,8.15.0}"
|
||||
read -ra STACK_VERSIONS <<< "$STACK_VERSIONS"
|
||||
}
|
||||
|
||||
main() {
|
||||
parse_stack_versions
|
||||
parse_args "$@"
|
||||
get_secret
|
||||
generate_logs
|
||||
check_logs
|
||||
|
||||
USER_QTYPE="$QTYPE"
|
||||
|
||||
for V in "${STACK_VERSIONS[@]}" ; do
|
||||
LS_VERSION="$V"
|
||||
QTYPE="$USER_QTYPE"
|
||||
pull_images
|
||||
create_directory
|
||||
if [[ $QTYPE == "all" ]]; then
|
||||
queue
|
||||
else
|
||||
worker
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
main "$@"
|
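Per the header comments, marathon.sh reruns the benchmark across a list of stack versions to establish a baseline. A usage sketch might look like this; the version list is only an example, and the script's defaults apply when STACK_VERSIONS is unset.

```bash
# Example: re-baseline two stack versions after a hardware change.
STACK_VERSIONS="8.14.0,8.15.0" nohup bash -x marathon.sh > marathon.log 2>&1 &
tail -f marathon.log
```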
.buildkite/scripts/benchmark/save-objects/CHANGELOG.md (new file, 8 lines)
|
@ -0,0 +1,8 @@
|
|||
## 20241210
|
||||
Removed the scripted field `5m_num` from dashboards
|
||||
|
||||
## 20240912
|
||||
Updated runtime field `release` to return `true` when `version` contains "SNAPSHOT"
|
||||
|
||||
## 20240912
|
||||
Initial dashboards
|
.buildkite/scripts/benchmark/save-objects/README.md (new file, 14 lines)
|
@ -0,0 +1,14 @@
|
|||
`benchmark_objects.ndjson` contains the following resources:
|
||||
|
||||
- Dashboards
|
||||
- daily snapshot
|
||||
- released versions
|
||||
- Data Views
|
||||
- benchmark
|
||||
- runtime fields
|
||||
- | Field Name | Type | Comment |
|
||||
|--------------|---------------------------------------------------------------------------------------|--------------------------------------------------|
|
||||
| versions_num | long | converts the semantic version to a number for graph sorting |
|
||||
| release | boolean | `true` for released versions, `false` for snapshot versions; used for graph filtering. |
|
||||
|
||||
To import the objects into Kibana, navigate to Stack Management > Saved Objects and click Import.
|
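For automation, the same import can also go through Kibana's saved objects import API rather than the UI. The sketch below assumes a placeholder Kibana URL and credentials, and that it is run from the repository root.

```bash
# Sketch: import the dashboards and data views without the Kibana UI.
# KIBANA_URL, user and password are placeholders for your own deployment.
KIBANA_URL="https://your-kibana:5601"
curl -u "elastic:changeme" \
  -X POST "$KIBANA_URL/api/saved_objects/_import?overwrite=true" \
  -H "kbn-xsrf: true" \
  --form file=@.buildkite/scripts/benchmark/save-objects/benchmark_objects.ndjson
```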
File diff suppressed because one or more lines are too long
.buildkite/scripts/benchmark/setup/alias (new file, 6 lines)
|
@ -0,0 +1,6 @@
|
|||
POST /_aliases
|
||||
{
|
||||
"actions": [
|
||||
{ "add": { "index": "benchmark_summary_v2", "alias": "benchmark_summary" } }
|
||||
]
|
||||
}
|
.buildkite/scripts/benchmark/setup/benchmark_summary_v2 (new file, 179 lines)
|
@ -0,0 +1,179 @@
|
|||
PUT /benchmark_summary_v2/_mapping
|
||||
{
|
||||
"properties": {
|
||||
"avg_cpu_percentage": {
|
||||
"type": "float",
|
||||
"fields": {
|
||||
"keyword": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 256
|
||||
}
|
||||
}
|
||||
},
|
||||
"avg_heap": {
|
||||
"type": "float",
|
||||
"fields": {
|
||||
"keyword": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 256
|
||||
}
|
||||
}
|
||||
},
|
||||
"avg_non_heap": {
|
||||
"type": "float",
|
||||
"fields": {
|
||||
"keyword": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 256
|
||||
}
|
||||
}
|
||||
},
|
||||
"avg_virtual_memory": {
|
||||
"type": "float",
|
||||
"fields": {
|
||||
"keyword": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 256
|
||||
}
|
||||
}
|
||||
},
|
||||
"batch_size": {
|
||||
"type": "integer",
|
||||
"fields": {
|
||||
"keyword": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 256
|
||||
}
|
||||
}
|
||||
},
|
||||
"cpu": {
|
||||
"type": "float",
|
||||
"fields": {
|
||||
"keyword": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 256
|
||||
}
|
||||
}
|
||||
},
|
||||
"max_eps_1m": {
|
||||
"type": "float",
|
||||
"fields": {
|
||||
"keyword": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 256
|
||||
}
|
||||
}
|
||||
},
|
||||
"max_eps_5m": {
|
||||
"type": "float",
|
||||
"fields": {
|
||||
"keyword": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 256
|
||||
}
|
||||
}
|
||||
},
|
||||
"max_queue_bytes_size": {
|
||||
"type": "integer",
|
||||
"fields": {
|
||||
"keyword": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 256
|
||||
}
|
||||
}
|
||||
},
|
||||
"max_queue_events": {
|
||||
"type": "integer",
|
||||
"fields": {
|
||||
"keyword": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 256
|
||||
}
|
||||
}
|
||||
},
|
||||
"max_worker_concurrency": {
|
||||
"type": "float",
|
||||
"fields": {
|
||||
"keyword": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 256
|
||||
}
|
||||
}
|
||||
},
|
||||
"max_worker_utilization": {
|
||||
"type": "float",
|
||||
"fields": {
|
||||
"keyword": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 256
|
||||
}
|
||||
}
|
||||
},
|
||||
"mem": {
|
||||
"type": "float",
|
||||
"fields": {
|
||||
"keyword": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 256
|
||||
}
|
||||
}
|
||||
},
|
||||
"queue_type": {
|
||||
"type": "text",
|
||||
"fields": {
|
||||
"keyword": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 256
|
||||
}
|
||||
}
|
||||
},
|
||||
"tag": {
|
||||
"type": "text",
|
||||
"fields": {
|
||||
"keyword": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 256
|
||||
}
|
||||
}
|
||||
},
|
||||
"timestamp": {
|
||||
"type": "date"
|
||||
},
|
||||
"total_events_out": {
|
||||
"type": "integer",
|
||||
"fields": {
|
||||
"keyword": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 256
|
||||
}
|
||||
}
|
||||
},
|
||||
"version": {
|
||||
"type": "text",
|
||||
"fields": {
|
||||
"keyword": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 256
|
||||
}
|
||||
}
|
||||
},
|
||||
"workers": {
|
||||
"type": "integer",
|
||||
"fields": {
|
||||
"keyword": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 256
|
||||
}
|
||||
}
|
||||
},
|
||||
"tags" : {
|
||||
"type": "text",
|
||||
"fields": {
|
||||
"keyword": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 256
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
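The benchmark README says to apply these `setup/*` files before the first run. One hedged way to do that with curl is sketched below, assuming placeholder cluster credentials and that the commands are run from the repository root; the repo itself does not prescribe this workflow.

```bash
# Sketch: apply the setup files to the benchmark Elasticsearch cluster.
# ES_HOST/ES_USER/ES_PW are placeholders.
ES_HOST="https://your-es:9200"; ES_USER="elastic"; ES_PW="changeme"

# create the backing index, then apply the mapping from setup/benchmark_summary_v2
# (the first line of the setup file is the console-style "PUT ..." verb, so strip it)
curl -u "$ES_USER:$ES_PW" -X PUT "$ES_HOST/benchmark_summary_v2"
tail -n +2 .buildkite/scripts/benchmark/setup/benchmark_summary_v2 |
  curl -u "$ES_USER:$ES_PW" -X PUT "$ES_HOST/benchmark_summary_v2/_mapping" \
       -H 'Content-Type: application/json' --data-binary @-

# point the benchmark_summary alias at the new index, mirroring setup/alias
curl -u "$ES_USER:$ES_PW" -X POST "$ES_HOST/_aliases" \
     -H 'Content-Type: application/json' \
     -d '{"actions":[{"add":{"index":"benchmark_summary_v2","alias":"benchmark_summary"}}]}'
```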
@ -30,3 +30,12 @@ jqavg() {
|
|||
jqmax() {
|
||||
jq -r "$1 | select(. != null)" $2 | jq -s . | jq 'max'
|
||||
}
|
||||
|
||||
# return true if $1 is non empty and not "null"
|
||||
not_empty() {
|
||||
if [[ -n "$1" && "$1" != "null" ]]; then
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
}
|
|
@ -19,7 +19,7 @@ changed_files=$(git diff --name-only $previous_commit)
|
|||
if [[ -n "$changed_files" ]] && [[ -z "$(echo "$changed_files" | grep -vE "$1")" ]]; then
|
||||
echo "All files compared to the previous commit [$previous_commit] match the specified regex: [$1]"
|
||||
echo "Files changed:"
|
||||
git diff --name-only HEAD^
|
||||
git --no-pager diff --name-only HEAD^
|
||||
exit 0
|
||||
else
|
||||
exit 1
|
||||
|
|
.buildkite/scripts/common/qualified-version.sh (new executable file, 29 lines)
|
@ -0,0 +1,29 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# ********************************************************
|
||||
# Source this script to get the QUALIFIED_VERSION env var
|
||||
# or execute it to receive the qualified version on stdout
|
||||
# ********************************************************
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
export QUALIFIED_VERSION="$(
|
||||
# Extract the version number from the version.yml file
|
||||
# e.g.: 8.6.0
|
||||
printf '%s' "$(awk -F':' '{ if ("logstash" == $1) { gsub(/^ | $/,"",$2); printf $2; exit } }' versions.yml)"
|
||||
|
||||
# Qualifier is passed from CI as an optional field and specifies the version postfix
|
||||
# in case of alpha or beta releases for staging builds only:
|
||||
# e.g: 8.0.0-alpha1
|
||||
printf '%s' "${VERSION_QUALIFIER:+-${VERSION_QUALIFIER}}"
|
||||
|
||||
# add the SNAPSHOT tag unless WORKFLOW_TYPE=="staging" or RELEASE=="1"
|
||||
if [[ ! ( "${WORKFLOW_TYPE:-}" == "staging" || "${RELEASE:+$RELEASE}" == "1" ) ]]; then
|
||||
printf '%s' "-SNAPSHOT"
|
||||
fi
|
||||
)"
|
||||
|
||||
# if invoked directly, output the QUALIFIED_VERSION to stdout
|
||||
if [[ "$0" == "${BASH_SOURCE:-${ZSH_SCRIPT:-}}" ]]; then
|
||||
printf '%s' "${QUALIFIED_VERSION}"
|
||||
fi
|
|
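Per its header comment, qualified-version.sh can either be sourced for the QUALIFIED_VERSION variable or executed for output on stdout. A short usage sketch follows, run from the repository root so versions.yml resolves; the qualifier value is only an example.

```bash
# Option 1: source it to get QUALIFIED_VERSION in the current shell
source .buildkite/scripts/common/qualified-version.sh
echo "building logstash ${QUALIFIED_VERSION}"

# Option 2: execute it and capture the version from stdout; staging builds
# drop the -SNAPSHOT suffix, and an optional qualifier is appended if set
STACK_VERSION="$(WORKFLOW_TYPE=staging VERSION_QUALIFIER=alpha1 \
  .buildkite/scripts/common/qualified-version.sh)"
echo "$STACK_VERSION"   # e.g. <version from versions.yml>-alpha1
```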
@ -12,7 +12,7 @@ set -eo pipefail
|
|||
# https://github.com/elastic/ingest-dev/issues/2664
|
||||
# *******************************************************
|
||||
|
||||
ACTIVE_BRANCHES_URL="https://raw.githubusercontent.com/elastic/logstash/main/ci/branches.json"
|
||||
ACTIVE_BRANCHES_URL="https://storage.googleapis.com/artifacts-api/snapshots/branches.json"
|
||||
EXCLUDE_BRANCHES_ARRAY=()
|
||||
BRANCHES=()
|
||||
|
||||
|
@ -63,7 +63,7 @@ exclude_branches_to_array
|
|||
set -u
|
||||
set +e
|
||||
# pull releasable branches from $ACTIVE_BRANCHES_URL
|
||||
readarray -t ELIGIBLE_BRANCHES < <(curl --retry-all-errors --retry 5 --retry-delay 5 -fsSL $ACTIVE_BRANCHES_URL | jq -r '.branches[].branch')
|
||||
readarray -t ELIGIBLE_BRANCHES < <(curl --retry-all-errors --retry 5 --retry-delay 5 -fsSL $ACTIVE_BRANCHES_URL | jq -r '.branches[]')
|
||||
if [[ $? -ne 0 ]]; then
|
||||
echo "There was an error downloading or parsing the json output from [$ACTIVE_BRANCHES_URL]. Exiting."
|
||||
exit 1
|
||||
|
|
|
@ -1,13 +1,13 @@
|
|||
{
|
||||
"#comment": "This file lists all custom vm images. We use it to make decisions about randomized CI jobs.",
|
||||
"linux": {
|
||||
"ubuntu": ["ubuntu-2204", "ubuntu-2004"],
|
||||
"debian": ["debian-12", "debian-11", "debian-10"],
|
||||
"ubuntu": ["ubuntu-2404", "ubuntu-2204", "ubuntu-2004"],
|
||||
"debian": ["debian-12", "debian-11"],
|
||||
"rhel": ["rhel-9", "rhel-8"],
|
||||
"oraclelinux": ["oraclelinux-8", "oraclelinux-7"],
|
||||
"rocky": ["rocky-linux-8"],
|
||||
"amazonlinux": ["amazonlinux-2023"],
|
||||
"opensuse": ["opensuse-leap-15"]
|
||||
},
|
||||
"windows": ["windows-2022", "windows-2019", "windows-2016"]
|
||||
"windows": ["windows-2025", "windows-2022", "windows-2019", "windows-2016"]
|
||||
}
|
||||
|
|
|
@ -7,63 +7,28 @@ echo "####################################################################"
|
|||
source ./$(dirname "$0")/common.sh
|
||||
|
||||
# WORKFLOW_TYPE is a CI externally configured environment variable that could assume "snapshot" or "staging" values
|
||||
info "Building artifacts for the $WORKFLOW_TYPE workflow ..."
|
||||
|
||||
case "$WORKFLOW_TYPE" in
|
||||
snapshot)
|
||||
info "Building artifacts for the $WORKFLOW_TYPE workflow..."
|
||||
if [ -z "$VERSION_QUALIFIER_OPT" ]; then
|
||||
rake artifact:docker || error "artifact:docker build failed."
|
||||
rake artifact:docker_oss || error "artifact:docker_oss build failed."
|
||||
rake artifact:docker_wolfi || error "artifact:docker_wolfi build failed."
|
||||
rake artifact:dockerfiles || error "artifact:dockerfiles build failed."
|
||||
if [ "$ARCH" != "aarch64" ]; then
|
||||
rake artifact:docker_ubi8 || error "artifact:docker_ubi8 build failed."
|
||||
fi
|
||||
else
|
||||
VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" rake artifact:docker || error "artifact:docker build failed."
|
||||
VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" rake artifact:docker_oss || error "artifact:docker_oss build failed."
|
||||
VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" rake artifact:docker_wolfi || error "artifact:docker_wolfi build failed."
|
||||
VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" rake artifact:dockerfiles || error "artifact:dockerfiles build failed."
|
||||
if [ "$ARCH" != "aarch64" ]; then
|
||||
VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" rake artifact:docker_ubi8 || error "artifact:docker_ubi8 build failed."
|
||||
fi
|
||||
# Qualifier is passed from CI as optional field and specify the version postfix
|
||||
# in case of alpha or beta releases:
|
||||
# e.g: 8.0.0-alpha1
|
||||
STACK_VERSION="${STACK_VERSION}-${VERSION_QUALIFIER_OPT}"
|
||||
fi
|
||||
STACK_VERSION=${STACK_VERSION}-SNAPSHOT
|
||||
info "Build complete, setting STACK_VERSION to $STACK_VERSION."
|
||||
: # no-op
|
||||
;;
|
||||
staging)
|
||||
info "Building artifacts for the $WORKFLOW_TYPE workflow..."
|
||||
if [ -z "$VERSION_QUALIFIER_OPT" ]; then
|
||||
RELEASE=1 rake artifact:docker || error "artifact:docker build failed."
|
||||
RELEASE=1 rake artifact:docker_oss || error "artifact:docker_oss build failed."
|
||||
RELEASE=1 rake artifact:docker_wolfi || error "artifact:docker_wolfi build failed."
|
||||
RELEASE=1 rake artifact:dockerfiles || error "artifact:dockerfiles build failed."
|
||||
if [ "$ARCH" != "aarch64" ]; then
|
||||
RELEASE=1 rake artifact:docker_ubi8 || error "artifact:docker_ubi8 build failed."
|
||||
fi
|
||||
else
|
||||
VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" RELEASE=1 rake artifact:docker || error "artifact:docker build failed."
|
||||
VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" RELEASE=1 rake artifact:docker_oss || error "artifact:docker_oss build failed."
|
||||
VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" RELEASE=1 rake artifact:docker_wolfi || error "artifact:docker_wolfi build failed."
|
||||
VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" RELEASE=1 rake artifact:dockerfiles || error "artifact:dockerfiles build failed."
|
||||
if [ "$ARCH" != "aarch64" ]; then
|
||||
VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" RELEASE=1 rake artifact:docker_ubi8 || error "artifact:docker_ubi8 build failed."
|
||||
fi
|
||||
# Qualifier is passed from CI as optional field and specify the version postfix
|
||||
# in case of alpha or beta releases:
|
||||
# e.g: 8.0.0-alpha1
|
||||
STACK_VERSION="${STACK_VERSION}-${VERSION_QUALIFIER_OPT}"
|
||||
fi
|
||||
info "Build complete, setting STACK_VERSION to $STACK_VERSION."
|
||||
export RELEASE=1
|
||||
;;
|
||||
*)
|
||||
error "Workflow (WORKFLOW_TYPE variable) is not set, exiting..."
|
||||
;;
|
||||
esac
|
||||
|
||||
rake artifact:docker || error "artifact:docker build failed."
|
||||
rake artifact:docker_oss || error "artifact:docker_oss build failed."
|
||||
rake artifact:docker_wolfi || error "artifact:docker_wolfi build failed."
|
||||
rake artifact:dockerfiles || error "artifact:dockerfiles build failed."
|
||||
|
||||
STACK_VERSION="$(./$(dirname "$0")/../common/qualified-version.sh)"
|
||||
info "Build complete, setting STACK_VERSION to $STACK_VERSION."
|
||||
|
||||
info "Saving tar.gz for docker images"
|
||||
save_docker_tarballs "${ARCH}" "${STACK_VERSION}"
|
||||
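Both build scripts now derive the final version string from `.buildkite/scripts/common/qualified-version.sh` instead of mutating `STACK_VERSION` inline. That helper's contents are not shown in this changeset; judging from the version handling visible elsewhere here (the `versions.yml` sed parse, the optional `VERSION_QUALIFIER`, and the `-SNAPSHOT` suffix for snapshot workflows), a minimal sketch of the idea might look like this — treat it as an assumption, not the actual script:

```shell
#!/bin/bash
# Sketch only -- not the repository's real qualified-version.sh.
set -euo pipefail

# Base version, e.g. "8.16.0", parsed from versions.yml the same way common.sh does.
VERSION=$(sed -n 's/^logstash\:[[:space:]]\([[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\)$/\1/p' versions.yml)

# Optional prerelease qualifier, e.g. "alpha1" -> "8.16.0-alpha1".
if [[ -n "${VERSION_QUALIFIER:-}" ]]; then
  VERSION="${VERSION}-${VERSION_QUALIFIER}"
fi

# Snapshot workflows carry a -SNAPSHOT suffix.
if [[ "${WORKFLOW_TYPE:-snapshot}" == "snapshot" ]]; then
  VERSION="${VERSION}-SNAPSHOT"
fi

echo "$VERSION"
```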
@ -73,10 +38,6 @@ for file in build/logstash-*; do shasum $file;done
|
|||
info "Uploading DRA artifacts in buildkite's artifact store ..."
|
||||
# Note: the deb, rpm and tar.gz AARCH64 files generated have already been uploaded by build_packages.sh
|
||||
images="logstash logstash-oss logstash-wolfi"
|
||||
if [ "$ARCH" != "aarch64" ]; then
|
||||
# No logstash-ubi8 for AARCH64
|
||||
images="logstash logstash-oss logstash-wolfi logstash-ubi8"
|
||||
fi
|
||||
for image in ${images}; do
|
||||
buildkite-agent artifact upload "build/$image-${STACK_VERSION}-docker-image-${ARCH}.tar.gz"
|
||||
done
|
||||
|
@ -84,7 +45,7 @@ done
|
|||
# Upload 'docker-build-context.tar.gz' files only when build x86_64, otherwise they will be
|
||||
# overwritten when building aarch64 (or vice versa).
|
||||
if [ "$ARCH" != "aarch64" ]; then
|
||||
for image in logstash logstash-oss logstash-wolfi logstash-ubi8 logstash-ironbank; do
|
||||
for image in logstash logstash-oss logstash-wolfi logstash-ironbank; do
|
||||
buildkite-agent artifact upload "build/${image}-${STACK_VERSION}-docker-build-context.tar.gz"
|
||||
done
|
||||
fi
|
||||
|
|
|
@ -7,39 +7,25 @@ echo "####################################################################"
|
|||
source ./$(dirname "$0")/common.sh
|
||||
|
||||
# WORKFLOW_TYPE is a CI externally configured environment variable that could assume "snapshot" or "staging" values
|
||||
info "Building artifacts for the $WORKFLOW_TYPE workflow ..."
|
||||
|
||||
case "$WORKFLOW_TYPE" in
|
||||
snapshot)
|
||||
info "Building artifacts for the $WORKFLOW_TYPE workflow..."
|
||||
if [ -z "$VERSION_QUALIFIER_OPT" ]; then
|
||||
SKIP_DOCKER=1 rake artifact:all || error "rake artifact:all build failed."
|
||||
else
|
||||
# Qualifier is passed from CI as optional field and specify the version postfix
|
||||
# in case of alpha or beta releases:
|
||||
# e.g: 8.0.0-alpha1
|
||||
VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" SKIP_DOCKER=1 rake artifact:all || error "rake artifact:all build failed."
|
||||
STACK_VERSION="${STACK_VERSION}-${VERSION_QUALIFIER_OPT}"
|
||||
fi
|
||||
STACK_VERSION=${STACK_VERSION}-SNAPSHOT
|
||||
info "Build complete, setting STACK_VERSION to $STACK_VERSION."
|
||||
: # no-op
|
||||
;;
|
||||
staging)
|
||||
info "Building artifacts for the $WORKFLOW_TYPE workflow..."
|
||||
if [ -z "$VERSION_QUALIFIER_OPT" ]; then
|
||||
RELEASE=1 SKIP_DOCKER=1 rake artifact:all || error "rake artifact:all build failed."
|
||||
else
|
||||
# Qualifier is passed from CI as optional field and specify the version postfix
|
||||
# in case of alpha or beta releases:
|
||||
# e.g: 8.0.0-alpha1
|
||||
VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" RELEASE=1 SKIP_DOCKER=1 rake artifact:all || error "rake artifact:all build failed."
|
||||
STACK_VERSION="${STACK_VERSION}-${VERSION_QUALIFIER_OPT}"
|
||||
fi
|
||||
info "Build complete, setting STACK_VERSION to $STACK_VERSION."
|
||||
export RELEASE=1
|
||||
;;
|
||||
*)
|
||||
error "Workflow (WORKFLOW_TYPE variable) is not set, exiting..."
|
||||
;;
|
||||
esac
|
||||
|
||||
SKIP_DOCKER=1 rake artifact:all || error "rake artifact:all build failed."
|
||||
|
||||
STACK_VERSION="$(./$(dirname "$0")/../common/qualified-version.sh)"
|
||||
info "Build complete, setting STACK_VERSION to $STACK_VERSION."
|
||||
|
||||
info "Generated Artifacts"
|
||||
for file in build/logstash-*; do shasum $file;done
|
||||
|
||||
|
|
|
@ -11,10 +11,6 @@ function save_docker_tarballs {
|
|||
local arch="${1:?architecture required}"
|
||||
local version="${2:?stack-version required}"
|
||||
local images="logstash logstash-oss logstash-wolfi"
|
||||
if [ "${arch}" != "aarch64" ]; then
|
||||
# No logstash-ubi8 for AARCH64
|
||||
images="logstash logstash-oss logstash-wolfi logstash-ubi8"
|
||||
fi
|
||||
|
||||
for image in ${images}; do
|
||||
tar_file="${image}-${version}-docker-image-${arch}.tar"
|
||||
|
@ -29,16 +25,16 @@ function save_docker_tarballs {
|
|||
# Since we are using the system jruby, we need to make sure our jvm process
|
||||
# uses at least 1g of memory, If we don't do this we can get OOM issues when
|
||||
# installing gems. See https://github.com/elastic/logstash/issues/5179
|
||||
export JRUBY_OPTS="-J-Xmx1g"
|
||||
export JRUBY_OPTS="-J-Xmx4g"
|
||||
|
||||
# Extract the version number from the version.yml file
|
||||
# e.g.: 8.6.0
|
||||
# The suffix part like alpha1 etc is managed by the optional VERSION_QUALIFIER_OPT environment variable
|
||||
# The suffix part like alpha1 etc is managed by the optional VERSION_QUALIFIER environment variable
|
||||
STACK_VERSION=`cat versions.yml | sed -n 's/^logstash\:[[:space:]]\([[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\)$/\1/p'`
|
||||
|
||||
info "Agent is running on architecture [$(uname -i)]"
|
||||
|
||||
export VERSION_QUALIFIER_OPT=${VERSION_QUALIFIER_OPT:-""}
|
||||
export VERSION_QUALIFIER=${VERSION_QUALIFIER:-""}
|
||||
export DRA_DRY_RUN=${DRA_DRY_RUN:-""}
|
||||
|
||||
if [[ ! -z $DRA_DRY_RUN && $BUILDKITE_STEP_KEY == "logstash_publish_dra" ]]; then
|
||||
@ -3,6 +3,8 @@ import sys
|
|||
|
||||
import yaml
|
||||
|
||||
YAML_HEADER = '# yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json\n'
|
||||
|
||||
def to_bk_key_friendly_string(key):
|
||||
"""
|
||||
Convert and return key to an acceptable format for Buildkite's key: field
|
||||
|
@ -28,6 +30,8 @@ def package_x86_step(branch, workflow_type):
|
|||
export PATH="/opt/buildkite-agent/.rbenv/bin:/opt/buildkite-agent/.pyenv/bin:$PATH"
|
||||
eval "$(rbenv init -)"
|
||||
.buildkite/scripts/dra/build_packages.sh
|
||||
artifact_paths:
|
||||
- "**/*.hprof"
|
||||
'''
|
||||
|
||||
return step
|
||||
|
@ -42,6 +46,8 @@ def package_x86_docker_step(branch, workflow_type):
|
|||
image: family/platform-ingest-logstash-ubuntu-2204
|
||||
machineType: "n2-standard-16"
|
||||
diskSizeGb: 200
|
||||
artifact_paths:
|
||||
- "**/*.hprof"
|
||||
command: |
|
||||
export WORKFLOW_TYPE="{workflow_type}"
|
||||
export PATH="/opt/buildkite-agent/.rbenv/bin:/opt/buildkite-agent/.pyenv/bin:$PATH"
|
||||
|
@ -61,6 +67,8 @@ def package_aarch64_docker_step(branch, workflow_type):
|
|||
imagePrefix: platform-ingest-logstash-ubuntu-2204-aarch64
|
||||
instanceType: "m6g.4xlarge"
|
||||
diskSizeGb: 200
|
||||
artifact_paths:
|
||||
- "**/*.hprof"
|
||||
command: |
|
||||
export WORKFLOW_TYPE="{workflow_type}"
|
||||
export PATH="/opt/buildkite-agent/.rbenv/bin:/opt/buildkite-agent/.pyenv/bin:$PATH"
|
||||
|
@ -106,6 +114,7 @@ def build_steps_to_yaml(branch, workflow_type):
|
|||
if __name__ == "__main__":
|
||||
try:
|
||||
workflow_type = os.environ["WORKFLOW_TYPE"]
|
||||
version_qualifier = os.environ.get("VERSION_QUALIFIER", "")
|
||||
except ImportError:
|
||||
print(f"Missing env variable WORKFLOW_TYPE. Use export WORKFLOW_TYPE=<staging|snapshot>\n.Exiting.")
|
||||
exit(1)
|
||||
|
@ -114,18 +123,25 @@ if __name__ == "__main__":
|
|||
|
||||
structure = {"steps": []}
|
||||
|
||||
# Group defining parallel steps that build and save artifacts
|
||||
group_key = to_bk_key_friendly_string(f"logstash_dra_{workflow_type}")
|
||||
if workflow_type.upper() == "SNAPSHOT" and len(version_qualifier)>0:
|
||||
structure["steps"].append({
|
||||
"label": f"no-op pipeline because prerelease builds (VERSION_QUALIFIER is set to [{version_qualifier}]) don't support the [{workflow_type}] workflow",
|
||||
"command": ":",
|
||||
"skip": "VERSION_QUALIFIER (prerelease builds) not supported with SNAPSHOT DRA",
|
||||
})
|
||||
else:
|
||||
# Group defining parallel steps that build and save artifacts
|
||||
group_key = to_bk_key_friendly_string(f"logstash_dra_{workflow_type}")
|
||||
|
||||
structure["steps"].append({
|
||||
"group": f":Build Artifacts - {workflow_type.upper()}",
|
||||
"key": group_key,
|
||||
"steps": build_steps_to_yaml(branch, workflow_type),
|
||||
})
|
||||
structure["steps"].append({
|
||||
"group": f":Build Artifacts - {workflow_type.upper()}",
|
||||
"key": group_key,
|
||||
"steps": build_steps_to_yaml(branch, workflow_type),
|
||||
})
|
||||
|
||||
# Final step: pull artifacts built above and publish them via the release-manager
|
||||
structure["steps"].extend(
|
||||
yaml.safe_load(publish_dra_step(branch, workflow_type, depends_on=group_key)),
|
||||
)
|
||||
# Final step: pull artifacts built above and publish them via the release-manager
|
||||
structure["steps"].extend(
|
||||
yaml.safe_load(publish_dra_step(branch, workflow_type, depends_on=group_key)),
|
||||
)
|
||||
|
||||
print('# yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json\n' + yaml.dump(structure, Dumper=yaml.Dumper, sort_keys=False))
|
||||
print(YAML_HEADER + yaml.dump(structure, Dumper=yaml.Dumper, sort_keys=False))
|
||||
|
|
|
@ -7,7 +7,9 @@ echo "####################################################################"
|
|||
|
||||
source ./$(dirname "$0")/common.sh
|
||||
|
||||
PLAIN_STACK_VERSION=$STACK_VERSION
|
||||
# DRA_BRANCH can be used for manually testing packaging with PRs
|
||||
# e.g. define `DRA_BRANCH="main"` and `RUN_SNAPSHOT="true"` under Options/Environment Variables in the Buildkite UI after clicking new Build
|
||||
BRANCH="${DRA_BRANCH:="${BUILDKITE_BRANCH:=""}"}"
|
||||
|
||||
# This is the branch selector that needs to be passed to the release-manager
|
||||
# It has to be the name of the branch which originates the artifacts.
|
||||
|
@ -15,29 +17,24 @@ RELEASE_VER=`cat versions.yml | sed -n 's/^logstash\:[[:space:]]\([[:digit:]]*\.
|
|||
if [ -n "$(git ls-remote --heads origin $RELEASE_VER)" ] ; then
|
||||
RELEASE_BRANCH=$RELEASE_VER
|
||||
else
|
||||
RELEASE_BRANCH=main
|
||||
RELEASE_BRANCH="${BRANCH:="main"}"
|
||||
fi
|
||||
echo "RELEASE BRANCH: $RELEASE_BRANCH"
|
||||
|
||||
if [ -n "$VERSION_QUALIFIER_OPT" ]; then
|
||||
# Qualifier is passed from CI as optional field and specify the version postfix
|
||||
# in case of alpha or beta releases:
|
||||
# e.g: 8.0.0-alpha1
|
||||
STACK_VERSION="${STACK_VERSION}-${VERSION_QUALIFIER_OPT}"
|
||||
PLAIN_STACK_VERSION="${PLAIN_STACK_VERSION}-${VERSION_QUALIFIER_OPT}"
|
||||
fi
|
||||
VERSION_QUALIFIER="${VERSION_QUALIFIER:=""}"
|
||||
|
||||
case "$WORKFLOW_TYPE" in
|
||||
snapshot)
|
||||
STACK_VERSION=${STACK_VERSION}-SNAPSHOT
|
||||
:
|
||||
;;
|
||||
staging)
|
||||
;;
|
||||
*)
|
||||
error "Worklflow (WORKFLOW_TYPE variable) is not set, exiting..."
|
||||
error "Workflow (WORKFLOW_TYPE variable) is not set, exiting..."
|
||||
;;
|
||||
esac
|
||||
|
||||
info "Uploading artifacts for ${WORKFLOW_TYPE} workflow on branch: ${RELEASE_BRANCH}"
|
||||
info "Uploading artifacts for ${WORKFLOW_TYPE} workflow on branch: ${RELEASE_BRANCH} for version: ${STACK_VERSION} with version_qualifier: ${VERSION_QUALIFIER}"
|
||||
|
||||
if [ "$RELEASE_VER" != "7.17" ]; then
|
||||
# Version 7.17.x doesn't generate ARM artifacts for Darwin
|
||||
|
@ -45,17 +42,12 @@ if [ "$RELEASE_VER" != "7.17" ]; then
|
|||
:
|
||||
fi
|
||||
|
||||
# Deleting ubi8 for aarch64 for the time being. This image itself is not being built, and it is not expected
|
||||
# by the release manager.
|
||||
# See https://github.com/elastic/infra/blob/master/cd/release/release-manager/project-configs/8.5/logstash.gradle
|
||||
# for more details.
|
||||
# TODO filter it out when uploading artifacts instead
|
||||
rm -f build/logstash-ubi8-${STACK_VERSION}-docker-image-aarch64.tar.gz
|
||||
|
||||
info "Downloaded ARTIFACTS sha report"
|
||||
for file in build/logstash-*; do shasum $file;done
|
||||
|
||||
mv build/distributions/dependencies-reports/logstash-${STACK_VERSION}.csv build/distributions/dependencies-${STACK_VERSION}.csv
|
||||
FINAL_VERSION="$(./$(dirname "$0")/../common/qualified-version.sh)"
|
||||
|
||||
mv build/distributions/dependencies-reports/logstash-${FINAL_VERSION}.csv build/distributions/dependencies-${FINAL_VERSION}.csv
|
||||
|
||||
# set required permissions on artifacts and directory
|
||||
chmod -R a+r build/*
|
||||
|
@ -73,6 +65,22 @@ release_manager_login
|
|||
# ensure the latest image has been pulled
|
||||
docker pull docker.elastic.co/infra/release-manager:latest
|
||||
|
||||
echo "+++ :clipboard: Listing DRA artifacts for version [$STACK_VERSION], branch [$RELEASE_BRANCH], workflow [$WORKFLOW_TYPE], QUALIFIER [$VERSION_QUALIFIER]"
|
||||
docker run --rm \
|
||||
--name release-manager \
|
||||
-e VAULT_ROLE_ID \
|
||||
-e VAULT_SECRET_ID \
|
||||
--mount type=bind,readonly=false,src="$PWD",target=/artifacts \
|
||||
docker.elastic.co/infra/release-manager:latest \
|
||||
cli list \
|
||||
--project logstash \
|
||||
--branch "${RELEASE_BRANCH}" \
|
||||
--commit "$(git rev-parse HEAD)" \
|
||||
--workflow "${WORKFLOW_TYPE}" \
|
||||
--version "${STACK_VERSION}" \
|
||||
--artifact-set main \
|
||||
--qualifier "${VERSION_QUALIFIER}"
|
||||
|
||||
info "Running the release manager ..."
|
||||
|
||||
# collect the artifacts for use with the unified build
|
||||
|
@ -88,8 +96,9 @@ docker run --rm \
|
|||
--branch ${RELEASE_BRANCH} \
|
||||
--commit "$(git rev-parse HEAD)" \
|
||||
--workflow "${WORKFLOW_TYPE}" \
|
||||
--version "${PLAIN_STACK_VERSION}" \
|
||||
--version "${STACK_VERSION}" \
|
||||
--artifact-set main \
|
||||
--qualifier "${VERSION_QUALIFIER}" \
|
||||
${DRA_DRY_RUN} | tee rm-output.txt
|
||||
|
||||
# extract the summary URL from a release manager output line like:
|
||||
|
|
|
@ -10,7 +10,7 @@ from ruamel.yaml.scalarstring import LiteralScalarString
|
|||
VM_IMAGES_FILE = ".buildkite/scripts/common/vm-images.json"
|
||||
VM_IMAGE_PREFIX = "platform-ingest-logstash-multi-jdk-"
|
||||
|
||||
ACCEPTANCE_LINUX_OSES = ["ubuntu-2204", "ubuntu-2004", "debian-11", "debian-10", "rhel-8", "oraclelinux-7", "rocky-linux-8", "opensuse-leap-15", "amazonlinux-2023"]
|
||||
ACCEPTANCE_LINUX_OSES = ["ubuntu-2404", "ubuntu-2204", "ubuntu-2004", "debian-11", "rhel-8", "oraclelinux-7", "rocky-linux-8", "opensuse-leap-15", "amazonlinux-2023"]
|
||||
|
||||
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
|
@ -147,11 +147,6 @@ rake artifact:deb artifact:rpm
|
|||
set -eo pipefail
|
||||
source .buildkite/scripts/common/vm-agent-multi-jdk.sh
|
||||
source /etc/os-release
|
||||
if [[ "$$(echo $$ID_LIKE | tr '[:upper:]' '[:lower:]')" =~ (rhel|fedora) && "$${VERSION_ID%.*}" -le 7 ]]; then
|
||||
# jruby-9.3.10.0 unavailable on centos-7 / oel-7, see https://github.com/jruby/jruby/issues/7579#issuecomment-1425885324 / https://github.com/jruby/jruby/issues/7695
|
||||
# we only need a working jruby to run the acceptance test framework -- the packages have been prebuilt in a previous stage
|
||||
rbenv local jruby-9.4.5.0
|
||||
fi
|
||||
ci/acceptance_tests.sh"""),
|
||||
}
|
||||
steps.append(step)
|
||||
|
@ -160,7 +155,7 @@ ci/acceptance_tests.sh"""),
|
|||
|
||||
def acceptance_docker_steps()-> list[typing.Any]:
|
||||
steps = []
|
||||
for flavor in ["full", "oss", "ubi8", "wolfi"]:
|
||||
for flavor in ["full", "oss", "ubi", "wolfi"]:
|
||||
steps.append({
|
||||
"label": f":docker: {flavor} flavor acceptance",
|
||||
"agents": gcp_agent(vm_name="ubuntu-2204", image_prefix="family/platform-ingest-logstash"),
|
||||
|
|
18  .buildkite/scripts/health-report-tests/README.md  (new file)
@@ -0,0 +1,18 @@
## Description
This package contains integration tests for the Health Report API.
Export `LS_BRANCH` to run them against a specific branch. By default, the main branch is used.

## How to run the Health Report Integration test?
### Prerequisites
Make sure you have Python installed. Install the integration test dependencies with the following command:
```shell
python3 -mpip install -r .buildkite/scripts/health-report-tests/requirements.txt
```

### Run the integration tests
```shell
python3 .buildkite/scripts/health-report-tests/main.py
```

### Troubleshooting
- If you get a `WARNING: pip is configured with locations that require TLS/SSL,...` warning message, make sure you have Python >= 3.12.4 installed.
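For example, to point the suite at the latest 8.x snapshot branch instead of main (the branch value here is only illustrative; `bootstrap.py` below accepts either `major.x` or `X.Y`):

```shell
# illustrative branch value -- any major.x or X.Y branch works per bootstrap.py
export LS_BRANCH="8.x"
python3 -mpip install -r .buildkite/scripts/health-report-tests/requirements.txt
python3 .buildkite/scripts/health-report-tests/main.py
```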
111  .buildkite/scripts/health-report-tests/bootstrap.py  (new file)
@ -0,0 +1,111 @@
|
|||
"""
|
||||
Health Report Integration test bootstrapper with Python script
|
||||
- A script to resolve Logstash version if not provided
|
||||
- Download LS docker image and spin up
|
||||
- When tests finished, teardown the Logstash
|
||||
"""
|
||||
import os
|
||||
import subprocess
|
||||
import time
|
||||
import util
|
||||
import yaml
|
||||
|
||||
|
||||
class Bootstrap:
|
||||
ELASTIC_STACK_RELEASED_VERSION_URL = "https://storage.googleapis.com/artifacts-api/releases/current/"
|
||||
|
||||
def __init__(self) -> None:
|
||||
f"""
|
||||
A constructor of the {Bootstrap}.
|
||||
Returns:
|
||||
Resolves Logstash branch considering provided LS_BRANCH
|
||||
Checks out git branch
|
||||
"""
|
||||
logstash_branch = os.environ.get("LS_BRANCH")
|
||||
if logstash_branch is None:
|
||||
# version is not specified, use the main branch, no need to git checkout
|
||||
print(f"LS_BRANCH is not specified, using main branch.")
|
||||
else:
|
||||
# LS_BRANCH accepts major latest as a major.x or specific branch as X.Y
|
||||
if logstash_branch.find(".x") == -1:
|
||||
print(f"Using specified branch: {logstash_branch}")
|
||||
util.git_check_out_branch(logstash_branch)
|
||||
else:
|
||||
major_version = logstash_branch.split(".")[0]
|
||||
if major_version and major_version.isnumeric():
|
||||
resolved_version = self.__resolve_latest_stack_version_for(major_version)
|
||||
minor_version = resolved_version.split(".")[1]
|
||||
branch = major_version + "." + minor_version
|
||||
print(f"Using resolved branch: {branch}")
|
||||
util.git_check_out_branch(branch)
|
||||
else:
|
||||
raise ValueError(f"Invalid value set to LS_BRANCH. Please set it properly (ex: 8.x or 9.0) and "
|
||||
f"rerun again")
|
||||
|
||||
def __resolve_latest_stack_version_for(self, major_version: str) -> str:
|
||||
resp = util.call_url_with_retry(self.ELASTIC_STACK_RELEASED_VERSION_URL + major_version)
|
||||
release_version = resp.text.strip()
|
||||
print(f"Resolved latest version for {major_version} is {release_version}.")
|
||||
|
||||
if release_version == "":
|
||||
raise ValueError(f"Cannot resolve latest version for {major_version} major")
|
||||
return release_version
|
||||
|
||||
def install_plugin(self, plugin_path: str) -> None:
|
||||
util.run_or_raise_error(
|
||||
["bin/logstash-plugin", "install", plugin_path],
|
||||
f"Failed to install {plugin_path}")
|
||||
|
||||
def build_logstash(self):
|
||||
print(f"Building Logstash...")
|
||||
util.run_or_raise_error(
|
||||
["./gradlew", "clean", "bootstrap", "assemble", "installDefaultGems"],
|
||||
"Failed to build Logstash")
|
||||
print(f"Logstash has successfully built.")
|
||||
|
||||
def apply_config(self, config: dict) -> None:
|
||||
with open(os.getcwd() + "/.buildkite/scripts/health-report-tests/config/pipelines.yml", 'w') as pipelines_file:
|
||||
yaml.dump(config, pipelines_file)
|
||||
|
||||
def run_logstash(self, full_start_required: bool) -> subprocess.Popen:
|
||||
# --config.reload.automatic is to make instance active
|
||||
# it is helpful when testing crash pipeline cases
|
||||
config_path = os.getcwd() + "/.buildkite/scripts/health-report-tests/config"
|
||||
process = subprocess.Popen(["bin/logstash", "--config.reload.automatic", "--path.settings", config_path,
|
||||
"-w 1"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, shell=False)
|
||||
if process.poll() is not None:
|
||||
print(f"Logstash failed to run, check the the config and logs, then rerun.")
|
||||
return None
|
||||
|
||||
# Read stdout and stderr in real-time
|
||||
logs = []
|
||||
for stdout_line in iter(process.stdout.readline, ""):
|
||||
logs.append(stdout_line.strip())
|
||||
# we don't wait for Logstash fully start as we also test slow pipeline start scenarios
|
||||
if full_start_required is False and "Starting pipeline" in stdout_line:
|
||||
break
|
||||
if full_start_required is True and "Pipeline started" in stdout_line:
|
||||
break
|
||||
if "Logstash shut down" in stdout_line or "Logstash stopped" in stdout_line:
|
||||
print(f"Logstash couldn't spin up.")
|
||||
print(logs)
|
||||
return None
|
||||
|
||||
print(f"Logstash is running with PID: {process.pid}.")
|
||||
return process
|
||||
|
||||
def stop_logstash(self, process: subprocess.Popen):
|
||||
start_time = time.time() # in seconds
|
||||
process.terminate()
|
||||
for stdout_line in iter(process.stdout.readline, ""):
|
||||
# print(f"STDOUT: {stdout_line.strip()}")
|
||||
if "Logstash shut down" in stdout_line or "Logstash stopped" in stdout_line:
|
||||
print(f"Logstash stopped.")
|
||||
return None
|
||||
# the shutdown watcher keeps running, so we bound the wait by the time spent
|
||||
if time.time() - start_time > 60:
|
||||
print(f"Logstash didn't stop in 1min, sending SIGTERM signal.")
|
||||
process.kill()
|
||||
if time.time() - start_time > 70:
|
||||
print(f"Logstash didn't stop over 1min, exiting.")
|
||||
return None
|
|
@ -0,0 +1 @@
|
|||
# Intentionally left blank
|
70  .buildkite/scripts/health-report-tests/config_validator.py  (new file)
@ -0,0 +1,70 @@
|
|||
import yaml
|
||||
from typing import Any, List, Dict
|
||||
|
||||
|
||||
class ConfigValidator:
|
||||
REQUIRED_KEYS = {
|
||||
"root": ["name", "config", "conditions", "expectation"],
|
||||
"config": ["pipeline.id", "config.string"],
|
||||
"conditions": ["full_start_required", "wait_seconds"],
|
||||
"expectation": ["status", "symptom", "indicators"],
|
||||
"indicators": ["pipelines"],
|
||||
"pipelines": ["status", "symptom", "indicators"],
|
||||
"DYNAMIC": ["status", "symptom", "diagnosis", "impacts", "details"], # pipeline-id is a DYNAMIC
|
||||
"details": ["status"],
|
||||
"status": ["state"]
|
||||
}
|
||||
|
||||
def __init__(self):
|
||||
self.yaml_content = None
|
||||
|
||||
def __has_valid_keys(self, data: any, key_path: str, repeated: bool) -> bool:
|
||||
# we reached the value
|
||||
if isinstance(data, str) or isinstance(data, bool) or isinstance(data, int) or isinstance(data, float):
|
||||
return True
|
||||
|
||||
# we have two indicators section and for the next repeated ones, we go deeper
|
||||
first_key = next(iter(data))
|
||||
data = data[first_key] if repeated and key_path == "indicators" else data
|
||||
|
||||
if isinstance(data, dict):
|
||||
# pipeline-id is a DYNAMIC
|
||||
required = self.REQUIRED_KEYS.get("DYNAMIC" if repeated and key_path == "indicators" else key_path, [])
|
||||
repeated = not repeated if key_path == "indicators" else repeated
|
||||
for key in required:
|
||||
if key not in data:
|
||||
print(f"Missing key '{key}' in '{key_path}'")
|
||||
return False
|
||||
else:
|
||||
dic_keys_result = self.__has_valid_keys(data[key], key, repeated)
|
||||
if dic_keys_result is False:
|
||||
return False
|
||||
elif isinstance(data, list):
|
||||
for item in data:
|
||||
list_keys_result = self.__has_valid_keys(item, key_path, repeated)
|
||||
if list_keys_result is False:
|
||||
return False
|
||||
return True
|
||||
|
||||
def load(self, file_path: str) -> None:
|
||||
"""Load the YAML file content into self.yaml_content."""
|
||||
self.yaml_content: [Dict[str, Any]] = None
|
||||
try:
|
||||
with open(file_path, 'r') as file:
|
||||
self.yaml_content = yaml.safe_load(file)
|
||||
except yaml.YAMLError as exc:
|
||||
print(f"Error in YAML file: {exc}")
|
||||
self.yaml_content = None
|
||||
|
||||
def is_valid(self) -> bool:
|
||||
"""Validate the entire YAML structure."""
|
||||
if self.yaml_content is None:
|
||||
print(f"YAML content is empty.")
|
||||
return False
|
||||
|
||||
if not isinstance(self.yaml_content, dict):
|
||||
print(f"YAML structure is not as expected, it should start with a Dict.")
|
||||
return False
|
||||
|
||||
result = self.__has_valid_keys(self.yaml_content, "root", False)
|
||||
return True if result is True else False
|
|
@ -0,0 +1,16 @@
|
|||
"""
|
||||
A class to provide information about Logstash node stats.
|
||||
"""
|
||||
|
||||
import util
|
||||
|
||||
|
||||
class LogstashHealthReport:
|
||||
LOGSTASH_HEALTH_REPORT_URL = "http://localhost:9600/_health_report"
|
||||
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
def get(self):
|
||||
response = util.call_url_with_retry(self.LOGSTASH_HEALTH_REPORT_URL)
|
||||
return response.json()
|
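The class above wraps a GET against Logstash's health report endpoint. For a quick manual check outside the test harness (assuming a locally running Logstash with the default HTTP API port), the same endpoint can be queried directly:

```shell
# assumes a local Logstash with the default HTTP API on port 9600
curl -s http://localhost:9600/_health_report | jq '.status, .indicators.pipelines.status'
```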
89  .buildkite/scripts/health-report-tests/main.py  (new file)
@ -0,0 +1,89 @@
|
|||
"""
|
||||
Main entry point of the LS health report API integration test suites
|
||||
"""
|
||||
import glob
|
||||
import os
|
||||
import time
|
||||
import traceback
|
||||
import yaml
|
||||
from bootstrap import Bootstrap
|
||||
from scenario_executor import ScenarioExecutor
|
||||
from config_validator import ConfigValidator
|
||||
|
||||
|
||||
class BootstrapContextManager:
|
||||
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
def __enter__(self):
|
||||
print(f"Starting Logstash Health Report Integration test.")
|
||||
self.bootstrap = Bootstrap()
|
||||
self.bootstrap.build_logstash()
|
||||
|
||||
plugin_path = os.getcwd() + "/qa/support/logstash-integration-failure_injector/logstash-integration" \
|
||||
"-failure_injector-*.gem"
|
||||
matching_files = glob.glob(plugin_path)
|
||||
if len(matching_files) == 0:
|
||||
raise ValueError(f"Could not find logstash-integration-failure_injector plugin.")
|
||||
|
||||
self.bootstrap.install_plugin(matching_files[0])
|
||||
print(f"logstash-integration-failure_injector successfully installed.")
|
||||
return self.bootstrap
|
||||
|
||||
def __exit__(self, exc_type, exc_value, exc_traceback):
|
||||
if exc_type is not None:
|
||||
print(traceback.format_exception(exc_type, exc_value, exc_traceback))
|
||||
|
||||
|
||||
def main():
|
||||
with BootstrapContextManager() as bootstrap:
|
||||
scenario_executor = ScenarioExecutor()
|
||||
config_validator = ConfigValidator()
|
||||
|
||||
working_dir = os.getcwd()
|
||||
scenario_files_path = working_dir + "/.buildkite/scripts/health-report-tests/tests/*.yaml"
|
||||
scenario_files = glob.glob(scenario_files_path)
|
||||
|
||||
for scenario_file in scenario_files:
|
||||
print(f"Validating {scenario_file} scenario file.")
|
||||
config_validator.load(scenario_file)
|
||||
if config_validator.is_valid() is False:
|
||||
print(f"{scenario_file} scenario file is not valid.")
|
||||
return
|
||||
else:
|
||||
print(f"Validation succeeded.")
|
||||
|
||||
has_failed_scenario = False
|
||||
for scenario_file in scenario_files:
|
||||
with open(scenario_file, 'r') as file:
|
||||
# scenario_content: Dict[str, Any] = None
|
||||
scenario_content = yaml.safe_load(file)
|
||||
print(f"Testing `{scenario_content.get('name')}` scenario.")
|
||||
scenario_name = scenario_content['name']
|
||||
|
||||
is_full_start_required = scenario_content.get('conditions').get('full_start_required')
|
||||
wait_seconds = scenario_content.get('conditions').get('wait_seconds')
|
||||
config = scenario_content['config']
|
||||
if config is not None:
|
||||
bootstrap.apply_config(config)
|
||||
expectations = scenario_content.get("expectation")
|
||||
process = bootstrap.run_logstash(is_full_start_required)
|
||||
if process is not None:
|
||||
if wait_seconds is not None:
|
||||
print(f"Test requires to wait for `{wait_seconds}` seconds.")
|
||||
time.sleep(wait_seconds) # wait for Logstash to start
|
||||
try:
|
||||
scenario_executor.on(scenario_name, expectations)
|
||||
except Exception as e:
|
||||
print(e)
|
||||
has_failed_scenario = True
|
||||
bootstrap.stop_logstash(process)
|
||||
|
||||
if has_failed_scenario:
|
||||
# intentionally fail due to visibility
|
||||
raise Exception("Some of scenarios failed, check the log for details.")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
16  .buildkite/scripts/health-report-tests/main.sh  (new executable file)
@ -0,0 +1,16 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
export PATH="/opt/buildkite-agent/.rbenv/bin:/opt/buildkite-agent/.pyenv/bin:/opt/buildkite-agent/.java/bin:$PATH"
|
||||
export JAVA_HOME="/opt/buildkite-agent/.java"
|
||||
export PYENV_VERSION="3.11.5"
|
||||
|
||||
eval "$(rbenv init -)"
|
||||
eval "$(pyenv init -)"
|
||||
|
||||
echo "--- Installing dependencies"
|
||||
python3 -m pip install -r .buildkite/scripts/health-report-tests/requirements.txt
|
||||
|
||||
echo "--- Running tests"
|
||||
python3 .buildkite/scripts/health-report-tests/main.py
|
2  .buildkite/scripts/health-report-tests/requirements.txt  (new file)
@ -0,0 +1,2 @@
|
|||
requests==2.32.3
|
||||
pyyaml==6.0.2
|
67  .buildkite/scripts/health-report-tests/scenario_executor.py  (new file)
@ -0,0 +1,67 @@
|
|||
"""
|
||||
A class to execute the given scenario for Logstash Health Report integration test
|
||||
"""
|
||||
import time
|
||||
from logstash_health_report import LogstashHealthReport
|
||||
|
||||
|
||||
class ScenarioExecutor:
|
||||
logstash_health_report_api = LogstashHealthReport()
|
||||
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
def __has_intersection(self, expects, results):
|
||||
# TODO: this logic is aligned on current Health API response
|
||||
# there is no guarantee that method correctly runs if provided multi expects and results
|
||||
# we expect expects to be existing in results
|
||||
for expect in expects:
|
||||
for result in results:
|
||||
if result.get('help_url') and "health-report-pipeline-" not in result.get('help_url'):
|
||||
return False
|
||||
if not all(key in result and result[key] == value for key, value in expect.items()):
|
||||
return False
|
||||
return True
|
||||
|
||||
def __get_difference(self, differences: list, expectations: dict, reports: dict) -> dict:
|
||||
for key in expectations.keys():
|
||||
|
||||
if type(expectations.get(key)) != type(reports.get(key)):
|
||||
differences.append(f"Scenario expectation and Health API report structure differs for {key}.")
|
||||
return differences
|
||||
|
||||
if isinstance(expectations.get(key), str):
|
||||
if expectations.get(key) != reports.get(key):
|
||||
differences.append({key: {"expected": expectations.get(key), "got": reports.get(key)}})
|
||||
continue
|
||||
elif isinstance(expectations.get(key), dict):
|
||||
self.__get_difference(differences, expectations.get(key), reports.get(key))
|
||||
elif isinstance(expectations.get(key), list):
|
||||
if not self.__has_intersection(expectations.get(key), reports.get(key)):
|
||||
differences.append({key: {"expected": expectations.get(key), "got": reports.get(key)}})
|
||||
return differences
|
||||
|
||||
def __is_expected(self, expectations: dict) -> None:
|
||||
reports = self.logstash_health_report_api.get()
|
||||
differences = self.__get_difference([], expectations, reports)
|
||||
if differences:
|
||||
print("Differences found in 'expectation' section between YAML content and stats:")
|
||||
for diff in differences:
|
||||
print(f"Difference: {diff}")
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
def on(self, scenario_name: str, expectations: dict) -> None:
|
||||
# retriable check the expectations
|
||||
attempts = 5
|
||||
while self.__is_expected(expectations) is False:
|
||||
attempts = attempts - 1
|
||||
if attempts == 0:
|
||||
break
|
||||
time.sleep(1)
|
||||
|
||||
if attempts == 0:
|
||||
raise Exception(f"{scenario_name} failed.")
|
||||
else:
|
||||
print(f"Scenario `{scenario_name}` expectaion meets the health report stats.")
|
|
@ -0,0 +1,32 @@
|
|||
name: "Abnormally terminated pipeline"
|
||||
config:
|
||||
- pipeline.id: abnormally-terminated-pp
|
||||
config.string: |
|
||||
input { heartbeat { interval => 1 } }
|
||||
filter { failure_injector { crash_at => filter } }
|
||||
output { stdout {} }
|
||||
pipeline.workers: 1
|
||||
pipeline.batch.size: 1
|
||||
conditions:
|
||||
full_start_required: true
|
||||
wait_seconds: 5
|
||||
expectation:
|
||||
status: "red"
|
||||
symptom: "1 indicator is unhealthy (`pipelines`)"
|
||||
indicators:
|
||||
pipelines:
|
||||
status: "red"
|
||||
symptom: "1 indicator is unhealthy (`abnormally-terminated-pp`)"
|
||||
indicators:
|
||||
abnormally-terminated-pp:
|
||||
status: "red"
|
||||
symptom: "The pipeline is unhealthy; 1 area is impacted and 1 diagnosis is available"
|
||||
diagnosis:
|
||||
- cause: "pipeline is not running, likely because it has encountered an error"
|
||||
action: "view logs to determine the cause of abnormal pipeline shutdown"
|
||||
impacts:
|
||||
- description: "the pipeline is not currently processing"
|
||||
impact_areas: ["pipeline_execution"]
|
||||
details:
|
||||
status:
|
||||
state: "TERMINATED"
|
|
@ -0,0 +1,38 @@
|
|||
name: "Backpressured in 1min pipeline"
|
||||
config:
|
||||
- pipeline.id: backpressure-1m-pp
|
||||
config.string: |
|
||||
input { heartbeat { interval => 0.1 } }
|
||||
filter { failure_injector { degrade_at => [filter] } }
|
||||
output { stdout {} }
|
||||
pipeline.workers: 1
|
||||
pipeline.batch.size: 1
|
||||
conditions:
|
||||
full_start_required: true
|
||||
wait_seconds: 70 # give more seconds to make sure time is over the threshold, 1m in this case
|
||||
expectation:
|
||||
status: "yellow"
|
||||
symptom: "1 indicator is concerning (`pipelines`)"
|
||||
indicators:
|
||||
pipelines:
|
||||
status: "yellow"
|
||||
symptom: "1 indicator is concerning (`backpressure-1m-pp`)"
|
||||
indicators:
|
||||
backpressure-1m-pp:
|
||||
status: "yellow"
|
||||
symptom: "The pipeline is concerning; 1 area is impacted and 1 diagnosis is available"
|
||||
diagnosis:
|
||||
- id: "logstash:health:pipeline:flow:worker_utilization:diagnosis:1m-blocked"
|
||||
cause: "pipeline workers have been completely blocked for at least one minute"
|
||||
action: "address bottleneck or add resources"
|
||||
impacts:
|
||||
- id: "logstash:health:pipeline:flow:impact:blocked_processing"
|
||||
severity: 2
|
||||
description: "the pipeline is blocked"
|
||||
impact_areas: ["pipeline_execution"]
|
||||
details:
|
||||
status:
|
||||
state: "RUNNING"
|
||||
flow:
|
||||
worker_utilization:
|
||||
last_1_minute: 100.0
|
|
@ -0,0 +1,39 @@
|
|||
name: "Backpressured in 5min pipeline"
|
||||
config:
|
||||
- pipeline.id: backpressure-5m-pp
|
||||
config.string: |
|
||||
input { heartbeat { interval => 0.1 } }
|
||||
filter { failure_injector { degrade_at => [filter] } }
|
||||
output { stdout {} }
|
||||
pipeline.workers: 1
|
||||
pipeline.batch.size: 1
|
||||
conditions:
|
||||
full_start_required: true
|
||||
wait_seconds: 310 # give more seconds to make sure time is over the threshold, 5m in this case
|
||||
expectation:
|
||||
status: "red"
|
||||
symptom: "1 indicator is unhealthy (`pipelines`)"
|
||||
indicators:
|
||||
pipelines:
|
||||
status: "red"
|
||||
symptom: "1 indicator is unhealthy (`backpressure-5m-pp`)"
|
||||
indicators:
|
||||
backpressure-5m-pp:
|
||||
status: "red"
|
||||
symptom: "The pipeline is unhealthy; 1 area is impacted and 1 diagnosis is available"
|
||||
diagnosis:
|
||||
- id: "logstash:health:pipeline:flow:worker_utilization:diagnosis:5m-blocked"
|
||||
cause: "pipeline workers have been completely blocked for at least five minutes"
|
||||
action: "address bottleneck or add resources"
|
||||
impacts:
|
||||
- id: "logstash:health:pipeline:flow:impact:blocked_processing"
|
||||
severity: 1
|
||||
description: "the pipeline is blocked"
|
||||
impact_areas: ["pipeline_execution"]
|
||||
details:
|
||||
status:
|
||||
state: "RUNNING"
|
||||
flow:
|
||||
worker_utilization:
|
||||
last_1_minute: 100.0
|
||||
last_5_minutes: 100.0
|
|
@ -0,0 +1,67 @@
|
|||
name: "Multi pipeline"
|
||||
config:
|
||||
- pipeline.id: slow-start-pp-multipipeline
|
||||
config.string: |
|
||||
input { heartbeat {} }
|
||||
filter { failure_injector { degrade_at => [register] } }
|
||||
output { stdout {} }
|
||||
pipeline.workers: 1
|
||||
pipeline.batch.size: 1
|
||||
- pipeline.id: normally-terminated-pp-multipipeline
|
||||
config.string: |
|
||||
input { generator { count => 1 } }
|
||||
output { stdout {} }
|
||||
pipeline.workers: 1
|
||||
pipeline.batch.size: 1
|
||||
- pipeline.id: abnormally-terminated-pp-multipipeline
|
||||
config.string: |
|
||||
input { heartbeat { interval => 1 } }
|
||||
filter { failure_injector { crash_at => filter } }
|
||||
output { stdout {} }
|
||||
pipeline.workers: 1
|
||||
pipeline.batch.size: 1
|
||||
conditions:
|
||||
full_start_required: false
|
||||
wait_seconds: 10
|
||||
expectation:
|
||||
status: "red"
|
||||
symptom: "1 indicator is unhealthy (`pipelines`)"
|
||||
indicators:
|
||||
pipelines:
|
||||
status: "red"
|
||||
symptom: "1 indicator is unhealthy (`abnormally-terminated-pp-multipipeline`) and 2 indicators are concerning (`slow-start-pp-multipipeline`, `normally-terminated-pp-multipipeline`)"
|
||||
indicators:
|
||||
slow-start-pp-multipipeline:
|
||||
status: "yellow"
|
||||
symptom: "The pipeline is concerning; 1 area is impacted and 1 diagnosis is available"
|
||||
diagnosis:
|
||||
- cause: "pipeline is loading"
|
||||
action: "if pipeline does not come up quickly, you may need to check the logs to see if it is stalled"
|
||||
impacts:
|
||||
- impact_areas: ["pipeline_execution"]
|
||||
details:
|
||||
status:
|
||||
state: "LOADING"
|
||||
normally-terminated-pp-multipipeline:
|
||||
status: "yellow"
|
||||
symptom: "The pipeline is concerning; 1 area is impacted and 1 diagnosis is available"
|
||||
diagnosis:
|
||||
- cause: "pipeline has finished running because its inputs have been closed and events have been processed"
|
||||
action: "if you expect this pipeline to run indefinitely, you will need to configure its inputs to continue receiving or fetching events"
|
||||
impacts:
|
||||
- impact_areas: [ "pipeline_execution" ]
|
||||
details:
|
||||
status:
|
||||
state: "FINISHED"
|
||||
abnormally-terminated-pp-multipipeline:
|
||||
status: "red"
|
||||
symptom: "The pipeline is unhealthy; 1 area is impacted and 1 diagnosis is available"
|
||||
diagnosis:
|
||||
- cause: "pipeline is not running, likely because it has encountered an error"
|
||||
action: "view logs to determine the cause of abnormal pipeline shutdown"
|
||||
impacts:
|
||||
- description: "the pipeline is not currently processing"
|
||||
impact_areas: [ "pipeline_execution" ]
|
||||
details:
|
||||
status:
|
||||
state: "TERMINATED"
|
|
@ -0,0 +1,30 @@
|
|||
name: "Successfully terminated pipeline"
|
||||
config:
|
||||
- pipeline.id: normally-terminated-pp
|
||||
config.string: |
|
||||
input { generator { count => 1 } }
|
||||
output { stdout {} }
|
||||
pipeline.workers: 1
|
||||
pipeline.batch.size: 1
|
||||
conditions:
|
||||
full_start_required: true
|
||||
wait_seconds: 5
|
||||
expectation:
|
||||
status: "yellow"
|
||||
symptom: "1 indicator is concerning (`pipelines`)"
|
||||
indicators:
|
||||
pipelines:
|
||||
status: "yellow"
|
||||
symptom: "1 indicator is concerning (`normally-terminated-pp`)"
|
||||
indicators:
|
||||
normally-terminated-pp:
|
||||
status: "yellow"
|
||||
symptom: "The pipeline is concerning; 1 area is impacted and 1 diagnosis is available"
|
||||
diagnosis:
|
||||
- cause: "pipeline has finished running because its inputs have been closed and events have been processed"
|
||||
action: "if you expect this pipeline to run indefinitely, you will need to configure its inputs to continue receiving or fetching events"
|
||||
impacts:
|
||||
- impact_areas: ["pipeline_execution"]
|
||||
details:
|
||||
status:
|
||||
state: "FINISHED"
|
31  .buildkite/scripts/health-report-tests/tests/slow-start.yaml  (new file)
@ -0,0 +1,31 @@
|
|||
name: "Slow start pipeline"
|
||||
config:
|
||||
- pipeline.id: slow-start-pp
|
||||
config.string: |
|
||||
input { heartbeat {} }
|
||||
filter { failure_injector { degrade_at => [register] } }
|
||||
output { stdout {} }
|
||||
pipeline.workers: 1
|
||||
pipeline.batch.size: 1
|
||||
conditions:
|
||||
full_start_required: false
|
||||
wait_seconds: 0
|
||||
expectation:
|
||||
status: "yellow"
|
||||
symptom: "1 indicator is concerning (`pipelines`)"
|
||||
indicators:
|
||||
pipelines:
|
||||
status: "yellow"
|
||||
symptom: "1 indicator is concerning (`slow-start-pp`)"
|
||||
indicators:
|
||||
slow-start-pp:
|
||||
status: "yellow"
|
||||
symptom: "The pipeline is concerning; 1 area is impacted and 1 diagnosis is available"
|
||||
diagnosis:
|
||||
- cause: "pipeline is loading"
|
||||
action: "if pipeline does not come up quickly, you may need to check the logs to see if it is stalled"
|
||||
impacts:
|
||||
- impact_areas: ["pipeline_execution"]
|
||||
details:
|
||||
status:
|
||||
state: "LOADING"
|
36  .buildkite/scripts/health-report-tests/util.py  (new file)
@ -0,0 +1,36 @@
|
|||
import os
|
||||
import requests
|
||||
import subprocess
|
||||
from requests.adapters import HTTPAdapter, Retry
|
||||
|
||||
|
||||
def call_url_with_retry(url: str, max_retries: int = 5, delay: int = 1) -> requests.Response:
|
||||
f"""
|
||||
Calls the given {url} with maximum of {max_retries} retries with {delay} delay.
|
||||
"""
|
||||
schema = "https://" if "https://" in url else "http://"
|
||||
session = requests.Session()
|
||||
# retry on most common failures such as connection timeout(408), etc...
|
||||
retries = Retry(total=max_retries, backoff_factor=delay, status_forcelist=[408, 502, 503, 504])
|
||||
session.mount(schema, HTTPAdapter(max_retries=retries))
|
||||
return session.get(url)
|
||||
|
||||
|
||||
def git_check_out_branch(branch_name: str) -> None:
|
||||
f"""
|
||||
Checks out specified branch or fails with error if checkout operation fails.
|
||||
"""
|
||||
run_or_raise_error(["git", "checkout", branch_name],
|
||||
"Error occurred while checking out the " + branch_name + " branch")
|
||||
|
||||
|
||||
def run_or_raise_error(commands: list, error_message):
|
||||
f"""
|
||||
Executes the {list} commands and raises an {Exception} if the operation fails.
|
||||
"""
|
||||
result = subprocess.run(commands, env=os.environ.copy(), universal_newlines=True, stdout=subprocess.PIPE)
|
||||
if result.returncode != 0:
|
||||
full_error_message = (error_message + ", output: " + result.stdout.decode('utf-8')) \
|
||||
if result.stdout else error_message
|
||||
raise Exception(f"{full_error_message}")
|
||||
|
|
@ -4,6 +4,7 @@ from dataclasses import dataclass, field
|
|||
import os
|
||||
import sys
|
||||
import typing
|
||||
from functools import partial
|
||||
|
||||
from ruamel.yaml import YAML
|
||||
from ruamel.yaml.scalarstring import LiteralScalarString
|
||||
|
@ -177,17 +178,15 @@ class LinuxJobs(Jobs):
|
|||
super().__init__(os=os, jdk=jdk, group_key=group_key, agent=agent)
|
||||
|
||||
def all_jobs(self) -> list[typing.Callable[[], JobRetValues]]:
|
||||
return [
|
||||
self.init_annotation,
|
||||
self.java_unit_test,
|
||||
self.ruby_unit_test,
|
||||
self.integration_tests_part_1,
|
||||
self.integration_tests_part_2,
|
||||
self.pq_integration_tests_part_1,
|
||||
self.pq_integration_tests_part_2,
|
||||
self.x_pack_unit_tests,
|
||||
self.x_pack_integration,
|
||||
]
|
||||
jobs=list()
|
||||
jobs.append(self.init_annotation)
|
||||
jobs.append(self.java_unit_test)
|
||||
jobs.append(self.ruby_unit_test)
|
||||
jobs.extend(self.integration_test_parts(3))
|
||||
jobs.extend(self.pq_integration_test_parts(3))
|
||||
jobs.append(self.x_pack_unit_tests)
|
||||
jobs.append(self.x_pack_integration)
|
||||
return jobs
|
||||
|
||||
def prepare_shell(self) -> str:
|
||||
jdk_dir = f"/opt/buildkite-agent/.java/{self.jdk}"
|
||||
|
@ -259,17 +258,14 @@ ci/unit_tests.sh ruby
|
|||
retry=copy.deepcopy(ENABLED_RETRIES),
|
||||
)
|
||||
|
||||
def integration_tests_part_1(self) -> JobRetValues:
|
||||
return self.integration_tests(part=1)
|
||||
def integration_test_parts(self, parts) -> list[partial[JobRetValues]]:
|
||||
return [partial(self.integration_tests, part=idx+1, parts=parts) for idx in range(parts)]
|
||||
|
||||
def integration_tests_part_2(self) -> JobRetValues:
|
||||
return self.integration_tests(part=2)
|
||||
|
||||
def integration_tests(self, part: int) -> JobRetValues:
|
||||
step_name_human = f"Integration Tests - {part}"
|
||||
step_key = f"{self.group_key}-integration-tests-{part}"
|
||||
def integration_tests(self, part: int, parts: int) -> JobRetValues:
|
||||
step_name_human = f"Integration Tests - {part}/{parts}"
|
||||
step_key = f"{self.group_key}-integration-tests-{part}-of-{parts}"
|
||||
test_command = f"""
|
||||
ci/integration_tests.sh split {part-1}
|
||||
ci/integration_tests.sh split {part-1} {parts}
|
||||
"""
|
||||
|
||||
return JobRetValues(
|
||||
|
@ -281,18 +277,15 @@ ci/integration_tests.sh split {part-1}
|
|||
retry=copy.deepcopy(ENABLED_RETRIES),
|
||||
)
|
||||
|
||||
def pq_integration_tests_part_1(self) -> JobRetValues:
|
||||
return self.pq_integration_tests(part=1)
|
||||
def pq_integration_test_parts(self, parts) -> list[partial[JobRetValues]]:
|
||||
return [partial(self.pq_integration_tests, part=idx+1, parts=parts) for idx in range(parts)]
|
||||
|
||||
def pq_integration_tests_part_2(self) -> JobRetValues:
|
||||
return self.pq_integration_tests(part=2)
|
||||
|
||||
def pq_integration_tests(self, part: int) -> JobRetValues:
|
||||
step_name_human = f"IT Persistent Queues - {part}"
|
||||
step_key = f"{self.group_key}-it-persistent-queues-{part}"
|
||||
def pq_integration_tests(self, part: int, parts: int) -> JobRetValues:
|
||||
step_name_human = f"IT Persistent Queues - {part}/{parts}"
|
||||
step_key = f"{self.group_key}-it-persistent-queues-{part}-of-{parts}"
|
||||
test_command = f"""
|
||||
export FEATURE_FLAG=persistent_queues
|
||||
ci/integration_tests.sh split {part-1}
|
||||
ci/integration_tests.sh split {part-1} {parts}
|
||||
"""
|
||||
|
||||
return JobRetValues(
|
||||
|
|
|
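The refactor above parameterizes the test split, so each generated Buildkite step runs one slice via `ci/integration_tests.sh split {part-1} {parts}`. Run locally, the three steps produced by `integration_test_parts(3)` would amount to something like the following (argument semantics inferred from the generated commands, not verified against the script itself):

```shell
# one invocation per generated step for a 3-way split (0-indexed slice, total slices)
ci/integration_tests.sh split 0 3
ci/integration_tests.sh split 1 3
ci/integration_tests.sh split 2 3
```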
@ -4,7 +4,7 @@ set -e
|
|||
|
||||
install_java() {
|
||||
# TODO: let's think about regularly creating a custom image for Logstash which may align on version.yml definitions
|
||||
sudo apt update && sudo apt install -y openjdk-17-jdk && sudo apt install -y openjdk-17-jre
|
||||
sudo apt update && sudo apt install -y openjdk-21-jdk && sudo apt install -y openjdk-21-jre
|
||||
}
|
||||
|
||||
install_java
|
||||
|
|
|
@ -4,22 +4,13 @@ set -e
|
|||
|
||||
TARGET_BRANCHES=("main")
|
||||
|
||||
install_java() {
|
||||
# TODO: let's think about using BK agent which has Java installed
|
||||
# Current caveat is Logstash BK agent doesn't support docker operatioins in it
|
||||
sudo apt update && sudo apt install -y openjdk-17-jdk && sudo apt install -y openjdk-17-jre
|
||||
install_java_11() {
|
||||
curl -L -s "https://github.com/adoptium/temurin11-binaries/releases/download/jdk-11.0.24%2B8/OpenJDK11U-jdk_x64_linux_hotspot_11.0.24_8.tar.gz" | tar -zxf -
|
||||
}
|
||||
|
||||
# Resolves the branches we are going to track
|
||||
resolve_latest_branches() {
|
||||
source .buildkite/scripts/snyk/resolve_stack_version.sh
|
||||
for SNAPSHOT_VERSION in "${SNAPSHOT_VERSIONS[@]}"
|
||||
do
|
||||
IFS='.'
|
||||
read -a versions <<< "$SNAPSHOT_VERSION"
|
||||
version=${versions[0]}.${versions[1]}
|
||||
TARGET_BRANCHES+=("$version")
|
||||
done
|
||||
}
|
||||
|
||||
# Build Logstash specific branch to generate Gemlock file where Snyk scans
|
||||
|
@ -42,7 +33,7 @@ download_auth_snyk() {
|
|||
report() {
|
||||
REMOTE_REPO_URL=$1
|
||||
echo "Reporting $REMOTE_REPO_URL branch."
|
||||
if [ "$REMOTE_REPO_URL" != "main" ]; then
|
||||
if [ "$REMOTE_REPO_URL" != "main" ] && [ "$REMOTE_REPO_URL" != "8.x" ]; then
|
||||
MAJOR_VERSION=$(echo "$REMOTE_REPO_URL"| cut -d'.' -f 1)
|
||||
REMOTE_REPO_URL="$MAJOR_VERSION".latest
|
||||
echo "Using '$REMOTE_REPO_URL' remote repo url."
|
||||
|
@ -55,13 +46,18 @@ report() {
|
|||
./snyk monitor --prune-repeated-subdependencies --all-projects --org=logstash --remote-repo-url="$REMOTE_REPO_URL" --target-reference="$REMOTE_REPO_URL" --detection-depth=6 --exclude=qa,tools,devtools,requirements.txt --project-tags=branch="$TARGET_BRANCH",git_head="$GIT_HEAD" || :
|
||||
}
|
||||
|
||||
install_java
|
||||
resolve_latest_branches
|
||||
download_auth_snyk
|
||||
|
||||
# clone Logstash repo, build and report
|
||||
for TARGET_BRANCH in "${TARGET_BRANCHES[@]}"
|
||||
do
|
||||
if [ "$TARGET_BRANCH" == "7.17" ]; then
|
||||
echo "Installing and configuring JDK11."
|
||||
export OLD_PATH=$PATH
|
||||
install_java_11
|
||||
export PATH=$PWD/jdk-11.0.24+8/bin:$PATH
|
||||
fi
|
||||
git reset --hard HEAD # reset if any generated files appeared
|
||||
# check if target branch exists
|
||||
echo "Checking out $TARGET_BRANCH branch."
|
||||
|
@ -71,70 +67,10 @@ do
|
|||
else
|
||||
echo "$TARGET_BRANCH branch doesn't exist."
|
||||
fi
|
||||
done
|
||||
|
||||
# Scan Logstash docker images and report
|
||||
REPOSITORY_BASE_URL="docker.elastic.co/logstash/"
|
||||
|
||||
report_docker_image() {
|
||||
image=$1
|
||||
project_name=$2
|
||||
platform=$3
|
||||
echo "Reporting $image to Snyk started..."
|
||||
docker pull "$image"
|
||||
if [[ $platform != null ]]; then
|
||||
./snyk container monitor "$image" --org=logstash --platform="$platform" --project-name="$project_name" --project-tags=version="$version" || :
|
||||
else
|
||||
./snyk container monitor "$image" --org=logstash --project-name="$project_name" --project-tags=version="$version" || :
|
||||
if [ "$TARGET_BRANCH" == "7.17" ]; then
|
||||
# reset state
|
||||
echo "Removing JDK11 installation."
|
||||
rm -rf jdk-11.0.24+8
|
||||
export PATH=$OLD_PATH
|
||||
fi
|
||||
}
|
||||
|
||||
report_docker_images() {
|
||||
version=$1
|
||||
echo "Version value: $version"
|
||||
|
||||
image=$REPOSITORY_BASE_URL"logstash:$version-SNAPSHOT"
|
||||
snyk_project_name="logstash-$version-SNAPSHOT"
|
||||
report_docker_image "$image" "$snyk_project_name"
|
||||
|
||||
image=$REPOSITORY_BASE_URL"logstash-oss:$version-SNAPSHOT"
|
||||
snyk_project_name="logstash-oss-$version-SNAPSHOT"
|
||||
report_docker_image "$image" "$snyk_project_name"
|
||||
|
||||
image=$REPOSITORY_BASE_URL"logstash:$version-SNAPSHOT-arm64"
|
||||
snyk_project_name="logstash-$version-SNAPSHOT-arm64"
|
||||
report_docker_image "$image" "$snyk_project_name" "linux/arm64"
|
||||
|
||||
image=$REPOSITORY_BASE_URL"logstash:$version-SNAPSHOT-amd64"
|
||||
snyk_project_name="logstash-$version-SNAPSHOT-amd64"
|
||||
report_docker_image "$image" "$snyk_project_name" "linux/amd64"
|
||||
|
||||
image=$REPOSITORY_BASE_URL"logstash-oss:$version-SNAPSHOT-arm64"
|
||||
snyk_project_name="logstash-oss-$version-SNAPSHOT-arm64"
|
||||
report_docker_image "$image" "$snyk_project_name" "linux/arm64"
|
||||
|
||||
image=$REPOSITORY_BASE_URL"logstash-oss:$version-SNAPSHOT-amd64"
|
||||
snyk_project_name="logstash-oss-$version-SNAPSHOT-amd64"
|
||||
report_docker_image "$image" "$snyk_project_name" "linux/amd64"
|
||||
}
|
||||
|
||||
resolve_version_and_report_docker_images() {
|
||||
git reset --hard HEAD # reset if any generated files appeared
|
||||
git checkout "$1"
|
||||
|
||||
# parse version (ex: 8.8.2 from 8.8 branch, or 8.9.0 from main branch)
|
||||
versions_file="$PWD/versions.yml"
|
||||
version=$(awk '/logstash:/ { print $2 }' "$versions_file")
|
||||
report_docker_images "$version"
|
||||
}
|
||||
|
||||
# resolve docker artifact and report
|
||||
#for TARGET_BRANCH in "${TARGET_BRANCHES[@]}"
|
||||
#do
|
||||
# if git show-ref --quiet refs/heads/"$TARGET_BRANCH"; then
|
||||
# echo "Using $TARGET_BRANCH branch for docker images."
|
||||
# resolve_version_and_report_docker_images "$TARGET_BRANCH"
|
||||
# else
|
||||
# echo "$TARGET_BRANCH branch doesn't exist."
|
||||
# fi
|
||||
#done
|
||||
done
|
||||
|
|
|
@ -6,14 +6,9 @@
|
|||
|
||||
set -e
|
||||
|
||||
VERSION_URL="https://raw.githubusercontent.com/elastic/logstash/main/ci/logstash_releases.json"
|
||||
VERSION_URL="https://storage.googleapis.com/artifacts-api/snapshots/branches.json"
|
||||
|
||||
echo "Fetching versions from $VERSION_URL"
|
||||
VERSIONS=$(curl --silent $VERSION_URL)
|
||||
SNAPSHOT_KEYS=$(echo "$VERSIONS" | jq -r '.snapshots | .[]')
|
||||
readarray -t TARGET_BRANCHES < <(curl --retry-all-errors --retry 5 --retry-delay 5 -fsSL $VERSION_URL | jq -r '.branches[]')
|
||||
echo "${TARGET_BRANCHES[@]}"
|
||||
|
||||
SNAPSHOT_VERSIONS=()
|
||||
while IFS= read -r line; do
|
||||
SNAPSHOT_VERSIONS+=("$line")
|
||||
echo "Resolved snapshot version: $line"
|
||||
done <<< "$SNAPSHOT_KEYS"
|
|
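The Snyk version resolution now reads the eligible branches straight from the artifacts-api `branches.json` feed, the same source used for the DRA branch list earlier in this changeset. A quick way to inspect what the script will iterate over (the example output is hypothetical; only the `.branches[]` shape is relied upon by the script):

```shell
curl --retry-all-errors --retry 5 --retry-delay 5 -fsSL \
  "https://storage.googleapis.com/artifacts-api/snapshots/branches.json" | jq -r '.branches[]'
# hypothetical output:
#   main
#   8.x
#   8.16
#   7.17
```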
@ -1,12 +1,14 @@
|
|||
agents:
|
||||
provider: gcp
|
||||
imageProject: elastic-images-prod
|
||||
image: family/platform-ingest-logstash-ubuntu-2204
|
||||
machineType: "n2-standard-4"
|
||||
diskSizeGb: 120
|
||||
image: "docker.elastic.co/ci-agent-images/platform-ingest/buildkite-agent-logstash-ci"
|
||||
cpu: "2"
|
||||
memory: "4Gi"
|
||||
ephemeralStorage: "64Gi"
|
||||
|
||||
steps:
|
||||
# reports main, previous (ex: 7.latest) and current (ex: 8.latest) release branches to Snyk
|
||||
- label: ":hammer: Report to Snyk"
|
||||
command:
|
||||
- .buildkite/scripts/snyk/report.sh
|
||||
- .buildkite/scripts/snyk/report.sh
|
||||
retry:
|
||||
automatic:
|
||||
- limit: 3
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
env:
|
||||
DEFAULT_MATRIX_OS: "windows-2022"
|
||||
DEFAULT_MATRIX_JDK: "adoptiumjdk_17"
|
||||
DEFAULT_MATRIX_JDK: "adoptiumjdk_21"
|
||||
|
||||
steps:
|
||||
- input: "Test Parameters"
|
||||
|
@ -15,6 +15,8 @@ steps:
|
|||
multiple: true
|
||||
default: "${DEFAULT_MATRIX_OS}"
|
||||
options:
|
||||
- label: "Windows 2025"
|
||||
value: "windows-2025"
|
||||
- label: "Windows 2022"
|
||||
value: "windows-2022"
|
||||
- label: "Windows 2019"
|
||||
|
@ -33,20 +35,14 @@ steps:
|
|||
value: "adoptiumjdk_21"
|
||||
- label: "Adoptium JDK 17 (Eclipse Temurin)"
|
||||
value: "adoptiumjdk_17"
|
||||
- label: "Adoptium JDK 11 (Eclipse Temurin)"
|
||||
value: "adoptiumjdk_11"
|
||||
- label: "OpenJDK 21"
|
||||
value: "openjdk_21"
|
||||
- label: "OpenJDK 17"
|
||||
value: "openjdk_17"
|
||||
- label: "OpenJDK 11"
|
||||
value: "openjdk_11"
|
||||
- label: "Zulu 21"
|
||||
value: "zulu_21"
|
||||
- label: "Zulu 17"
|
||||
value: "zulu_17"
|
||||
- label: "Zulu 11"
|
||||
value: "zulu_11"
|
||||
|
||||
- wait: ~
|
||||
if: build.source != "schedule" && build.source != "trigger_job"
|
||||
|
|
42  .ci/Makefile  (new file)
@ -0,0 +1,42 @@
|
|||
.SILENT:
|
||||
MAKEFLAGS += --no-print-directory
|
||||
.SHELLFLAGS = -euc
|
||||
SHELL = /bin/bash
|
||||
|
||||
#######################
|
||||
## Templates
|
||||
#######################
|
||||
## Mergify template
|
||||
define MERGIFY_TMPL
|
||||
|
||||
- name: backport patches to $(BRANCH) branch
|
||||
conditions:
|
||||
- merged
|
||||
- base=main
|
||||
- label=$(BACKPORT_LABEL)
|
||||
actions:
|
||||
backport:
|
||||
branches:
|
||||
- "$(BRANCH)"
|
||||
endef
|
||||
|
||||
# Add mergify entry for the new backport label
|
||||
.PHONY: mergify
|
||||
export MERGIFY_TMPL
|
||||
mergify: BACKPORT_LABEL=$${BACKPORT_LABEL} BRANCH=$${BRANCH} PUSH_BRANCH=$${PUSH_BRANCH}
|
||||
mergify:
|
||||
@echo ">> mergify"
|
||||
echo "$$MERGIFY_TMPL" >> ../.mergify.yml
|
||||
git add ../.mergify.yml
|
||||
git status
|
||||
if [ ! -z "$$(git status --porcelain)" ]; then \
|
||||
git commit -m "mergify: add $(BACKPORT_LABEL) rule"; \
|
||||
git push origin $(PUSH_BRANCH) ; \
|
||||
fi
|
||||
|
||||
# Create GitHub backport label
|
||||
.PHONY: backport-label
|
||||
backport-label: BACKPORT_LABEL=$${BACKPORT_LABEL}
|
||||
backport-label:
|
||||
@echo ">> backport-label"
|
||||
gh label create $(BACKPORT_LABEL) --description "Automated backport with mergify" --color 0052cc --force
|
|
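The two targets above are what the version bump workflow later in this diff drives. As a rough illustration only (label, branch, and push-branch values below are hypothetical placeholders, and `gh` needs a token that can create labels):

    # create the label, then append the matching Mergify backport rule
    export GH_TOKEN="<token>"   # assumed: required by `gh label create`
    make -C .ci backport-label BACKPORT_LABEL=backport-8.19
    make -C .ci mergify BACKPORT_LABEL=backport-8.19 BRANCH=8.19 PUSH_BRANCH=update_lock_1700000000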
@@ -1,2 +1,2 @@
LS_BUILD_JAVA=adoptiumjdk_17
LS_RUNTIME_JAVA=adoptiumjdk_17
LS_BUILD_JAVA=adoptiumjdk_21
LS_RUNTIME_JAVA=adoptiumjdk_21
@@ -21,10 +21,6 @@ analyze:
type: gradle
target: 'dependencies-report:'
path: .
- name: ingest-converter
type: gradle
target: 'ingest-converter:'
path: .
- name: logstash-core
type: gradle
target: 'logstash-core:'
1 .github/ISSUE_TEMPLATE/test-failure.md vendored
@@ -21,6 +21,5 @@ to reproduce locally
**Failure history**:
<!--
Link to build stats and possible indication of when this started failing and how often it fails
<https://build-stats.elastic.co/app/kibana>
-->
**Failure excerpt**:
18 .github/dependabot.yml vendored Normal file
@@ -0,0 +1,18 @@
---
version: 2
updates:
- package-ecosystem: "github-actions"
directories:
- '/'
- '/.github/actions/*'
schedule:
interval: "weekly"
day: "sunday"
time: "22:00"
reviewers:
- "elastic/observablt-ci"
- "elastic/observablt-ci-contractors"
groups:
github-actions:
patterns:
- "*"
33 .github/workflows/add-docs-preview-link.yml vendored
@@ -1,33 +0,0 @@
name: Docs Preview Link

on:
pull_request_target:
types: [opened, synchronize]
paths:
- docs/**
- docsk8s/**
jobs:
docs-preview-link:
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- id: wait-for-status
uses: autotelic/action-wait-for-status-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
owner: elastic
# when running with on: pull_request_target we get the PR base ref by default
ref: ${{ github.event.pull_request.head.sha }}
statusName: "buildkite/docs-build-pr"
# https://elasticsearch-ci.elastic.co/job/elastic+logstash+pull-request+build-docs
# usually finishes in ~ 20 minutes
timeoutSeconds: 900
intervalSeconds: 30
- name: Add Docs Preview link in PR Comment
if: steps.wait-for-status.outputs.state == 'success'
uses: thollander/actions-comment-pull-request@v1
with:
message: |
:page_with_curl: **DOCS PREVIEW** :sparkles: https://logstash_bk_${{ github.event.number }}.docs-preview.app.elstc.co/diff
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
22 .github/workflows/backport-active.yml vendored Normal file
@@ -0,0 +1,22 @@
name: Backport to active branches

on:
pull_request_target:
types: [closed]
branches:
- main

permissions:
pull-requests: write
contents: read

jobs:
backport:
# Only run if the PR was merged (not just closed) and has one of the backport labels
if: |
github.event.pull_request.merged == true &&
contains(toJSON(github.event.pull_request.labels.*.name), 'backport-active-')
runs-on: ubuntu-latest

steps:
- uses: elastic/oblt-actions/github/backport-active@v1
19 .github/workflows/docs-build.yml vendored Normal file
@@ -0,0 +1,19 @@
name: docs-build

on:
push:
branches:
- main
pull_request_target: ~
merge_group: ~

jobs:
docs-preview:
uses: elastic/docs-builder/.github/workflows/preview-build.yml@main
with:
path-pattern: docs/**
permissions:
deployments: write
id-token: write
contents: read
pull-requests: read
14 .github/workflows/docs-cleanup.yml vendored Normal file
@@ -0,0 +1,14 @@
name: docs-cleanup

on:
pull_request_target:
types:
- closed

jobs:
docs-preview:
uses: elastic/docs-builder/.github/workflows/preview-cleanup.yml@main
permissions:
contents: none
id-token: write
deployments: write
23 .github/workflows/mergify-labels-copier.yml vendored Normal file
@@ -0,0 +1,23 @@
name: mergify backport labels copier

on:
pull_request:
types:
- opened

permissions:
contents: read

jobs:
mergify-backport-labels-copier:
runs-on: ubuntu-latest
if: startsWith(github.head_ref, 'mergify/bp/')
permissions:
# Add GH labels
pull-requests: write
# See https://github.com/cli/cli/issues/6274
repository-projects: read
steps:
- uses: elastic/oblt-actions/mergify/labels-copier@v1
with:
excluded-labels-regex: "^backport-*"
49 .github/workflows/pr_backporter.yml vendored
@@ -1,49 +0,0 @@
name: Backport PR to another branch
on:
issue_comment:
types: [created]

permissions:
pull-requests: write
contents: write

jobs:
pr_commented:
name: PR comment
if: github.event.issue.pull_request
runs-on: ubuntu-latest
steps:
- uses: actions-ecosystem/action-regex-match@v2
id: regex-match
with:
text: ${{ github.event.comment.body }}
regex: '^@logstashmachine backport (main|[x0-9\.]+)$'
- if: ${{ steps.regex-match.outputs.group1 == '' }}
run: exit 1
- name: Fetch logstash-core team member list
uses: tspascoal/get-user-teams-membership@v1
id: checkUserMember
with:
username: ${{ github.actor }}
organization: elastic
team: logstash
GITHUB_TOKEN: ${{ secrets.READ_ORG_SECRET_JSVD }}
- name: Is user not a core team member?
if: ${{ steps.checkUserMember.outputs.isTeamMember == 'false' }}
run: exit 1
- name: checkout repo content
uses: actions/checkout@v2
with:
fetch-depth: 0
ref: 'main'
- run: git config --global user.email "43502315+logstashmachine@users.noreply.github.com"
- run: git config --global user.name "logstashmachine"
- name: setup python
uses: actions/setup-python@v2
with:
python-version: 3.8
- run: |
mkdir ~/.elastic && echo ${{ github.token }} >> ~/.elastic/github.token
- run: pip install requests
- name: run backport
run: python devtools/backport ${{ steps.regex-match.outputs.group1 }} ${{ github.event.issue.number }} --remote=origin --yes
18 .github/workflows/pre-commit.yml vendored Normal file
@@ -0,0 +1,18 @@
name: pre-commit

on:
pull_request:
push:
branches:
- main
- 8.*
- 9.*

permissions:
contents: read

jobs:
pre-commit:
runs-on: ubuntu-latest
steps:
- uses: elastic/oblt-actions/pre-commit@v1
29 .github/workflows/version_bumps.yml vendored
@@ -25,9 +25,13 @@ jobs:
version_bumper:
name: Bump versions
runs-on: ubuntu-latest
env:
INPUTS_BRANCH: "${{ inputs.branch }}"
INPUTS_BUMP: "${{ inputs.bump }}"
BACKPORT_LABEL: "backport-${{ inputs.branch }}"
steps:
- name: Fetch logstash-core team member list
uses: tspascoal/get-user-teams-membership@v1
uses: tspascoal/get-user-teams-membership@57e9f42acd78f4d0f496b3be4368fc5f62696662 #v3.0.0
with:
username: ${{ github.actor }}
organization: elastic

@@ -37,14 +41,14 @@ jobs:
if: ${{ steps.checkUserMember.outputs.isTeamMember == 'false' }}
run: exit 1
- name: checkout repo content
uses: actions/checkout@v2
uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.event.inputs.branch }}
ref: ${{ env.INPUTS_BRANCH }}
- run: git config --global user.email "43502315+logstashmachine@users.noreply.github.com"
- run: git config --global user.name "logstashmachine"
- run: ./gradlew clean installDefaultGems
- run: ./vendor/jruby/bin/jruby -S bundle update --all --${{ github.event.inputs.bump }} --strict
- run: ./vendor/jruby/bin/jruby -S bundle update --all --${{ env.INPUTS_BUMP }} --strict
- run: mv Gemfile.lock Gemfile.jruby-*.lock.release
- run: echo "T=$(date +%s)" >> $GITHUB_ENV
- run: echo "BRANCH=update_lock_${T}" >> $GITHUB_ENV

@@ -53,8 +57,21 @@ jobs:
git add .
git status
if [[ -z $(git status --porcelain) ]]; then echo "No changes. We're done."; exit 0; fi
git commit -m "Update ${{ github.event.inputs.bump }} plugin versions in gemfile lock" -a
git commit -m "Update ${{ env.INPUTS_BUMP }} plugin versions in gemfile lock" -a
git push origin $BRANCH

- name: Update mergify (minor only)
if: ${{ inputs.bump == 'minor' }}
continue-on-error: true
run: make -C .ci mergify BACKPORT_LABEL=$BACKPORT_LABEL BRANCH=$INPUTS_BRANCH PUSH_BRANCH=$BRANCH

- name: Create Pull Request
run: |
curl -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" -X POST -d "{\"title\": \"bump lock file for ${{ github.event.inputs.branch }}\",\"head\": \"${BRANCH}\",\"base\": \"${{ github.event.inputs.branch }}\"}" https://api.github.com/repos/elastic/logstash/pulls
curl -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" -X POST -d "{\"title\": \"bump lock file for ${{ env.INPUTS_BRANCH }}\",\"head\": \"${BRANCH}\",\"base\": \"${{ env.INPUTS_BRANCH }}\"}" https://api.github.com/repos/elastic/logstash/pulls

- name: Create GitHub backport label (Mergify) (minor only)
if: ${{ inputs.bump == 'minor' }}
continue-on-error: true
run: make -C .ci backport-label BACKPORT_LABEL=$BACKPORT_LABEL
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
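The `inputs.branch` and `inputs.bump` references above suggest this workflow is started manually via workflow_dispatch. If so, a dispatch from the GitHub CLI would look roughly like this (the branch and bump values are illustrative, not taken from this diff):

    # assumed: version_bumps.yml exposes `branch` and `bump` as dispatch inputs
    gh workflow run version_bumps.yml -R elastic/logstash --ref main -f branch=8.19 -f bump=minor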
132
.mergify.yml
Normal file
132
.mergify.yml
Normal file
|
@ -0,0 +1,132 @@
|
|||
commands_restrictions:
|
||||
backport:
|
||||
conditions:
|
||||
- or:
|
||||
- sender-permission>=write
|
||||
- sender=github-actions[bot]
|
||||
defaults:
|
||||
actions:
|
||||
backport:
|
||||
title: "[{{ destination_branch }}] (backport #{{ number }}) {{ title }}"
|
||||
assignees:
|
||||
- "{{ author }}"
|
||||
labels:
|
||||
- "backport"
|
||||
pull_request_rules:
|
||||
# - name: ask to resolve conflict
|
||||
# conditions:
|
||||
# - conflict
|
||||
# actions:
|
||||
# comment:
|
||||
# message: |
|
||||
# This pull request is now in conflicts. Could you fix it @{{author}}? 🙏
|
||||
# To fixup this pull request, you can check out it locally. See documentation: https://help.github.com/articles/checking-out-pull-requests-locally/
|
||||
# ```
|
||||
# git fetch upstream
|
||||
# git checkout -b {{head}} upstream/{{head}}
|
||||
# git merge upstream/{{base}}
|
||||
# git push upstream {{head}}
|
||||
# ```
|
||||
|
||||
- name: notify the backport policy
|
||||
conditions:
|
||||
- -label~=^backport
|
||||
- base=main
|
||||
actions:
|
||||
comment:
|
||||
message: |
|
||||
This pull request does not have a backport label. Could you fix it @{{author}}? 🙏
|
||||
To fixup this pull request, you need to add the backport labels for the needed
|
||||
branches, such as:
|
||||
* `backport-8./d` is the label to automatically backport to the `8./d` branch. `/d` is the digit.
|
||||
* If no backport is necessary, please add the `backport-skip` label
|
||||
|
||||
- name: remove backport-skip label
|
||||
conditions:
|
||||
- label~=^backport-\d
|
||||
actions:
|
||||
label:
|
||||
remove:
|
||||
- backport-skip
|
||||
|
||||
- name: notify the backport has not been merged yet
|
||||
conditions:
|
||||
- -merged
|
||||
- -closed
|
||||
- author=mergify[bot]
|
||||
- "#check-success>0"
|
||||
- schedule=Mon-Mon 06:00-10:00[Europe/Paris]
|
||||
actions:
|
||||
comment:
|
||||
message: |
|
||||
This pull request has not been merged yet. Could you please review and merge it @{{ assignee | join(', @') }}? 🙏
|
||||
|
||||
- name: backport patches to 8.16 branch
|
||||
conditions:
|
||||
- merged
|
||||
- base=main
|
||||
- label=backport-8.16
|
||||
actions:
|
||||
backport:
|
||||
assignees:
|
||||
- "{{ author }}"
|
||||
branches:
|
||||
- "8.16"
|
||||
labels:
|
||||
- "backport"
|
||||
title: "[{{ destination_branch }}] {{ title }} (backport #{{ number }})"
|
||||
|
||||
- name: backport patches to 8.17 branch
|
||||
conditions:
|
||||
- merged
|
||||
- base=main
|
||||
- label=backport-8.17
|
||||
actions:
|
||||
backport:
|
||||
assignees:
|
||||
- "{{ author }}"
|
||||
branches:
|
||||
- "8.17"
|
||||
labels:
|
||||
- "backport"
|
||||
title: "[{{ destination_branch }}] {{ title }} (backport #{{ number }})"
|
||||
|
||||
- name: backport patches to 8.18 branch
|
||||
conditions:
|
||||
- merged
|
||||
- base=main
|
||||
- label=backport-8.18
|
||||
actions:
|
||||
backport:
|
||||
assignees:
|
||||
- "{{ author }}"
|
||||
branches:
|
||||
- "8.18"
|
||||
labels:
|
||||
- "backport"
|
||||
title: "[{{ destination_branch }}] {{ title }} (backport #{{ number }})"
|
||||
|
||||
- name: backport patches to 8.19 branch
|
||||
conditions:
|
||||
- merged
|
||||
- base=main
|
||||
- label=backport-8.19
|
||||
actions:
|
||||
backport:
|
||||
branches:
|
||||
- "8.19"
|
||||
|
||||
- name: backport patches to 9.0 branch
|
||||
conditions:
|
||||
- merged
|
||||
- base=main
|
||||
- label=backport-9.0
|
||||
actions:
|
||||
backport:
|
||||
assignees:
|
||||
- "{{ author }}"
|
||||
branches:
|
||||
- "9.0"
|
||||
labels:
|
||||
- "backport"
|
||||
title: "[{{ destination_branch }}] {{ title }} (backport #{{ number }})"
|
6 .pre-commit-config.yaml Normal file
@@ -0,0 +1,6 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0
hooks:
- id: check-merge-conflict
args: ['--assume-in-merge']
@@ -1 +1 @@
jruby-9.3.10.0
jruby-9.4.9.0

@@ -13,7 +13,7 @@ gem "ruby-maven-libs", "~> 3", ">= 3.9.6.1"
gem "logstash-output-elasticsearch", ">= 11.14.0"
gem "polyglot", require: false
gem "treetop", require: false
gem "faraday", "~> 1", :require => false # due elasticsearch-transport (elastic-transport) depending faraday '~> 1'
gem "minitar", "~> 1", :group => :build
gem "childprocess", "~> 4", :group => :build
gem "fpm", "~> 1", ">= 1.14.1", :group => :build # compound due to bugfix https://github.com/jordansissel/fpm/pull/1856
gem "gems", "~> 1", :group => :build

@@ -26,6 +26,8 @@ gem "stud", "~> 0.0.22", :group => :build
gem "fileutils", "~> 1.7"

gem "rubocop", :group => :development
# rubocop-ast 1.43.0 carries a dep on `prism` which requires native c extensions
gem 'rubocop-ast', '= 1.42.0', :group => :development
gem "belzebuth", :group => :development
gem "benchmark-ips", :group => :development
gem "ci_reporter_rspec", "~> 1", :group => :development

@@ -39,5 +41,9 @@ gem "simplecov", "~> 0.22.0", :group => :development
gem "simplecov-json", require: false, :group => :development
gem "jar-dependencies", "= 0.4.1" # Gem::LoadError with jar-dependencies 0.4.2
gem "murmurhash3", "= 0.1.6" # Pins until version 0.1.7-java is released
gem "date", "= 3.3.3"
gem "thwait"
gem "bigdecimal", "~> 3.1"
gem "psych", "5.2.2"
gem "cgi", "0.3.7" # Pins until a new jruby version with updated cgi is released
gem "uri", "0.12.3" # Pins until a new jruby version with updated cgi is released
997 NOTICE.TXT
File diff suppressed because it is too large

@@ -20,7 +20,6 @@ supported platforms, from [downloads page](https://www.elastic.co/downloads/logs

- [Logstash Forum](https://discuss.elastic.co/c/logstash)
- [Logstash Documentation](https://www.elastic.co/guide/en/logstash/current/index.html)
- [#logstash on freenode IRC](https://webchat.freenode.net/?channels=logstash)
- [Logstash Product Information](https://www.elastic.co/products/logstash)
- [Elastic Support](https://www.elastic.co/subscriptions)
@@ -1,10 +0,0 @@
@echo off
setlocal enabledelayedexpansion

cd /d "%~dp0\.."
for /f %%i in ('cd') do set RESULT=%%i

"%JAVACMD%" -cp "!RESULT!\tools\ingest-converter\build\libs\ingest-converter.jar;*" ^
org.logstash.ingest.Pipeline %*

endlocal

@@ -1,4 +0,0 @@
#!/usr/bin/env bash

java -cp "$(cd `dirname $0`/..; pwd)"'/tools/ingest-converter/build/libs/ingest-converter.jar:*' \
org.logstash.ingest.Pipeline "$@"
@@ -6,7 +6,7 @@ set params='%*'
if "%1" == "-V" goto version
if "%1" == "--version" goto version

call "%~dp0setup.bat" || exit /b 1
1>&2 (call "%~dp0setup.bat") || exit /b 1
if errorlevel 1 (
if not defined nopauseonerror (
pause
@@ -186,8 +186,8 @@ setup_vendored_jruby() {
}

setup() {
setup_java
setup_vendored_jruby
>&2 setup_java
>&2 setup_vendored_jruby
}

ruby_exec() {
@@ -16,7 +16,7 @@ for %%i in ("%LS_HOME%\logstash-core\lib\jars\*.jar") do (
call :concat "%%i"
)

"%JAVACMD%" "%JAVA_OPTS%" -cp "%CLASSPATH%" org.logstash.ackedqueue.PqCheck %*
"%JAVACMD%" %JAVA_OPTS% org.logstash.ackedqueue.PqCheck %*

:concat
IF not defined CLASSPATH (

@@ -16,7 +16,7 @@ for %%i in ("%LS_HOME%\logstash-core\lib\jars\*.jar") do (
call :concat "%%i"
)

"%JAVACMD%" %JAVA_OPTS% -cp "%CLASSPATH%" org.logstash.ackedqueue.PqRepair %*
"%JAVACMD%" %JAVA_OPTS% org.logstash.ackedqueue.PqRepair %*

:concat
IF not defined CLASSPATH (

@@ -42,7 +42,7 @@ if defined LS_JAVA_HOME (
)

if not exist "%JAVACMD%" (
echo could not find java; set JAVA_HOME or ensure java is in PATH 1>&2
echo could not find java; set LS_JAVA_HOME or ensure java is in PATH 1>&2
exit /b 1
)
202
build.gradle
202
build.gradle
|
@ -101,6 +101,7 @@ allprojects {
|
|||
"--add-opens=java.base/java.lang=ALL-UNNAMED",
|
||||
"--add-opens=java.base/java.util=ALL-UNNAMED"
|
||||
]
|
||||
maxHeapSize = "2g"
|
||||
//https://stackoverflow.com/questions/3963708/gradle-how-to-display-test-results-in-the-console-in-real-time
|
||||
testLogging {
|
||||
// set options for log level LIFECYCLE
|
||||
|
@ -145,7 +146,6 @@ subprojects {
|
|||
}
|
||||
|
||||
version = versionMap['logstash-core']
|
||||
String artifactVersionsApi = "https://artifacts-api.elastic.co/v1/versions"
|
||||
|
||||
tasks.register("configureArchitecture") {
|
||||
String arch = System.properties['os.arch']
|
||||
|
@ -171,33 +171,28 @@ tasks.register("configureArtifactInfo") {
|
|||
description "Set the url to download stack artifacts for select stack version"
|
||||
|
||||
doLast {
|
||||
def versionQualifier = System.getenv('VERSION_QUALIFIER')
|
||||
if (versionQualifier) {
|
||||
version = "$version-$versionQualifier"
|
||||
}
|
||||
def splitVersion = version.split('\\.')
|
||||
int major = splitVersion[0].toInteger()
|
||||
int minor = splitVersion[1].toInteger()
|
||||
String branch = "${major}.${minor}"
|
||||
String fallbackMajorX = "${major}.x"
|
||||
boolean isFallBackPreviousMajor = minor - 1 < 0
|
||||
String fallbackBranch = isFallBackPreviousMajor ? "${major-1}.x" : "${major}.${minor-1}"
|
||||
def qualifiedVersion = ""
|
||||
|
||||
boolean isReleaseBuild = System.getenv('RELEASE') == "1" || versionQualifier
|
||||
String apiResponse = artifactVersionsApi.toURL().text
|
||||
|
||||
def dlVersions = new JsonSlurper().parseText(apiResponse)
|
||||
String qualifiedVersion = dlVersions['versions'].grep(isReleaseBuild ? ~/^${version}$/ : ~/^${version}-SNAPSHOT/)[0]
|
||||
if (qualifiedVersion == null) {
|
||||
if (!isReleaseBuild) {
|
||||
project.ext.set("useProjectSpecificArtifactSnapshotUrl", true)
|
||||
project.ext.set("stackArtifactSuffix", "${version}-SNAPSHOT")
|
||||
return
|
||||
for (b in [branch, fallbackMajorX, fallbackBranch]) {
|
||||
def url = "https://storage.googleapis.com/artifacts-api/snapshots/${b}.json"
|
||||
try {
|
||||
def snapshotInfo = new JsonSlurper().parseText(url.toURL().text)
|
||||
qualifiedVersion = snapshotInfo.version
|
||||
println "ArtifactInfo version: ${qualifiedVersion}"
|
||||
break
|
||||
} catch (Exception e) {
|
||||
println "Failed to fetch branch ${branch} from ${url}: ${e.message}"
|
||||
}
|
||||
throw new GradleException("could not find the current artifact from the artifact-api ${artifactVersionsApi} for ${version}")
|
||||
}
|
||||
// find latest reference to last build
|
||||
String buildsListApi = "${artifactVersionsApi}/${qualifiedVersion}/builds/"
|
||||
apiResponse = buildsListApi.toURL().text
|
||||
def dlBuilds = new JsonSlurper().parseText(apiResponse)
|
||||
def stackBuildVersion = dlBuilds["builds"][0]
|
||||
|
||||
project.ext.set("artifactApiVersionedBuildUrl", "${artifactVersionsApi}/${qualifiedVersion}/builds/${stackBuildVersion}")
|
||||
project.ext.set("stackArtifactSuffix", qualifiedVersion)
|
||||
project.ext.set("useProjectSpecificArtifactSnapshotUrl", false)
|
||||
project.ext.set("artifactApiVersion", qualifiedVersion)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -333,7 +328,6 @@ tasks.register("assembleTarDistribution") {
|
|||
inputs.files fileTree("${projectDir}/bin")
|
||||
inputs.files fileTree("${projectDir}/config")
|
||||
inputs.files fileTree("${projectDir}/lib")
|
||||
inputs.files fileTree("${projectDir}/modules")
|
||||
inputs.files fileTree("${projectDir}/logstash-core-plugin-api")
|
||||
inputs.files fileTree("${projectDir}/logstash-core/lib")
|
||||
inputs.files fileTree("${projectDir}/logstash-core/src")
|
||||
|
@ -350,7 +344,6 @@ tasks.register("assembleOssTarDistribution") {
|
|||
inputs.files fileTree("${projectDir}/bin")
|
||||
inputs.files fileTree("${projectDir}/config")
|
||||
inputs.files fileTree("${projectDir}/lib")
|
||||
inputs.files fileTree("${projectDir}/modules")
|
||||
inputs.files fileTree("${projectDir}/logstash-core-plugin-api")
|
||||
inputs.files fileTree("${projectDir}/logstash-core/lib")
|
||||
inputs.files fileTree("${projectDir}/logstash-core/src")
|
||||
|
@ -365,7 +358,6 @@ tasks.register("assembleZipDistribution") {
|
|||
inputs.files fileTree("${projectDir}/bin")
|
||||
inputs.files fileTree("${projectDir}/config")
|
||||
inputs.files fileTree("${projectDir}/lib")
|
||||
inputs.files fileTree("${projectDir}/modules")
|
||||
inputs.files fileTree("${projectDir}/logstash-core-plugin-api")
|
||||
inputs.files fileTree("${projectDir}/logstash-core/lib")
|
||||
inputs.files fileTree("${projectDir}/logstash-core/src")
|
||||
|
@ -382,7 +374,6 @@ tasks.register("assembleOssZipDistribution") {
|
|||
inputs.files fileTree("${projectDir}/bin")
|
||||
inputs.files fileTree("${projectDir}/config")
|
||||
inputs.files fileTree("${projectDir}/lib")
|
||||
inputs.files fileTree("${projectDir}/modules")
|
||||
inputs.files fileTree("${projectDir}/logstash-core-plugin-api")
|
||||
inputs.files fileTree("${projectDir}/logstash-core/lib")
|
||||
inputs.files fileTree("${projectDir}/logstash-core/src")
|
||||
|
@ -417,7 +408,7 @@ def qaBuildPath = "${buildDir}/qa/integration"
|
|||
def qaVendorPath = "${qaBuildPath}/vendor"
|
||||
|
||||
tasks.register("installIntegrationTestGems") {
|
||||
dependsOn unpackTarDistribution
|
||||
dependsOn assembleTarDistribution
|
||||
def gemfilePath = file("${projectDir}/qa/integration/Gemfile")
|
||||
inputs.files gemfilePath
|
||||
inputs.files file("${projectDir}/qa/integration/integration_tests.gemspec")
|
||||
|
@ -440,23 +431,13 @@ tasks.register("downloadFilebeat") {
|
|||
|
||||
doLast {
|
||||
download {
|
||||
String beatVersion = project.ext.get("stackArtifactSuffix")
|
||||
String downloadedFilebeatName = "filebeat-${beatVersion}-${project.ext.get("beatsArchitecture")}"
|
||||
String beatsVersion = project.ext.get("artifactApiVersion")
|
||||
String downloadedFilebeatName = "filebeat-${beatsVersion}-${project.ext.get("beatsArchitecture")}"
|
||||
project.ext.set("unpackedFilebeatName", downloadedFilebeatName)
|
||||
|
||||
if (project.ext.get("useProjectSpecificArtifactSnapshotUrl")) {
|
||||
def res = SnapshotArtifactURLs.packageUrls("beats", beatVersion, downloadedFilebeatName)
|
||||
project.ext.set("filebeatSnapshotUrl", System.getenv("FILEBEAT_SNAPSHOT_URL") ?: res.packageUrl)
|
||||
project.ext.set("filebeatDownloadLocation", "${projectDir}/build/${downloadedFilebeatName}.tar.gz")
|
||||
} else {
|
||||
// find url of build artifact
|
||||
String artifactApiUrl = "${project.ext.get("artifactApiVersionedBuildUrl")}/projects/beats/packages/${downloadedFilebeatName}.tar.gz"
|
||||
String apiResponse = artifactApiUrl.toURL().text
|
||||
def buildUrls = new JsonSlurper().parseText(apiResponse)
|
||||
|
||||
project.ext.set("filebeatSnapshotUrl", System.getenv("FILEBEAT_SNAPSHOT_URL") ?: buildUrls["package"]["url"])
|
||||
project.ext.set("filebeatDownloadLocation", "${projectDir}/build/${downloadedFilebeatName}.tar.gz")
|
||||
}
|
||||
def res = SnapshotArtifactURLs.packageUrls("beats", beatsVersion, downloadedFilebeatName)
|
||||
project.ext.set("filebeatSnapshotUrl", System.getenv("FILEBEAT_SNAPSHOT_URL") ?: res.packageUrl)
|
||||
project.ext.set("filebeatDownloadLocation", "${projectDir}/build/${downloadedFilebeatName}.tar.gz")
|
||||
|
||||
src project.ext.filebeatSnapshotUrl
|
||||
onlyIfNewer true
|
||||
|
@ -492,20 +473,12 @@ tasks.register("checkEsSHA") {
|
|||
description "Download ES version remote's fingerprint file"
|
||||
|
||||
doLast {
|
||||
String esVersion = project.ext.get("stackArtifactSuffix")
|
||||
String esVersion = project.ext.get("artifactApiVersion")
|
||||
String downloadedElasticsearchName = "elasticsearch-${esVersion}-${project.ext.get("esArchitecture")}"
|
||||
String remoteSHA
|
||||
|
||||
if (project.ext.get("useProjectSpecificArtifactSnapshotUrl")) {
|
||||
def res = SnapshotArtifactURLs.packageUrls("elasticsearch", esVersion, downloadedElasticsearchName)
|
||||
remoteSHA = res.packageShaUrl
|
||||
} else {
|
||||
// find url of build artifact
|
||||
String artifactApiUrl = "${project.ext.get("artifactApiVersionedBuildUrl")}/projects/elasticsearch/packages/${downloadedElasticsearchName}.tar.gz"
|
||||
String apiResponse = artifactApiUrl.toURL().text
|
||||
def buildUrls = new JsonSlurper().parseText(apiResponse)
|
||||
remoteSHA = buildUrls.package.sha_url.toURL().text
|
||||
}
|
||||
def res = SnapshotArtifactURLs.packageUrls("elasticsearch", esVersion, downloadedElasticsearchName)
|
||||
remoteSHA = res.packageShaUrl
|
||||
|
||||
def localESArchive = new File("${projectDir}/build/${downloadedElasticsearchName}.tar.gz")
|
||||
if (localESArchive.exists()) {
|
||||
|
@ -539,25 +512,14 @@ tasks.register("downloadEs") {
|
|||
|
||||
doLast {
|
||||
download {
|
||||
String esVersion = project.ext.get("stackArtifactSuffix")
|
||||
String esVersion = project.ext.get("artifactApiVersion")
|
||||
String downloadedElasticsearchName = "elasticsearch-${esVersion}-${project.ext.get("esArchitecture")}"
|
||||
|
||||
project.ext.set("unpackedElasticsearchName", "elasticsearch-${esVersion}")
|
||||
|
||||
if (project.ext.get("useProjectSpecificArtifactSnapshotUrl")) {
|
||||
def res = SnapshotArtifactURLs.packageUrls("elasticsearch", esVersion, downloadedElasticsearchName)
|
||||
project.ext.set("elasticsearchSnapshotURL", System.getenv("ELASTICSEARCH_SNAPSHOT_URL") ?: res.packageUrl)
|
||||
project.ext.set("elasticsearchDownloadLocation", "${projectDir}/build/${downloadedElasticsearchName}.tar.gz")
|
||||
} else {
|
||||
// find url of build artifact
|
||||
String artifactApiUrl = "${project.ext.get("artifactApiVersionedBuildUrl")}/projects/elasticsearch/packages/${downloadedElasticsearchName}.tar.gz"
|
||||
String apiResponse = artifactApiUrl.toURL().text
|
||||
|
||||
def buildUrls = new JsonSlurper().parseText(apiResponse)
|
||||
|
||||
project.ext.set("elasticsearchSnapshotURL", System.getenv("ELASTICSEARCH_SNAPSHOT_URL") ?: buildUrls["package"]["url"])
|
||||
project.ext.set("elasticsearchDownloadLocation", "${projectDir}/build/${downloadedElasticsearchName}.tar.gz")
|
||||
}
|
||||
def res = SnapshotArtifactURLs.packageUrls("elasticsearch", esVersion, downloadedElasticsearchName)
|
||||
project.ext.set("elasticsearchSnapshotURL", System.getenv("ELASTICSEARCH_SNAPSHOT_URL") ?: res.packageUrl)
|
||||
project.ext.set("elasticsearchDownloadLocation", "${projectDir}/build/${downloadedElasticsearchName}.tar.gz")
|
||||
|
||||
src project.ext.elasticsearchSnapshotURL
|
||||
onlyIfNewer true
|
||||
|
@ -597,7 +559,8 @@ project(":logstash-integration-tests") {
|
|||
systemProperty 'org.logstash.integration.specs', rubyIntegrationSpecs
|
||||
environment "FEATURE_FLAG", System.getenv('FEATURE_FLAG')
|
||||
workingDir integrationTestPwd
|
||||
dependsOn = [installIntegrationTestGems, copyProductionLog4jConfiguration]
|
||||
dependsOn installIntegrationTestGems
|
||||
dependsOn copyProductionLog4jConfiguration
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -740,46 +703,66 @@ class JDKDetails {
|
|||
return createElasticCatalogDownloadUrl()
|
||||
}
|
||||
|
||||
private String createElasticCatalogDownloadUrl() {
|
||||
// Ask details to catalog https://jvm-catalog.elastic.co/jdk and return the url to download the JDK
|
||||
|
||||
// arch x86_64 never used, only aarch64 if macos
|
||||
// throws an error iff local version in versions.yml doesn't match the latest from JVM catalog.
|
||||
void checkLocalVersionMatchingLatest() {
|
||||
// retrieve the metadata from remote
|
||||
def url = "https://jvm-catalog.elastic.co/jdk/latest_adoptiumjdk_${major}_${osName}"
|
||||
|
||||
// Append the cpu's arch only if Mac on aarch64, all the other OSes doesn't have CPU extension
|
||||
if (arch == "aarch64") {
|
||||
url += "_${arch}"
|
||||
}
|
||||
println "Retrieving JDK from catalog..."
|
||||
def catalogMetadataUrl = URI.create(url).toURL()
|
||||
def catalogConnection = catalogMetadataUrl.openConnection()
|
||||
catalogConnection.requestMethod = 'GET'
|
||||
assert catalogConnection.responseCode == 200
|
||||
|
||||
def metadataRetrieved = catalogConnection.content.text
|
||||
println "Retrieved!"
|
||||
|
||||
def catalogMetadata = new JsonSlurper().parseText(metadataRetrieved)
|
||||
return catalogMetadata.url
|
||||
}
|
||||
|
||||
private String createAdoptDownloadUrl() {
|
||||
String releaseName = major > 8 ?
|
||||
"jdk-${revision}+${build}" :
|
||||
"jdk${revision}u${build}"
|
||||
String vendorOsName = vendorOsName(osName)
|
||||
switch (vendor) {
|
||||
case "adoptium":
|
||||
return "https://api.adoptium.net/v3/binary/version/${releaseName}/${vendorOsName}/${arch}/jdk/hotspot/normal/adoptium"
|
||||
default:
|
||||
throw RuntimeException("Can't handle vendor: ${vendor}")
|
||||
if (catalogMetadata.version != revision || catalogMetadata.revision != build) {
|
||||
throw new GradleException("Found new jdk version. Please update version.yml to ${catalogMetadata.version} build ${catalogMetadata.revision}")
|
||||
}
|
||||
}
|
||||
|
||||
private String vendorOsName(String osName) {
|
||||
if (osName == "darwin")
|
||||
return "mac"
|
||||
return osName
|
||||
private String createElasticCatalogDownloadUrl() {
|
||||
// Ask details to catalog https://jvm-catalog.elastic.co/jdk and return the url to download the JDK
|
||||
|
||||
// arch x86_64 is default, aarch64 if macos or linux
|
||||
def url = "https://jvm-catalog.elastic.co/jdk/adoptiumjdk-${revision}+${build}-${osName}"
|
||||
|
||||
// Append the cpu's arch only if not x86_64, which is the default
|
||||
if (arch == "aarch64") {
|
||||
url += "-${arch}"
|
||||
}
|
||||
println "Retrieving JDK from catalog..."
|
||||
def catalogMetadataUrl = URI.create(url).toURL()
|
||||
def catalogConnection = catalogMetadataUrl.openConnection()
|
||||
catalogConnection.requestMethod = 'GET'
|
||||
if (catalogConnection.responseCode != 200) {
|
||||
println "Can't find adoptiumjdk ${revision} for ${osName} on Elastic JVM catalog"
|
||||
throw new GradleException("JVM not present on catalog")
|
||||
}
|
||||
|
||||
def metadataRetrieved = catalogConnection.content.text
|
||||
println "Retrieved!"
|
||||
|
||||
def catalogMetadata = new JsonSlurper().parseText(metadataRetrieved)
|
||||
validateMetadata(catalogMetadata)
|
||||
|
||||
return catalogMetadata.url
|
||||
}
|
||||
|
||||
//Verify that the artifact metadata correspond to the request, if not throws an error
|
||||
private void validateMetadata(Map metadata) {
|
||||
if (metadata.version != revision) {
|
||||
throw new GradleException("Expected to retrieve a JDK for version ${revision} but received: ${metadata.version}")
|
||||
}
|
||||
if (!isSameArchitecture(metadata.architecture)) {
|
||||
throw new GradleException("Expected to retrieve a JDK for architecture ${arch} but received: ${metadata.architecture}")
|
||||
}
|
||||
}
|
||||
|
||||
private boolean isSameArchitecture(String metadataArch) {
|
||||
if (arch == 'x64') {
|
||||
return metadataArch == 'x86_64'
|
||||
}
|
||||
return metadataArch == arch
|
||||
}
|
||||
|
||||
private String parseJdkArchitecture(String jdkArch) {
|
||||
|
@ -791,16 +774,22 @@ class JDKDetails {
|
|||
return "aarch64"
|
||||
break
|
||||
default:
|
||||
throw RuntimeException("Can't handle CPU architechture: ${jdkArch}")
|
||||
throw new GradleException("Can't handle CPU architechture: ${jdkArch}")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tasks.register("lint") {
|
||||
// Calls rake's 'lint' task
|
||||
description = "Lint Ruby source files. Use -PrubySource=file1.rb,file2.rb to specify files"
|
||||
dependsOn installDevelopmentGems
|
||||
doLast {
|
||||
rake(projectDir, buildDir, 'lint:report')
|
||||
if (project.hasProperty("rubySource")) {
|
||||
// Split the comma-separated files and pass them as separate arguments
|
||||
def files = project.property("rubySource").split(",")
|
||||
rake(projectDir, buildDir, "lint:report", *files)
|
||||
} else {
|
||||
rake(projectDir, buildDir, "lint:report")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -840,6 +829,15 @@ tasks.register("downloadJdk", Download) {
|
|||
}
|
||||
}
|
||||
|
||||
tasks.register("checkNewJdkVersion") {
|
||||
def versionYml = new Yaml().load(new File("$projectDir/versions.yml").text)
|
||||
|
||||
// use Linux x86_64 as canary platform
|
||||
def jdkDetails = new JDKDetails(versionYml, "linux", "x86_64")
|
||||
// throws Gradle exception if local and remote doesn't match
|
||||
jdkDetails.checkLocalVersionMatchingLatest()
|
||||
}
|
||||
|
||||
tasks.register("deleteLocalJdk", Delete) {
|
||||
// CLI project properties: -Pjdk_bundle_os=[windows|linux|darwin]
|
||||
String osName = selectOsType()
|
||||
|
|
|
@ -32,6 +32,8 @@ spec:
|
|||
- resource:logstash-linux-jdk-matrix-pipeline
|
||||
- resource:logstash-windows-jdk-matrix-pipeline
|
||||
- resource:logstash-benchmark-pipeline
|
||||
- resource:logstash-health-report-tests-pipeline
|
||||
- resource:logstash-jdk-availability-check-pipeline
|
||||
|
||||
# ***********************************
|
||||
# Declare serverless IT pipeline
|
||||
|
@ -140,7 +142,7 @@ metadata:
|
|||
spec:
|
||||
type: buildkite-pipeline
|
||||
owner: group:logstash
|
||||
system: buildkite
|
||||
system: platform-ingest
|
||||
implementation:
|
||||
apiVersion: buildkite.elastic.dev/v1
|
||||
kind: Pipeline
|
||||
|
@ -183,7 +185,7 @@ metadata:
|
|||
spec:
|
||||
type: buildkite-pipeline
|
||||
owner: group:logstash
|
||||
system: buildkite
|
||||
system: platform-ingest
|
||||
implementation:
|
||||
apiVersion: buildkite.elastic.dev/v1
|
||||
kind: Pipeline
|
||||
|
@ -234,7 +236,7 @@ metadata:
|
|||
spec:
|
||||
type: buildkite-pipeline
|
||||
owner: group:logstash
|
||||
system: buildkite
|
||||
system: platform-ingest
|
||||
implementation:
|
||||
apiVersion: buildkite.elastic.dev/v1
|
||||
kind: Pipeline
|
||||
|
@ -292,7 +294,7 @@ metadata:
|
|||
spec:
|
||||
type: buildkite-pipeline
|
||||
owner: group:logstash
|
||||
system: buildkite
|
||||
system: platform-ingest
|
||||
implementation:
|
||||
apiVersion: buildkite.elastic.dev/v1
|
||||
kind: Pipeline
|
||||
|
@ -343,7 +345,7 @@ metadata:
|
|||
spec:
|
||||
type: buildkite-pipeline
|
||||
owner: group:logstash
|
||||
system: buildkite
|
||||
system: platform-ingest
|
||||
implementation:
|
||||
apiVersion: buildkite.elastic.dev/v1
|
||||
kind: Pipeline
|
||||
|
@ -386,7 +388,7 @@ metadata:
|
|||
spec:
|
||||
type: buildkite-pipeline
|
||||
owner: group:logstash
|
||||
system: buildkite
|
||||
system: platform-ingest
|
||||
implementation:
|
||||
apiVersion: buildkite.elastic.dev/v1
|
||||
kind: Pipeline
|
||||
|
@ -405,6 +407,7 @@ spec:
|
|||
ELASTIC_SLACK_NOTIFICATIONS_ENABLED: 'true'
|
||||
SLACK_NOTIFICATIONS_CHANNEL: '#logstash-build'
|
||||
SLACK_NOTIFICATIONS_ON_SUCCESS: 'false'
|
||||
SLACK_NOTIFICATIONS_SKIP_FOR_RETRIES: 'true'
|
||||
teams:
|
||||
ingest-fp:
|
||||
access_level: MANAGE_BUILD_AND_READ
|
||||
|
@ -436,7 +439,7 @@ metadata:
|
|||
spec:
|
||||
type: buildkite-pipeline
|
||||
owner: group:logstash
|
||||
system: buildkite
|
||||
system: platform-ingest
|
||||
implementation:
|
||||
apiVersion: buildkite.elastic.dev/v1
|
||||
kind: Pipeline
|
||||
|
@ -452,7 +455,8 @@ spec:
|
|||
build_pull_requests: false
|
||||
build_tags: false
|
||||
trigger_mode: code
|
||||
filter_condition: 'build.branch !~ /^backport.*$/'
|
||||
filter_condition: >-
|
||||
build.branch !~ /^backport.*$/ && build.branch !~ /^mergify\/bp\/.*$/
|
||||
filter_enabled: true
|
||||
cancel_intermediate_builds: false
|
||||
skip_intermediate_builds: false
|
||||
|
@ -491,7 +495,7 @@ metadata:
|
|||
spec:
|
||||
type: buildkite-pipeline
|
||||
owner: group:logstash
|
||||
system: buildkite
|
||||
system: platform-ingest
|
||||
implementation:
|
||||
apiVersion: buildkite.elastic.dev/v1
|
||||
kind: Pipeline
|
||||
|
@ -533,7 +537,7 @@ metadata:
|
|||
spec:
|
||||
type: buildkite-pipeline
|
||||
owner: group:logstash
|
||||
system: buildkite
|
||||
system: platform-ingest
|
||||
implementation:
|
||||
apiVersion: buildkite.elastic.dev/v1
|
||||
kind: Pipeline
|
||||
|
@ -613,7 +617,7 @@ spec:
|
|||
kind: Pipeline
|
||||
metadata:
|
||||
name: logstash-benchmark-pipeline
|
||||
description: ':logstash: The Benchmark pipeline'
|
||||
description: ':running: The Benchmark pipeline for snapshot version'
|
||||
spec:
|
||||
repository: elastic/logstash
|
||||
pipeline_file: ".buildkite/benchmark_pipeline.yml"
|
||||
|
@ -642,4 +646,160 @@ spec:
|
|||
|
||||
# *******************************
|
||||
# SECTION END: Benchmark pipeline
|
||||
# *******************************
|
||||
|
||||
# ***********************************
|
||||
# SECTION START: Benchmark Marathon
|
||||
# ***********************************
|
||||
---
|
||||
# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/rre.schema.json
|
||||
apiVersion: backstage.io/v1alpha1
|
||||
kind: Resource
|
||||
metadata:
|
||||
name: logstash-benchmark-marathon-pipeline
|
||||
description: Buildkite pipeline for benchmarking multi-version
|
||||
links:
|
||||
- title: 'Logstash Benchmark Marathon'
|
||||
url: https://buildkite.com/elastic/logstash-benchmark-marathon-pipeline
|
||||
spec:
|
||||
type: buildkite-pipeline
|
||||
owner: group:logstash
|
||||
system: platform-ingest
|
||||
implementation:
|
||||
apiVersion: buildkite.elastic.dev/v1
|
||||
kind: Pipeline
|
||||
metadata:
|
||||
name: logstash-benchmark-marathon-pipeline
|
||||
description: ':running: The Benchmark Marathon for multi-version'
|
||||
spec:
|
||||
repository: elastic/logstash
|
||||
pipeline_file: ".buildkite/benchmark_marathon_pipeline.yml"
|
||||
maximum_timeout_in_minutes: 480
|
||||
provider_settings:
|
||||
trigger_mode: none # don't trigger jobs from github activity
|
||||
env:
|
||||
ELASTIC_SLACK_NOTIFICATIONS_ENABLED: 'false'
|
||||
SLACK_NOTIFICATIONS_CHANNEL: '#logstash-build'
|
||||
SLACK_NOTIFICATIONS_ON_SUCCESS: 'false'
|
||||
SLACK_NOTIFICATIONS_SKIP_FOR_RETRIES: 'true'
|
||||
teams:
|
||||
ingest-fp:
|
||||
access_level: MANAGE_BUILD_AND_READ
|
||||
logstash:
|
||||
access_level: MANAGE_BUILD_AND_READ
|
||||
ingest-eng-prod:
|
||||
access_level: MANAGE_BUILD_AND_READ
|
||||
everyone:
|
||||
access_level: READ_ONLY
|
||||
|
||||
# *******************************
|
||||
# SECTION END: Benchmark Marathon
|
||||
# *******************************
|
||||
|
||||
# ***********************************
|
||||
# Declare Health Report Tests pipeline
|
||||
# ***********************************
|
||||
---
|
||||
# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/rre.schema.json
|
||||
apiVersion: backstage.io/v1alpha1
|
||||
kind: Resource
|
||||
metadata:
|
||||
name: logstash-health-report-tests-pipeline
|
||||
description: Buildkite pipeline for the Logstash Health Report Tests
|
||||
links:
|
||||
- title: ':logstash Logstash Health Report Tests (Daily, Auto) pipeline'
|
||||
url: https://buildkite.com/elastic/logstash-health-report-tests-pipeline
|
||||
spec:
|
||||
type: buildkite-pipeline
|
||||
owner: group:logstash
|
||||
system: platform-ingest
|
||||
implementation:
|
||||
apiVersion: buildkite.elastic.dev/v1
|
||||
kind: Pipeline
|
||||
metadata:
|
||||
name: logstash-health-report-tests-pipeline
|
||||
description: ':logstash: Logstash Health Report tests :pipeline:'
|
||||
spec:
|
||||
repository: elastic/logstash
|
||||
pipeline_file: ".buildkite/health_report_tests_pipeline.yml"
|
||||
maximum_timeout_in_minutes: 30 # usually tests last max ~17mins
|
||||
provider_settings:
|
||||
trigger_mode: none # don't trigger jobs from github activity
|
||||
env:
|
||||
ELASTIC_SLACK_NOTIFICATIONS_ENABLED: 'true'
|
||||
SLACK_NOTIFICATIONS_CHANNEL: '#logstash-build'
|
||||
SLACK_NOTIFICATIONS_ON_SUCCESS: 'false'
|
||||
SLACK_NOTIFICATIONS_SKIP_FOR_RETRIES: 'true'
|
||||
teams:
|
||||
ingest-fp:
|
||||
access_level: MANAGE_BUILD_AND_READ
|
||||
logstash:
|
||||
access_level: MANAGE_BUILD_AND_READ
|
||||
ingest-eng-prod:
|
||||
access_level: MANAGE_BUILD_AND_READ
|
||||
everyone:
|
||||
access_level: READ_ONLY
|
||||
schedules:
|
||||
Daily Health Report tests on main branch:
|
||||
branch: main
|
||||
cronline: 30 20 * * *
|
||||
message: Daily trigger of Health Report Tests Pipeline
|
||||
|
||||
# *******************************
|
||||
# SECTION END: Health Report Tests pipeline
|
||||
# *******************************
|
||||
|
||||
# ***********************************
|
||||
# Declare JDK check pipeline
|
||||
# ***********************************
|
||||
---
|
||||
# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/rre.schema.json
|
||||
apiVersion: backstage.io/v1alpha1
|
||||
kind: Resource
|
||||
metadata:
|
||||
name: logstash-jdk-availability-check-pipeline
|
||||
description: ":logstash: check availability of new JDK version"
|
||||
spec:
|
||||
type: buildkite-pipeline
|
||||
owner: group:logstash
|
||||
system: platform-ingest
|
||||
implementation:
|
||||
apiVersion: buildkite.elastic.dev/v1
|
||||
kind: Pipeline
|
||||
metadata:
|
||||
name: logstash-jdk-availability-check-pipeline
|
||||
spec:
|
||||
repository: elastic/logstash
|
||||
pipeline_file: ".buildkite/jdk_availability_check_pipeline.yml"
|
||||
maximum_timeout_in_minutes: 10
|
||||
provider_settings:
|
||||
trigger_mode: none # don't trigger jobs from github activity
|
||||
env:
|
||||
ELASTIC_SLACK_NOTIFICATIONS_ENABLED: 'true'
|
||||
SLACK_NOTIFICATIONS_CHANNEL: '#logstash-build'
|
||||
SLACK_NOTIFICATIONS_ON_SUCCESS: 'false'
|
||||
SLACK_NOTIFICATIONS_SKIP_FOR_RETRIES: 'true'
|
||||
teams:
|
||||
logstash:
|
||||
access_level: MANAGE_BUILD_AND_READ
|
||||
ingest-eng-prod:
|
||||
access_level: MANAGE_BUILD_AND_READ
|
||||
everyone:
|
||||
access_level: READ_ONLY
|
||||
schedules:
|
||||
Weekly JDK availability check (main):
|
||||
branch: main
|
||||
cronline: 0 2 * * 1 # every Monday@2AM UTC
|
||||
message: Weekly trigger of JDK update availability pipeline per branch
|
||||
env:
|
||||
PIPELINES_TO_TRIGGER: 'logstash-jdk-availability-check-pipeline'
|
||||
Weekly JDK availability check (8.x):
|
||||
branch: 8.x
|
||||
cronline: 0 2 * * 1 # every Monday@2AM UTC
|
||||
message: Weekly trigger of JDK update availability pipeline per branch
|
||||
env:
|
||||
PIPELINES_TO_TRIGGER: 'logstash-jdk-availability-check-pipeline'
|
||||
|
||||
# *******************************
|
||||
# SECTION END: JDK check pipeline
|
||||
# *******************************
|
|
@@ -19,7 +19,7 @@ function get_package_type {
# uses at least 1g of memory, If we don't do this we can get OOM issues when
# installing gems. See https://github.com/elastic/logstash/issues/5179
export JRUBY_OPTS="-J-Xmx1g"
export GRADLE_OPTS="-Xmx4g -Dorg.gradle.daemon=false -Dorg.gradle.logging.level=info -Dfile.encoding=UTF-8"
export GRADLE_OPTS="-Xmx4g -Dorg.gradle.console=plain -Dorg.gradle.daemon=false -Dorg.gradle.logging.level=info -Dfile.encoding=UTF-8"
export OSS=true

if [ -n "$BUILD_JAVA_HOME" ]; then
@@ -1,17 +0,0 @@
{
"notice": "This file is not maintained outside of the main branch and should only be used for tooling.",
"branches": [
{
"branch": "main"
},
{
"branch": "8.13"
},
{
"branch": "8.14"
},
{
"branch": "7.17"
}
]
}

7 ci/check_jdk_version_availability.sh Executable file
@@ -0,0 +1,7 @@
#!/usr/bin/env bash
set -eo pipefail

export GRADLE_OPTS="-Xmx4g -Dorg.gradle.daemon=false -Dorg.gradle.logging.level=info -Dfile.encoding=UTF-8"

echo "Checking local JDK version against latest remote from JVM catalog"
./gradlew checkNewJdkVersion
@ -6,7 +6,7 @@ set -x
|
|||
# uses at least 1g of memory, If we don't do this we can get OOM issues when
|
||||
# installing gems. See https://github.com/elastic/logstash/issues/5179
|
||||
export JRUBY_OPTS="-J-Xmx1g"
|
||||
export GRADLE_OPTS="-Xmx4g -Dorg.gradle.daemon=false -Dorg.gradle.logging.level=info -Dfile.encoding=UTF-8"
|
||||
export GRADLE_OPTS="-Xmx4g -Dorg.gradle.console=plain -Dorg.gradle.daemon=false -Dorg.gradle.logging.level=info -Dfile.encoding=UTF-8"
|
||||
|
||||
if [ -n "$BUILD_JAVA_HOME" ]; then
|
||||
GRADLE_OPTS="$GRADLE_OPTS -Dorg.gradle.java.home=$BUILD_JAVA_HOME"
|
||||
|
@ -15,7 +15,6 @@ fi
|
|||
# Can run either a specific flavor, or all flavors -
|
||||
# eg `ci/acceptance_tests.sh oss` will run tests for open source container
|
||||
# `ci/acceptance_tests.sh full` will run tests for the default container
|
||||
# `ci/acceptance_tests.sh ubi8` will run tests for the ubi8 based container
|
||||
# `ci/acceptance_tests.sh wolfi` will run tests for the wolfi based container
|
||||
# `ci/acceptance_tests.sh` will run tests for all containers
|
||||
SELECTED_TEST_SUITE=$1
|
||||
|
@ -49,23 +48,13 @@ if [[ $SELECTED_TEST_SUITE == "oss" ]]; then
|
|||
elif [[ $SELECTED_TEST_SUITE == "full" ]]; then
|
||||
echo "--- Building $SELECTED_TEST_SUITE docker images"
|
||||
cd $LS_HOME
|
||||
rake artifact:docker
|
||||
rake artifact:build_docker_full
|
||||
echo "--- Acceptance: Installing dependencies"
|
||||
cd $QA_DIR
|
||||
bundle install
|
||||
|
||||
echo "--- Acceptance: Running the tests"
|
||||
bundle exec rspec docker/spec/full/*_spec.rb
|
||||
elif [[ $SELECTED_TEST_SUITE == "ubi8" ]]; then
|
||||
echo "--- Building $SELECTED_TEST_SUITE docker images"
|
||||
cd $LS_HOME
|
||||
rake artifact:docker_ubi8
|
||||
echo "--- Acceptance: Installing dependencies"
|
||||
cd $QA_DIR
|
||||
bundle install
|
||||
|
||||
echo "--- Acceptance: Running the tests"
|
||||
bundle exec rspec docker/spec/ubi8/*_spec.rb
|
||||
elif [[ $SELECTED_TEST_SUITE == "wolfi" ]]; then
|
||||
echo "--- Building $SELECTED_TEST_SUITE docker images"
|
||||
cd $LS_HOME
|
||||
|
|
|
@@ -19,24 +19,15 @@ if [[ $1 = "setup" ]]; then
exit 0

elif [[ $1 == "split" ]]; then
cd qa/integration
glob1=(specs/*spec.rb)
glob2=(specs/**/*spec.rb)
all_specs=("${glob1[@]}" "${glob2[@]}")
# Source shared function for splitting integration tests
source "$(dirname "${BASH_SOURCE[0]}")/partition-files.lib.sh"

specs0=${all_specs[@]::$((${#all_specs[@]} / 2 ))}
specs1=${all_specs[@]:$((${#all_specs[@]} / 2 ))}
cd ../..
if [[ $2 == 0 ]]; then
echo "Running the first half of integration specs: $specs0"
./gradlew runIntegrationTests -PrubyIntegrationSpecs="$specs0" --console=plain
elif [[ $2 == 1 ]]; then
echo "Running the second half of integration specs: $specs1"
./gradlew runIntegrationTests -PrubyIntegrationSpecs="$specs1" --console=plain
else
echo "Error, must specify 0 or 1 after the split. For example ci/integration_tests.sh split 0"
exit 1
fi
index="${2:?index}"
count="${3:-2}"
specs=($(cd qa/integration; partition_files "${index}" "${count}" < <(find specs -name '*_spec.rb') ))

echo "Running integration tests partition[${index}] of ${count}: ${specs[*]}"
./gradlew runIntegrationTests -PrubyIntegrationSpecs="${specs[*]}" --console=plain

elif [[ ! -z $@ ]]; then
echo "Running integration tests 'rspec $@'"
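For reference, the new split mode above boils down to the shared helper doing the file selection. A minimal standalone sketch of the same call (the partition index and count are illustrative):

    # assumed: run from the repository root; the helper is sourced, not executed
    source ci/partition-files.lib.sh
    cd qa/integration
    partition_files 0 2 < <(find specs -name '*_spec.rb')   # files for worker 0 of 2
    partition_files 1 2 < <(find specs -name '*_spec.rb')   # files for worker 1 of 2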
@@ -1,13 +1,15 @@
{
"releases": {
"5.x": "5.6.16",
"6.x": "6.8.23",
"7.x": "7.17.22",
"8.x": "8.14.1"
"7.current": "7.17.28",
"8.previous": "8.17.5",
"8.current": "8.18.0"
},
"snapshots": {
"7.x": "7.17.23-SNAPSHOT",
"8.x": "8.14.2-SNAPSHOT",
"main": "8.15.0-SNAPSHOT"
"7.current": "7.17.29-SNAPSHOT",
"8.previous": "8.17.6-SNAPSHOT",
"8.current": "8.18.1-SNAPSHOT",
"8.next": "8.19.0-SNAPSHOT",
"9.next": "9.0.1-SNAPSHOT",
"main": "9.1.0-SNAPSHOT"
}
}
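The release scripts in this diff read these keys with jq (see the Snyk report script near the top of this section). A couple of hedged examples against the new layout:

    jq -r '.snapshots | .[]' ci/logstash_releases.json          # every snapshot version
    jq -r '.releases["8.current"]' ci/logstash_releases.json    # prints 8.18.0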
78
ci/partition-files.lib.sh
Executable file
78
ci/partition-files.lib.sh
Executable file
|
@ -0,0 +1,78 @@
|
|||
#!/bin/bash
|
||||
|
||||
# partition_files returns a consistent partition of the filenames given on stdin
|
||||
# Usage: partition_files <partition_index> <partition_count=2> < <(ls files)
|
||||
# partition_index: the zero-based index of the partition to select `[0,partition_count)`
|
||||
# partition_count: the number of partitions `[2,#files]`
|
||||
partition_files() (
|
||||
set -e
|
||||
|
||||
local files
|
||||
# ensure files is consistently sorted and distinct
|
||||
IFS=$'\n' read -ra files -d '' <<<"$(cat - | sort | uniq)" || true
|
||||
|
||||
local partition_index="${1:?}"
|
||||
local partition_count="${2:?}"
|
||||
|
||||
_error () { >&2 echo "ERROR: ${1:-UNSPECIFIED}"; exit 1; }
|
||||
|
||||
# safeguard against nonsense invocations
|
||||
if (( ${#files[@]} < 2 )); then
|
||||
_error "#files(${#files[@]}) must be at least 2 in order to partition"
|
||||
elif ( ! [[ "${partition_count}" =~ ^[0-9]+$ ]] ) || (( partition_count < 2 )) || (( partition_count > ${#files[@]})); then
|
||||
_error "partition_count(${partition_count}) must be a number that is at least 2 and not greater than #files(${#files[@]})"
|
||||
elif ( ! [[ "${partition_index}" =~ ^[0-9]+$ ]] ) || (( partition_index < 0 )) || (( partition_index >= $partition_count )) ; then
|
||||
_error "partition_index(${partition_index}) must be a number that is greater 0 and less than partition_count(${partition_count})"
|
||||
fi
|
||||
|
||||
# round-robbin emit those in our selected partition
|
||||
for index in "${!files[@]}"; do
|
||||
partition="$(( index % partition_count ))"
|
||||
if (( partition == partition_index )); then
|
||||
echo "${files[$index]}"
|
||||
fi
|
||||
done
|
||||
)
|
||||
|
||||
if [[ "$0" == "${BASH_SOURCE[0]}" ]]; then
|
||||
if [[ "$1" == "test" ]]; then
|
||||
status=0
|
||||
|
||||
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
||||
file_list="$( cd "${SCRIPT_DIR}"; find . -type f )"
|
||||
|
||||
# for any legal partitioning into N partitions, we ensure that
|
||||
# the combined output of `partition_files I N` where `I` is all numbers in
|
||||
# the range `[0,N)` produces no repeats and no omissions, even if the
|
||||
# input list is not consistently ordered.
|
||||
for n in $(seq 2 $(wc -l <<<"${file_list}")); do
|
||||
result=""
|
||||
for i in $(seq 0 $(( n - 1 ))); do
|
||||
for file in $(partition_files $i $n <<<"$( shuf <<<"${file_list}" )"); do
|
||||
result+="${file}"$'\n'
|
||||
done
|
||||
done
|
||||
|
||||
repeated="$( uniq --repeated <<<"$( sort <<<"${result}" )" )"
|
||||
if (( $(printf "${repeated}" | wc -l) > 0 )); then
|
||||
status=1
|
||||
echo "[n=${n}]FAIL(repeated):"$'\n'"${repeated}"
|
||||
fi
|
||||
|
||||
missing=$( comm -23 <(sort <<<"${file_list}") <( sort <<<"${result}" ) )
|
||||
if (( $(printf "${missing}" | wc -l) > 0 )); then
|
||||
status=1
|
||||
echo "[n=${n}]FAIL(omitted):"$'\n'"${missing}"
|
||||
fi
|
||||
done
|
||||
|
||||
if (( status > 0 )); then
|
||||
echo "There were failures. The input list was:"
|
||||
echo "${file_list}"
|
||||
fi
|
||||
|
||||
exit "${status}"
|
||||
else
|
||||
partition_files $@
|
||||
fi
|
||||
fi
|
|
@ -28,7 +28,9 @@ build_logstash() {
|
|||
}
|
||||
|
||||
index_test_data() {
|
||||
curl -X POST -H "Authorization: ApiKey $TESTER_API_KEY_ENCODED" "$ES_ENDPOINT/$INDEX_NAME/_bulk" -H 'Content-Type: application/json' --data-binary @"$CURRENT_DIR/test_data/book.json"
|
||||
curl -X POST -H "Authorization: ApiKey $TESTER_API_KEY_ENCODED" "$ES_ENDPOINT/$INDEX_NAME/_bulk" \
|
||||
-H 'x-elastic-product-origin: logstash' \
|
||||
-H 'Content-Type: application/json' --data-binary @"$CURRENT_DIR/test_data/book.json"
|
||||
}
|
||||
|
||||
# $1: check function
|
||||
|
|
|
@ -7,7 +7,8 @@ export PIPELINE_NAME='gen_es'
|
|||
|
||||
# update pipeline and check response code
|
||||
index_pipeline() {
|
||||
RESP_CODE=$(curl -s -w "%{http_code}" -X PUT -H "Authorization: ApiKey $TESTER_API_KEY_ENCODED" "$ES_ENDPOINT/_logstash/pipeline/$1" -H 'Content-Type: application/json' -d "$2")
|
||||
RESP_CODE=$(curl -s -w "%{http_code}" -X PUT -H "Authorization: ApiKey $TESTER_API_KEY_ENCODED" "$ES_ENDPOINT/_logstash/pipeline/$1" \
|
||||
-H 'x-elastic-product-origin: logstash' -H 'Content-Type: application/json' -d "$2")
|
||||
if [[ $RESP_CODE -ge '400' ]]; then
|
||||
echo "failed to update pipeline for Central Pipeline Management. Got $RESP_CODE from Elasticsearch"
|
||||
exit 1
|
||||
|
@@ -34,7 +35,7 @@ check_plugin() {
}

delete_pipeline() {
  curl -H "Authorization: ApiKey $TESTER_API_KEY_ENCODED" -X DELETE "$ES_ENDPOINT/_logstash/pipeline/$PIPELINE_NAME" -H 'Content-Type: application/json';
  curl -H "Authorization: ApiKey $TESTER_API_KEY_ENCODED" -H 'x-elastic-product-origin: logstash' -X DELETE "$ES_ENDPOINT/_logstash/pipeline/$PIPELINE_NAME" -H 'Content-Type: application/json';
}

cpm_clean_up_and_get_result() {

@@ -6,10 +6,12 @@ source ./$(dirname "$0")/common.sh
deploy_ingest_pipeline() {
  PIPELINE_RESP_CODE=$(curl -s -w "%{http_code}" -o /dev/null -X PUT -H "Authorization: ApiKey $TESTER_API_KEY_ENCODED" "$ES_ENDPOINT/_ingest/pipeline/integration-logstash_test.events-default" \
    -H 'Content-Type: application/json' \
    -H 'x-elastic-product-origin: logstash' \
    --data-binary @"$CURRENT_DIR/test_data/ingest_pipeline.json")

  TEMPLATE_RESP_CODE=$(curl -s -w "%{http_code}" -o /dev/null -X PUT -H "Authorization: ApiKey $TESTER_API_KEY_ENCODED" "$ES_ENDPOINT/_index_template/logs-serverless-default-template" \
    -H 'Content-Type: application/json' \
    -H 'x-elastic-product-origin: logstash' \
    --data-binary @"$CURRENT_DIR/test_data/index_template.json")

  # the ingest pipeline is likely to be there from the last run

@@ -29,7 +31,7 @@ check_integration_filter() {
}

get_doc_msg_length() {
  curl -s -H "Authorization: ApiKey $TESTER_API_KEY_ENCODED" "$ES_ENDPOINT/logs-$INDEX_NAME.004-default/_search?size=1" | jq '.hits.hits[0]._source.message | length'
  curl -s -H "Authorization: ApiKey $TESTER_API_KEY_ENCODED" "$ES_ENDPOINT/logs-$INDEX_NAME.004-default/_search?size=1" -H 'x-elastic-product-origin: logstash' | jq '.hits.hits[0]._source.message | length'
}

# ensure no double run of ingest pipeline

@@ -9,7 +9,7 @@ check_named_index() {
}

get_data_stream_count() {
  curl -s -H "Authorization: ApiKey $TESTER_API_KEY_ENCODED" "$ES_ENDPOINT/logs-$INDEX_NAME.001-default/_count" | jq '.count // 0'
  curl -s -H "Authorization: ApiKey $TESTER_API_KEY_ENCODED" -H 'x-elastic-product-origin: logstash' "$ES_ENDPOINT/logs-$INDEX_NAME.001-default/_count" | jq '.count // 0'
}

compare_data_stream_count() {

@@ -10,7 +10,7 @@ export EXIT_CODE="0"

create_pipeline() {
  RESP_CODE=$(curl -s -w "%{http_code}" -o /dev/null -X PUT -H "Authorization: ApiKey $TESTER_API_KEY_ENCODED" "$KB_ENDPOINT/api/logstash/pipeline/$PIPELINE_NAME" \
    -H 'Content-Type: application/json' -H 'kbn-xsrf: logstash' \
    -H 'Content-Type: application/json' -H 'kbn-xsrf: logstash' -H 'x-elastic-product-origin: logstash' \
    --data-binary @"$CURRENT_DIR/test_data/$PIPELINE_NAME.json")

  if [[ RESP_CODE -ge '400' ]]; then

@@ -20,7 +20,8 @@ create_pipeline() {
}

get_pipeline() {
  RESP_BODY=$(curl -s -X GET -H "Authorization: ApiKey $TESTER_API_KEY_ENCODED" "$KB_ENDPOINT/api/logstash/pipeline/$PIPELINE_NAME")
  RESP_BODY=$(curl -s -X GET -H "Authorization: ApiKey $TESTER_API_KEY_ENCODED" -H 'x-elastic-product-origin: logstash' \
    "$KB_ENDPOINT/api/logstash/pipeline/$PIPELINE_NAME")
  SOURCE_BODY=$(cat "$CURRENT_DIR/test_data/$PIPELINE_NAME.json")

  RESP_PIPELINE_NAME=$(echo "$RESP_BODY" | jq -r '.id')

@@ -41,7 +42,8 @@ get_pipeline() {
}

list_pipeline() {
  RESP_BODY=$(curl -s -X GET -H "Authorization: ApiKey $TESTER_API_KEY_ENCODED" "$KB_ENDPOINT/api/logstash/pipelines" | jq --arg name "$PIPELINE_NAME" '.pipelines[] | select(.id==$name)' )
  RESP_BODY=$(curl -s -X GET -H "Authorization: ApiKey $TESTER_API_KEY_ENCODED" -H 'x-elastic-product-origin: logstash' \
    "$KB_ENDPOINT/api/logstash/pipelines" | jq --arg name "$PIPELINE_NAME" '.pipelines[] | select(.id==$name)' )
  if [[ -z "$RESP_BODY" ]]; then
    EXIT_CODE=$(( EXIT_CODE + 1 ))
    echo "Failed to list pipeline."

@@ -49,7 +51,8 @@ list_pipeline() {
}

delete_pipeline() {
  RESP_CODE=$(curl -s -w "%{http_code}" -o /dev/null -X DELETE -H "Authorization: ApiKey $TESTER_API_KEY_ENCODED" "$KB_ENDPOINT/api/logstash/pipeline/$PIPELINE_NAME" \
  RESP_CODE=$(curl -s -w "%{http_code}" -o /dev/null -X DELETE -H "Authorization: ApiKey $TESTER_API_KEY_ENCODED" -H 'x-elastic-product-origin: logstash' \
    "$KB_ENDPOINT/api/logstash/pipeline/$PIPELINE_NAME" \
    -H 'Content-Type: application/json' -H 'kbn-xsrf: logstash' \
    --data-binary @"$CURRENT_DIR/test_data/$PIPELINE_NAME.json")

@@ -40,7 +40,7 @@ stop_metricbeat() {
}

get_monitor_count() {
  curl -s -H "Authorization: ApiKey $TESTER_API_KEY_ENCODED" "$ES_ENDPOINT/$INDEX_NAME/_count" | jq '.count // 0'
  curl -s -H "Authorization: ApiKey $TESTER_API_KEY_ENCODED" -H 'x-elastic-product-origin: logstash' "$ES_ENDPOINT/$INDEX_NAME/_count" | jq '.count // 0'
}

compare_monitor_count() {

@@ -6,7 +6,7 @@ set -ex
source ./$(dirname "$0")/common.sh

get_monitor_count() {
  curl -s -H "Authorization: ApiKey $LS_ROLE_API_KEY_ENCODED" "$ES_ENDPOINT/.monitoring-logstash-7-*/_count" | jq '.count'
  curl -s -H "Authorization: ApiKey $LS_ROLE_API_KEY_ENCODED" -H 'x-elastic-product-origin: logstash' "$ES_ENDPOINT/.monitoring-logstash-7-*/_count" | jq '.count'
}

compare_monitor_count() {

@@ -16,10 +16,6 @@
##
################################################################

## GC configuration
11-13:-XX:+UseConcMarkSweepGC
11-13:-XX:CMSInitiatingOccupancyFraction=75
11-13:-XX:+UseCMSInitiatingOccupancyOnly

## Locale
# Set the locale language

@@ -34,7 +30,7 @@
## basic

# set the I/O temp directory
#-Djava.io.tmpdir=$HOME
#-Djava.io.tmpdir=${HOME}

# set to headless, just in case
-Djava.awt.headless=true

@@ -59,11 +55,7 @@
#-XX:HeapDumpPath=${LOGSTASH_HOME}/heapdump.hprof

## GC logging
#-Xlog:gc*,gc+age=trace,safepoint:file=@loggc@:utctime,pid,tags:filecount=32,filesize=64m

# log GC status to a file with time stamps
# ensure the directory exists
#-Xloggc:${LS_GC_LOG_FILE}
#-Xlog:gc*,gc+age=trace,safepoint:file=${LS_GC_LOG_FILE}:utctime,pid,tags:filecount=32,filesize=64m

# Entropy source for randomness
-Djava.security.egd=file:/dev/urandom

@@ -79,11 +71,11 @@
# text values with sizes less than or equal to this limit will be treated as invalid.
# This value should be higher than `logstash.jackson.stream-read-constraints.max-number-length`.
# The jackson library defaults to 20000000 or 20MB, whereas Logstash defaults to 200MB or 200000000 characters.
-Dlogstash.jackson.stream-read-constraints.max-string-length=200000000
#-Dlogstash.jackson.stream-read-constraints.max-string-length=200000000
#
# Sets the maximum number length (in chars or bytes, depending on input context).
# The jackson library defaults to 1000, whereas Logstash defaults to 10000.
-Dlogstash.jackson.stream-read-constraints.max-number-length=10000
#-Dlogstash.jackson.stream-read-constraints.max-number-length=10000
#
# Sets the maximum nesting depth. The depth is a count of objects and arrays that have not
# been closed, `{` and `[` respectively.

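As a hedged aside, the Jackson stream-read constraints shown above can also be supplied at startup through the LS_JAVA_OPTS environment variable, which Logstash appends to the flags read from config/jvm.options; the property values below are arbitrary placeholders for illustration, not recommended settings:

  # Sketch: override the defaults without editing config/jvm.options.
  # A later -D definition of the same property is generally the one that takes effect.
  export LS_JAVA_OPTS="-Dlogstash.jackson.stream-read-constraints.max-string-length=100000000 -Dlogstash.jackson.stream-read-constraints.max-number-length=5000"
  bin/logstash -f pipeline.conf
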
@@ -154,7 +154,7 @@ appender.deprecation_rolling.policies.size.size = 100MB
appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
appender.deprecation_rolling.strategy.max = 30

logger.deprecation.name = org.logstash.deprecation, deprecation
logger.deprecation.name = org.logstash.deprecation
logger.deprecation.level = WARN
logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_plain_rolling
logger.deprecation.additivity = false

@@ -181,38 +181,6 @@
#
# api.auth.basic.password_policy.mode: WARN
#
# ------------ Module Settings ---------------
# Define modules here. Module definitions must be defined as an array.
# The simple way to see this is to prepend each `name` with a `-`, and keep
# all associated variables under the `name` they are associated with, and
# above the next, like this:
#
# modules:
#   - name: MODULE_NAME
#     var.PLUGINTYPE1.PLUGINNAME1.KEY1: VALUE
#     var.PLUGINTYPE1.PLUGINNAME1.KEY2: VALUE
#     var.PLUGINTYPE2.PLUGINNAME1.KEY1: VALUE
#     var.PLUGINTYPE3.PLUGINNAME3.KEY1: VALUE
#
# Module variable names must be in the format of
#
#   var.PLUGIN_TYPE.PLUGIN_NAME.KEY
#
# modules:
#
# ------------ Cloud Settings ---------------
# Define Elastic Cloud settings here.
# Format of cloud.id is a base64 value e.g. dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyRub3RhcmVhbCRpZGVudGlmaWVy
# and it may have a label prefix e.g. staging:dXMtZ...
# This will overwrite 'var.elasticsearch.hosts' and 'var.kibana.host'
# cloud.id: <identifier>
#
# Format of cloud.auth is: <user>:<pass>
# This is optional
# If supplied this will overwrite 'var.elasticsearch.username' and 'var.elasticsearch.password'
# If supplied this will overwrite 'var.kibana.username' and 'var.kibana.password'
# cloud.auth: elastic:<password>
#
# ------------ Queuing Settings --------------
#
# Internal queuing model, "memory" for legacy in-memory based queuing and

@@ -314,13 +282,13 @@
# * json
#
# log.format: plain
# log.format.json.fix_duplicate_message_fields: false
# log.format.json.fix_duplicate_message_fields: true
#
# path.logs:
#
# ------------ Other Settings --------------
#
# Allow or block running Logstash as superuser (default: true)
# Allow or block running Logstash as superuser (default: true). Windows is excluded from this check
# allow_superuser: false
#
# Where to find custom plugins

@@ -331,13 +299,15 @@
# pipeline.separate_logs: false
#
# Determine where to allocate memory buffers, for plugins that leverage them.
# Default to direct, optionally can be switched to heap to select Java heap space.
# pipeline.buffer.type: direct
# Defaults to heap, but can be switched to direct if you prefer using direct memory space instead.
# pipeline.buffer.type: heap
#
# ------------ X-Pack Settings (not applicable for OSS build)--------------
#
# X-Pack Monitoring
# https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html
# Flag to allow the legacy internal monitoring (default: false)
#xpack.monitoring.allow_legacy_collection: false
#xpack.monitoring.enabled: false
#xpack.monitoring.elasticsearch.username: logstash_system
#xpack.monitoring.elasticsearch.password: password

@@ -1,232 +0,0 @@
#!/usr/bin/env python3
"""Cherry pick and backport a PR"""
from __future__ import print_function

from builtins import input
import sys
import os
import argparse
from os.path import expanduser
import re
from subprocess import check_call, call, check_output
import requests
import json

usage = """
Example usage:
./devtools/backport 7.16 2565 6490604aa0cf7fa61932a90700e6ca988fc8a527

In case of backporting errors, fix them, then run
git cherry-pick --continue
./devtools/backport 7.16 2565 6490604aa0cf7fa61932a90700e6ca988fc8a527 --continue

This script does the following:
* cleanups both from_branch and to_branch (warning: drops local changes)
* creates a temporary branch named something like "branch_2565"
* calls the git cherry-pick command in this branch
* after fixing the merge errors (if needed), pushes the branch to your
  remote
* it will attempt to create a PR for you using the GitHub API, but requires
  the GitHub token, with the public_repo scope, available in `~/.elastic/github.token`.
  Keep in mind this token has to also be authorized to the Elastic organization as
  well as to work with SSO.
  (see https://help.github.com/en/articles/authorizing-a-personal-access-token-for-use-with-saml-single-sign-on)

Note that you need to take the commit hashes from `git log` on the
from_branch, copying the IDs from Github doesn't work in case we squashed the
PR.
"""


def main():
    """Main"""
    parser = argparse.ArgumentParser(
        description="Creates a PR for cherry-picking commits",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=usage)
    parser.add_argument("to_branch",
                        help="To branch (e.g 7.x)")
    parser.add_argument("pr_number",
                        help="The PR number being merged (e.g. 2345)")
    parser.add_argument("commit_hashes", metavar="hash", nargs="*",
                        help="The commit hashes to cherry pick." +
                             " You can specify multiple.")
    parser.add_argument("--yes", action="store_true",
                        help="Assume yes. Warning: discards local changes.")
    parser.add_argument("--continue", action="store_true",
                        help="Continue after fixing merging errors.")
    parser.add_argument("--from_branch", default="main",
                        help="From branch")
    parser.add_argument("--diff", action="store_true",
                        help="Display the diff before pushing the PR")
    parser.add_argument("--remote", default="",
                        help="Which remote to push the backport branch to")
    #parser.add_argument("--zube-team", default="",
    #                    help="Team the PR belongs to")
    #parser.add_argument("--keep-backport-label", action="store_true",
    #                    help="Preserve label needs_backport in original PR")
    args = parser.parse_args()

    print(args)

    create_pr(parser, args)

def create_pr(parser, args):
    info("Checking if GitHub API token is available in `~/.elastic/github.token`")
    token = get_github_token()

    tmp_branch = "backport_{}_{}".format(args.pr_number, args.to_branch)

    if not vars(args)["continue"]:
        if not args.yes and input("This will destroy all local changes. " +
                                  "Continue? [y/n]: ") != "y":
            return 1
        info("Destroying local changes...")
        check_call("git reset --hard", shell=True)
        check_call("git clean -df", shell=True)
        check_call("git fetch", shell=True)

        info("Checkout of {} to backport from....".format(args.from_branch))
        check_call("git checkout {}".format(args.from_branch), shell=True)
        check_call("git pull", shell=True)

        info("Checkout of {} to backport to...".format(args.to_branch))
        check_call("git checkout {}".format(args.to_branch), shell=True)
        check_call("git pull", shell=True)

        info("Creating backport branch {}...".format(tmp_branch))
        call("git branch -D {} > /dev/null".format(tmp_branch), shell=True)
        check_call("git checkout -b {}".format(tmp_branch), shell=True)

        if len(args.commit_hashes) == 0:
            if token:
                session = github_session(token)
                base = "https://api.github.com/repos/elastic/logstash"
                original_pr = session.get(base + "/pulls/" + args.pr_number).json()
                merge_commit = original_pr['merge_commit_sha']
                if not merge_commit:
                    info("Could not auto resolve merge commit - PR isn't merged yet")
                    return 1
                info("Merge commit detected from PR: {}".format(merge_commit))
                commit_hashes = merge_commit
            else:
                info("GitHub API token not available. " +
                     "Please manually specify commit hash(es) argument(s)\n")
                parser.print_help()
                return 1
        else:
            commit_hashes = "{}".format(" ").join(args.commit_hashes)

        info("Cherry-picking {}".format(commit_hashes))
        if call("git cherry-pick -x {}".format(commit_hashes), shell=True) != 0:
            info("Looks like you have cherry-pick errors.")
            info("Fix them, then run: ")
            info(" git cherry-pick --continue")
            info(" {} --continue".format(" ".join(sys.argv)))
            return 1

    if len(check_output("git status -s", shell=True).strip()) > 0:
        info("Looks like you have uncommitted changes." +
             " Please execute first: git cherry-pick --continue")
        return 1

    if len(check_output("git log HEAD...{}".format(args.to_branch),
                        shell=True).strip()) == 0:
        info("No commit to push")
        return 1

    if args.diff:
        call("git diff {}".format(args.to_branch), shell=True)
        if input("Continue? [y/n]: ") != "y":
            info("Aborting cherry-pick.")
            return 1

    info("Ready to push branch.")

    remote = args.remote
    if not remote:
        remote = input("To which remote should I push? (your fork): ")

    info("Pushing branch {} to remote {}".format(tmp_branch, remote))
    call("git push {} :{} > /dev/null".format(remote, tmp_branch), shell=True)
    check_call("git push --set-upstream {} {}".format(remote, tmp_branch), shell=True)

    if not token:
        info("GitHub API token not available.\n" +
             "Manually create a PR by following this URL: \n\t" +
             "https://github.com/elastic/logstash/compare/{}...{}:{}?expand=1"
             .format(args.to_branch, remote, tmp_branch))
    else:
        info("Automatically creating a PR for you...")

        session = github_session(token)
        base = "https://api.github.com/repos/elastic/logstash"
        original_pr = session.get(base + "/pulls/" + args.pr_number).json()

        # get the github username from the remote where we pushed
        remote_url = check_output("git remote get-url {}".format(remote), shell=True)
        remote_user = re.search("github.com[:/](.+)/logstash", str(remote_url)).group(1)

        # create PR
        request = session.post(base + "/pulls", json=dict(
            title="Backport PR #{} to {}: {}".format(args.pr_number, args.to_branch, original_pr["title"]),
            head=remote_user + ":" + tmp_branch,
            base=args.to_branch,
            body="**Backport PR #{} to {} branch, original message:**\n\n---\n\n{}"
                 .format(args.pr_number, args.to_branch, original_pr["body"])
        ))
        if request.status_code > 299:
            info("Creating PR failed: {}".format(request.json()))
            sys.exit(1)
        new_pr = request.json()

        # add labels
        labels = ["backport"]
        # get the version (vX.Y.Z) we are backporting to
        version = get_version(os.getcwd())
        if version:
            labels.append(version)

        session.post(
            base + "/issues/{}/labels".format(new_pr["number"]), json=labels)

        """
        if not args.keep_backport_label:
            # remove needs backport label from the original PR
            session.delete(base + "/issues/{}/labels/needs_backport".format(args.pr_number))
        """
        # Set a version label on the original PR
        if version:
            session.post(
                base + "/issues/{}/labels".format(args.pr_number), json=[version])

        info("Done. PR created: {}".format(new_pr["html_url"]))
        info("Please go and check it and add the review tags")

def get_version(base_dir):
    #pattern = re.compile(r'(const\s|)\w*(v|V)ersion\s=\s"(?P<version>.*)"')
    with open(os.path.join(base_dir, "versions.yml"), "r") as f:
        for line in f:
            if line.startswith('logstash:'):
                return "v" + line.split(':')[-1].strip()
            #match = pattern.match(line)
            #if match:
            #    return match.group('version')

def get_github_token():
    try:
        token = open(expanduser("~/.elastic/github.token"), "r").read().strip()
    except:
        token = False
    return token

def github_session(token):
    session = requests.Session()
    session.headers.update({"Authorization": "token " + token})
    return session

def info(msg):
    print("\nINFO: {}".format(msg))

if __name__ == "__main__":
    sys.exit(main())

@@ -1,163 +0,0 @@
#!/usr/bin/env python3
"""Cherry pick and backport a PR"""
from __future__ import print_function

from builtins import input
import sys
import os
import argparse
from os.path import expanduser
import re
from subprocess import check_call, call, check_output
import requests

usage = """
Example usage:
./dev-tools/create local_branch

This script does the following:
* cleanups local_branch (warning: drops local changes)
* rebases the branch against main
* it will attempt to create a PR for you using the GitHub API, but requires
  the GitHub token, with the public_repo scope, available in `~/.elastic/github.token`.
  Keep in mind this token has to also be authorized to the Elastic organization as
  well as to work with SSO.
  (see https://help.github.com/en/articles/authorizing-a-personal-access-token-for-use-with-saml-single-sign-on)

Note that you need to take the commit hashes from `git log` on the
from_branch, copying the IDs from Github doesn't work in case we squashed the
PR.
"""


def main():
    """Main"""
    parser = argparse.ArgumentParser(
        description="Creates a new PR from a branch",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=usage)
    parser.add_argument("local_branch",
                        help="Branch to Create a PR for")
    parser.add_argument("--to_branch", default="main",
                        help="Which remote to push the backport branch to")
    parser.add_argument("--yes", action="store_true",
                        help="Assume yes. Warning: discards local changes.")
    parser.add_argument("--continue", action="store_true",
                        help="Continue after fixing merging errors.")
    parser.add_argument("--diff", action="store_true",
                        help="Display the diff before pushing the PR")
    parser.add_argument("--remote", default="",
                        help="Which remote to push the backport branch to")
    args = parser.parse_args()

    print(args)

    create_pr(args)

def create_pr(args):

    if not vars(args)["continue"]:
        if not args.yes and input("This will destroy all local changes. " +
                                  "Continue? [y/n]: ") != "y":
            return 1
        info("Destroying local changes...")
        check_call("git reset --hard", shell=True)
        check_call("git clean -df", shell=True)
        #check_call("git fetch", shell=True)

        info("Checkout of {} to create a PR....".format(args.local_branch))
        check_call("git checkout {}".format(args.local_branch), shell=True)
        check_call("git rebase {}".format(args.to_branch), shell=True)

    if args.diff:
        call("git diff {}".format(args.to_branch), shell=True)
        if input("Continue? [y/n]: ") != "y":
            info("Aborting PR creation...")
            return 1

    info("Ready to push branch and create PR...")

    remote = args.remote
    if not remote:
        remote = input("To which remote should I push? (your fork): ")

    info("Pushing branch {} to remote {}".format(args.local_branch, remote))
    call("git push {} :{} > /dev/null".format(remote, args.local_branch),
         shell=True)
    check_call("git push --set-upstream {} {}"
               .format(remote, args.local_branch), shell=True)

    info("Checking if GitHub API token is available in `~/.elastic/github.token`")
    try:
        token = open(expanduser("~/.elastic/github.token"), "r").read().strip()
    except:
        token = False

    if not token:
        info("GitHub API token not available.\n" +
             "Manually create a PR by following this URL: \n\t" +
             "https://github.com/elastic/logstash/compare/{}...{}:{}?expand=1"
             .format(args.to_branch, remote, args.local_branch))
    else:
        info("Automatically creating a PR for you...")

        base = "https://api.github.com/repos/elastic/logstash"
        session = requests.Session()
        session.headers.update({"Authorization": "token " + token})

        # get the github username from the remote where we pushed
        remote_url = check_output("git remote get-url {}".format(remote),
                                  shell=True)
        remote_user = re.search("github.com[:/](.+)/logstash", str(remote_url)).group(1)

        ### TODO:
        title = input("Title: ")
        body = input("Description: ")

        # create PR
        request = session.post(base + "/pulls", json=dict(
            title=title,
            head=remote_user + ":" + args.local_branch,
            base=args.to_branch,
            body=body
        ))
        if request.status_code > 299:
            info("Creating PR failed: {}".format(request.json()))
            sys.exit(1)
        new_pr = request.json()

        """
        # add labels
        labels = ["backport"]
        # get the version we are backported to
        version = get_version(os.getcwd())
        if version:
            labels.append("v" + version)

        session.post(
            base + "/issues/{}/labels".format(new_pr["number"]), json=labels)

        # Set a version label on the original PR
        if version:
            session.post(
                base + "/issues/{}/labels".format(args.pr_number), json=[version])
        """

        info("Done. PR created: {}".format(new_pr["html_url"]))
        info("Please go and check it and add the review tags")

def get_version(base_dir):
    #pattern = re.compile(r'(const\s|)\w*(v|V)ersion\s=\s"(?P<version>.*)"')
    with open(os.path.join(base_dir, "versions.yml"), "r") as f:
        for line in f:
            if line.startswith('logstash:'):
                return line.split(':')[-1].strip()
            #match = pattern.match(line)
            #if match:
            #    return match.group('version')

def info(msg):
    print("\nINFO: {}".format(msg))

if __name__ == "__main__":
    sys.exit(main())