Mirror of https://github.com/elastic/logstash.git (synced 2025-04-19 12:25:10 -04:00)

Compare commits: 90 commits
Commits (SHA1):
8fc4b75e66, bbc80ec0ff, 4205eb7a75, 4d04d6c7a4, a7ca91af8d, f9606502ec, ce8d43f39b, 69e554b799, 6dceb2453d, 168f969013,
3a35abc24e, 002d4897c4, 32e6def9f8, 4e87a30c1c, 589f1b652d, 430424aac8, 5ead3a9880, baa181ae39, ed38254f45, 1adecfe3b7,
f2a7b3022f, 15e4e25048, 2a4eb2da01, 7f19931c77, 3ac74007f2, a3f7aece28, 946c78c941, afeef994de, ad16facaae, 8716e77313,
ed4b2e2f6f, 1b1a76b03f, ff27fae426, 911f553dbc, bd8a67c146, 886a0c7ed7, 2264a557ed, 11f433f7ec, 8497db1cd8, 5521023240,
0770fff960, 2e7b1fe6fd, c0ae02e44d, 1f93dc5b65, 36feb29597, 12645e9755, eeec9e0621, 0beea5ad52, 8b97c052e6, a769327be8,
913d705e7c, e5e6f5fc1a, 2c982112ad, 15cdf5c63d, ccab5448cb, 8b897915cd, e4cb5c1ff7, 0db1565e7d, b3aad84b84, efe776eee0,
8c460d6a24, c2d6e9636f, d32b98758e, f25fcc303f, c1374a1d81, 4677cb22ed, dc0739bdaf, a1f4633b55, 8c6832e3d9, 0594c8867f,
26c2f61276, 986a253db8, ad7c61448f, 1e5105fcd8, 7eb5185b4e, c2c62fdce4, e854ac7bf5, 3b751d9794, 2f3f6a9651, d1155988c1,
476b9216f2, 2c024daecd, eafcf577dd, 8f20bd90c9, 1ccfb161d7, 30803789fc, 14f52c0472, d2b19001de, 32e7a25a15, 5ef86a8aa1
170 changed files with 8006 additions and 679 deletions
@@ -4,8 +4,12 @@ steps:
   - label: ":pipeline: Generate steps"
     command: |
       set -euo pipefail

-      echo "--- Building [${WORKFLOW_TYPE}] artifacts"
+      echo "--- Building [$${WORKFLOW_TYPE}] artifacts"
       python3 -m pip install pyyaml
       echo "--- Building dynamic pipeline steps"
-      python3 .buildkite/scripts/dra/generatesteps.py | buildkite-agent pipeline upload
+      python3 .buildkite/scripts/dra/generatesteps.py > steps.yml
+      echo "--- Printing dynamic pipeline steps"
+      cat steps.yml
+      echo "--- Uploading dynamic pipeline steps"
+      cat steps.yml | buildkite-agent pipeline upload
.buildkite/health_report_tests_pipeline.yml (new file, 20 lines)
@@ -0,0 +1,20 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json

agents:
  provider: gcp
  imageProject: elastic-images-prod
  image: family/platform-ingest-logstash-ubuntu-2204
  machineType: "n2-standard-4"
  diskSizeGb: 64

steps:
  - group: ":logstash: Health API integration tests"
    key: "testing-phase"
    steps:
      - label: "main branch"
        key: "integ-tests-on-main-branch"
        command:
          - .buildkite/scripts/health-report-tests/main.sh
        retry:
          automatic:
            - limit: 3
@@ -12,7 +12,7 @@ set -eo pipefail
 # https://github.com/elastic/ingest-dev/issues/2664
 # *******************************************************

-ACTIVE_BRANCHES_URL="https://raw.githubusercontent.com/elastic/logstash/main/ci/branches.json"
+ACTIVE_BRANCHES_URL="https://storage.googleapis.com/artifacts-api/snapshots/branches.json"
 EXCLUDE_BRANCHES_ARRAY=()
 BRANCHES=()
@@ -63,7 +63,7 @@ exclude_branches_to_array
 set -u
 set +e
 # pull releaseable branches from $ACTIVE_BRANCHES_URL
-readarray -t ELIGIBLE_BRANCHES < <(curl --retry-all-errors --retry 5 --retry-delay 5 -fsSL $ACTIVE_BRANCHES_URL | jq -r '.branches[].branch')
+readarray -t ELIGIBLE_BRANCHES < <(curl --retry-all-errors --retry 5 --retry-delay 5 -fsSL $ACTIVE_BRANCHES_URL | jq -r '.branches[]')
 if [[ $? -ne 0 ]]; then
   echo "There was an error downloading or parsing the json output from [$ACTIVE_BRANCHES_URL]. Exiting."
   exit 1
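The jq selector change above implies a new payload shape: branch names are now plain strings under `branches`, rather than `{"branch": ...}` objects. A minimal Python sketch of the same fetch, assuming the new endpoint returns something like `{"branches": ["main", "8.16"]}`:

```python
# Sketch only: mirrors the jq change from '.branches[].branch' to '.branches[]',
# assuming the new endpoint returns {"branches": ["main", "8.16", ...]}.
import requests

def fetch_active_branches(url: str) -> list:
    response = requests.get(url, timeout=10)
    response.raise_for_status()  # mirror the script's exit-on-error behavior
    return response.json()["branches"]

if __name__ == "__main__":
    url = "https://storage.googleapis.com/artifacts-api/snapshots/branches.json"
    print(fetch_active_branches(url))
```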
@@ -9,5 +9,5 @@
     "amazonlinux": ["amazonlinux-2023"],
     "opensuse": ["opensuse-leap-15"]
   },
-  "windows": ["windows-2022", "windows-2019", "windows-2016"]
+  "windows": ["windows-2025", "windows-2022", "windows-2019", "windows-2016"]
 }
@@ -7,63 +7,42 @@ echo "####################################################################"
 source ./$(dirname "$0")/common.sh

 # WORKFLOW_TYPE is a CI externally configured environment variable that could assume "snapshot" or "staging" values
 info "Building artifacts for the $WORKFLOW_TYPE workflow ..."

-case "$WORKFLOW_TYPE" in
-    snapshot)
-        info "Building artifacts for the $WORKFLOW_TYPE workflow..."
-        if [ -z "$VERSION_QUALIFIER_OPT" ]; then
-            rake artifact:docker || error "artifact:docker build failed."
-            rake artifact:docker_oss || error "artifact:docker_oss build failed."
-            rake artifact:docker_wolfi || error "artifact:docker_wolfi build failed."
-            rake artifact:dockerfiles || error "artifact:dockerfiles build failed."
-            if [ "$ARCH" != "aarch64" ]; then
-                rake artifact:docker_ubi8 || error "artifact:docker_ubi8 build failed."
-            fi
-        else
-            VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" rake artifact:docker || error "artifact:docker build failed."
-            VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" rake artifact:docker_oss || error "artifact:docker_oss build failed."
-            VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" rake artifact:docker_wolfi || error "artifact:docker_wolfi build failed."
-            VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" rake artifact:dockerfiles || error "artifact:dockerfiles build failed."
-            if [ "$ARCH" != "aarch64" ]; then
-                VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" rake artifact:docker_ubi8 || error "artifact:docker_ubi8 build failed."
-            fi
-            # Qualifier is passed from CI as optional field and specify the version postfix
-            # in case of alpha or beta releases:
-            # e.g: 8.0.0-alpha1
-            STACK_VERSION="${STACK_VERSION}-${VERSION_QUALIFIER_OPT}"
-        fi
-        STACK_VERSION=${STACK_VERSION}-SNAPSHOT
-        info "Build complete, setting STACK_VERSION to $STACK_VERSION."
-        : # no-op
-        ;;
-    staging)
-        info "Building artifacts for the $WORKFLOW_TYPE workflow..."
-        if [ -z "$VERSION_QUALIFIER_OPT" ]; then
-            RELEASE=1 rake artifact:docker || error "artifact:docker build failed."
-            RELEASE=1 rake artifact:docker_oss || error "artifact:docker_oss build failed."
-            RELEASE=1 rake artifact:docker_wolfi || error "artifact:docker_wolfi build failed."
-            RELEASE=1 rake artifact:dockerfiles || error "artifact:dockerfiles build failed."
-            if [ "$ARCH" != "aarch64" ]; then
-                RELEASE=1 rake artifact:docker_ubi8 || error "artifact:docker_ubi8 build failed."
-            fi
-        else
-            VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" RELEASE=1 rake artifact:docker || error "artifact:docker build failed."
-            VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" RELEASE=1 rake artifact:docker_oss || error "artifact:docker_oss build failed."
-            VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" RELEASE=1 rake artifact:docker_wolfi || error "artifact:docker_wolfi build failed."
-            VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" RELEASE=1 rake artifact:dockerfiles || error "artifact:dockerfiles build failed."
-            if [ "$ARCH" != "aarch64" ]; then
-                VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" RELEASE=1 rake artifact:docker_ubi8 || error "artifact:docker_ubi8 build failed."
-            fi
-            # Qualifier is passed from CI as optional field and specify the version postfix
-            # in case of alpha or beta releases:
-            # e.g: 8.0.0-alpha1
-            STACK_VERSION="${STACK_VERSION}-${VERSION_QUALIFIER_OPT}"
-        fi
-        info "Build complete, setting STACK_VERSION to $STACK_VERSION."
-        export RELEASE=1
-        ;;
-    *)
-        error "Workflow (WORKFLOW_TYPE variable) is not set, exiting..."
-        ;;
-esac
+rake artifact:docker || error "artifact:docker build failed."
+rake artifact:docker_oss || error "artifact:docker_oss build failed."
+rake artifact:docker_wolfi || error "artifact:docker_wolfi build failed."
+rake artifact:dockerfiles || error "artifact:dockerfiles build failed."
+
+if [[ "$ARCH" != "aarch64" ]]; then
+    rake artifact:docker_ubi8 || error "artifact:docker_ubi8 build failed."
+fi
+
+if [[ "$WORKFLOW_TYPE" == "staging" ]] && [[ -n "$VERSION_QUALIFIER" ]]; then
+    # Qualifier is passed from CI as optional field and specify the version postfix
+    # in case of alpha or beta releases for staging builds only:
+    # e.g: 8.0.0-alpha1
+    STACK_VERSION="${STACK_VERSION}-${VERSION_QUALIFIER}"
+fi
+
+if [[ "$WORKFLOW_TYPE" == "snapshot" ]]; then
+    STACK_VERSION="${STACK_VERSION}-SNAPSHOT"
+fi
+
+info "Build complete, setting STACK_VERSION to $STACK_VERSION."

 info "Saving tar.gz for docker images"
 save_docker_tarballs "${ARCH}" "${STACK_VERSION}"
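The refactor above replaces the duplicated `case` branches with one build pass plus two small suffix rules. A Python sketch of the version-suffix logic the new script encodes (the function name is illustrative, not from the script):

```python
# Sketch of the STACK_VERSION suffix rules in the refactored script:
# staging builds append the optional qualifier, snapshot builds append -SNAPSHOT.
def final_stack_version(stack_version: str, workflow_type: str, version_qualifier: str = "") -> str:
    if workflow_type == "staging" and version_qualifier:
        stack_version = f"{stack_version}-{version_qualifier}"  # e.g. 8.0.0-alpha1
    if workflow_type == "snapshot":
        stack_version = f"{stack_version}-SNAPSHOT"
    return stack_version

assert final_stack_version("8.16.0", "snapshot") == "8.16.0-SNAPSHOT"
assert final_stack_version("8.16.0", "staging", "alpha1") == "8.16.0-alpha1"
assert final_stack_version("8.16.0", "staging") == "8.16.0"
```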
@@ -7,39 +7,35 @@ echo "####################################################################"
 source ./$(dirname "$0")/common.sh

 # WORKFLOW_TYPE is a CI externally configured environment variable that could assume "snapshot" or "staging" values
 info "Building artifacts for the $WORKFLOW_TYPE workflow ..."

-case "$WORKFLOW_TYPE" in
-    snapshot)
-        info "Building artifacts for the $WORKFLOW_TYPE workflow..."
-        if [ -z "$VERSION_QUALIFIER_OPT" ]; then
-            SKIP_DOCKER=1 rake artifact:all || error "rake artifact:all build failed."
-        else
-            # Qualifier is passed from CI as optional field and specify the version postfix
-            # in case of alpha or beta releases:
-            # e.g: 8.0.0-alpha1
-            VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" SKIP_DOCKER=1 rake artifact:all || error "rake artifact:all build failed."
-            STACK_VERSION="${STACK_VERSION}-${VERSION_QUALIFIER_OPT}"
-        fi
-        STACK_VERSION=${STACK_VERSION}-SNAPSHOT
-        info "Build complete, setting STACK_VERSION to $STACK_VERSION."
-        : # no-op
-        ;;
-    staging)
-        info "Building artifacts for the $WORKFLOW_TYPE workflow..."
-        if [ -z "$VERSION_QUALIFIER_OPT" ]; then
-            RELEASE=1 SKIP_DOCKER=1 rake artifact:all || error "rake artifact:all build failed."
-        else
-            # Qualifier is passed from CI as optional field and specify the version postfix
-            # in case of alpha or beta releases:
-            # e.g: 8.0.0-alpha1
-            VERSION_QUALIFIER="$VERSION_QUALIFIER_OPT" RELEASE=1 SKIP_DOCKER=1 rake artifact:all || error "rake artifact:all build failed."
-            STACK_VERSION="${STACK_VERSION}-${VERSION_QUALIFIER_OPT}"
-        fi
-        info "Build complete, setting STACK_VERSION to $STACK_VERSION."
-        export RELEASE=1
-        ;;
-    *)
-        error "Workflow (WORKFLOW_TYPE variable) is not set, exiting..."
-        ;;
-esac
+SKIP_DOCKER=1 rake artifact:all || error "rake artifact:all build failed."
+
+if [[ "$WORKFLOW_TYPE" == "staging" ]] && [[ -n "$VERSION_QUALIFIER" ]]; then
+    # Qualifier is passed from CI as optional field and specify the version postfix
+    # in case of alpha or beta releases for staging builds only:
+    # e.g: 8.0.0-alpha1
+    STACK_VERSION="${STACK_VERSION}-${VERSION_QUALIFIER}"
+fi
+
+if [[ "$WORKFLOW_TYPE" == "snapshot" ]]; then
+    STACK_VERSION="${STACK_VERSION}-SNAPSHOT"
+fi
+
+info "Build complete, setting STACK_VERSION to $STACK_VERSION."

 info "Generated Artifacts"
 for file in build/logstash-*; do shasum $file;done
@@ -29,7 +29,7 @@ function save_docker_tarballs {
 # Since we are using the system jruby, we need to make sure our jvm process
 # uses at least 1g of memory, If we don't do this we can get OOM issues when
 # installing gems. See https://github.com/elastic/logstash/issues/5179
-export JRUBY_OPTS="-J-Xmx2g"
+export JRUBY_OPTS="-J-Xmx4g"

 # Extract the version number from the version.yml file
 # e.g.: 8.6.0
@@ -3,6 +3,8 @@ import sys

 import yaml

+YAML_HEADER = '# yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json\n'
+
 def to_bk_key_friendly_string(key):
     """
     Convert and return key to an acceptable format for Buildkite's key: field
@@ -28,6 +30,8 @@ def package_x86_step(branch, workflow_type):
       export PATH="/opt/buildkite-agent/.rbenv/bin:/opt/buildkite-agent/.pyenv/bin:$PATH"
       eval "$(rbenv init -)"
       .buildkite/scripts/dra/build_packages.sh
+    artifact_paths:
+      - "**/*.hprof"
 '''

     return step
@@ -42,6 +46,8 @@ def package_x86_docker_step(branch, workflow_type):
           image: family/platform-ingest-logstash-ubuntu-2204
           machineType: "n2-standard-16"
           diskSizeGb: 200
+    artifact_paths:
+      - "**/*.hprof"
     command: |
       export WORKFLOW_TYPE="{workflow_type}"
       export PATH="/opt/buildkite-agent/.rbenv/bin:/opt/buildkite-agent/.pyenv/bin:$PATH"
@@ -61,6 +67,8 @@ def package_aarch64_docker_step(branch, workflow_type):
           imagePrefix: platform-ingest-logstash-ubuntu-2204-aarch64
           instanceType: "m6g.4xlarge"
           diskSizeGb: 200
+    artifact_paths:
+      - "**/*.hprof"
     command: |
       export WORKFLOW_TYPE="{workflow_type}"
       export PATH="/opt/buildkite-agent/.rbenv/bin:/opt/buildkite-agent/.pyenv/bin:$PATH"
@@ -106,6 +114,7 @@ def build_steps_to_yaml(branch, workflow_type):
 if __name__ == "__main__":
     try:
         workflow_type = os.environ["WORKFLOW_TYPE"]
+        version_qualifier = os.environ.get("VERSION_QUALIFIER", "")
     except ImportError:
         print(f"Missing env variable WORKFLOW_TYPE. Use export WORKFLOW_TYPE=<staging|snapshot>\n.Exiting.")
         exit(1)
@@ -114,18 +123,25 @@ if __name__ == "__main__":

     structure = {"steps": []}

-    # Group defining parallel steps that build and save artifacts
-    group_key = to_bk_key_friendly_string(f"logstash_dra_{workflow_type}")
+    if workflow_type.upper() == "SNAPSHOT" and len(version_qualifier)>0:
+        structure["steps"].append({
+            "label": f"no-op pipeline because prerelease builds (VERSION_QUALIFIER is set to [{version_qualifier}]) don't support the [{workflow_type}] workflow",
+            "command": ":",
+            "skip": "VERSION_QUALIFIER (prerelease builds) not supported with SNAPSHOT DRA",
+        })
+    else:
+        # Group defining parallel steps that build and save artifacts
+        group_key = to_bk_key_friendly_string(f"logstash_dra_{workflow_type}")

-    structure["steps"].append({
-        "group": f":Build Artifacts - {workflow_type.upper()}",
-        "key": group_key,
-        "steps": build_steps_to_yaml(branch, workflow_type),
-    })
+        structure["steps"].append({
+            "group": f":Build Artifacts - {workflow_type.upper()}",
+            "key": group_key,
+            "steps": build_steps_to_yaml(branch, workflow_type),
+        })

-    # Final step: pull artifacts built above and publish them via the release-manager
-    structure["steps"].extend(
-        yaml.safe_load(publish_dra_step(branch, workflow_type, depends_on=group_key)),
-    )
+        # Final step: pull artifacts built above and publish them via the release-manager
+        structure["steps"].extend(
+            yaml.safe_load(publish_dra_step(branch, workflow_type, depends_on=group_key)),
+        )

-    print('# yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json\n' + yaml.dump(structure, Dumper=yaml.Dumper, sort_keys=False))
+    print(YAML_HEADER + yaml.dump(structure, Dumper=yaml.Dumper, sort_keys=False))
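For illustration, this sketch (not part of the diff, reusing its literals) renders roughly what the new no-op branch emits when a qualifier is combined with the snapshot workflow:

```python
# Sketch: render the no-op step the script now emits for SNAPSHOT + VERSION_QUALIFIER.
import yaml

structure = {"steps": [{
    "label": "no-op pipeline because prerelease builds (VERSION_QUALIFIER is set to [alpha1]) don't support the [snapshot] workflow",
    "command": ":",
    "skip": "VERSION_QUALIFIER (prerelease builds) not supported with SNAPSHOT DRA",
}]}
print(yaml.dump(structure, sort_keys=False))
```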
@@ -7,7 +7,9 @@ echo "####################################################################"

 source ./$(dirname "$0")/common.sh

-PLAIN_STACK_VERSION=$STACK_VERSION
+# DRA_BRANCH can be used for manually testing packaging with PRs
+# e.g. define `DRA_BRANCH="main"` and `RUN_SNAPSHOT="true"` under Options/Environment Variables in the Buildkite UI after clicking new Build
+BRANCH="${DRA_BRANCH:="${BUILDKITE_BRANCH:=""}"}"

 # This is the branch selector that needs to be passed to the release-manager
 # It has to be the name of the branch which originates the artifacts.
@@ -15,29 +17,24 @@ RELEASE_VER=`cat versions.yml | sed -n 's/^logstash\:[[:space:]]\([[:digit:]]*\.
 if [ -n "$(git ls-remote --heads origin $RELEASE_VER)" ] ; then
     RELEASE_BRANCH=$RELEASE_VER
 else
-    RELEASE_BRANCH=main
+    RELEASE_BRANCH="${BRANCH:="main"}"
 fi
 echo "RELEASE BRANCH: $RELEASE_BRANCH"

-if [ -n "$VERSION_QUALIFIER_OPT" ]; then
-  # Qualifier is passed from CI as optional field and specify the version postfix
-  # in case of alpha or beta releases:
-  # e.g: 8.0.0-alpha1
-  STACK_VERSION="${STACK_VERSION}-${VERSION_QUALIFIER_OPT}"
-  PLAIN_STACK_VERSION="${PLAIN_STACK_VERSION}-${VERSION_QUALIFIER_OPT}"
-fi
+VERSION_QUALIFIER="${VERSION_QUALIFIER:=""}"

 case "$WORKFLOW_TYPE" in
     snapshot)
-        STACK_VERSION=${STACK_VERSION}-SNAPSHOT
+        :
        ;;
    staging)
        ;;
    *)
-        error "Worklflow (WORKFLOW_TYPE variable) is not set, exiting..."
+        error "Workflow (WORKFLOW_TYPE variable) is not set, exiting..."
        ;;
 esac

-info "Uploading artifacts for ${WORKFLOW_TYPE} workflow on branch: ${RELEASE_BRANCH}"
+info "Uploading artifacts for ${WORKFLOW_TYPE} workflow on branch: ${RELEASE_BRANCH} for version: ${STACK_VERSION} with version_qualifier: ${VERSION_QUALIFIER}"

 if [ "$RELEASE_VER" != "7.17" ]; then
     # Version 7.17.x doesn't generates ARM artifacts for Darwin
@@ -55,7 +52,16 @@ rm -f build/logstash-ubi8-${STACK_VERSION}-docker-image-aarch64.tar.gz
 info "Downloaded ARTIFACTS sha report"
 for file in build/logstash-*; do shasum $file;done

-mv build/distributions/dependencies-reports/logstash-${STACK_VERSION}.csv build/distributions/dependencies-${STACK_VERSION}.csv
+FINAL_VERSION=$STACK_VERSION
+if [[ -n "$VERSION_QUALIFIER" ]]; then
+    FINAL_VERSION="$FINAL_VERSION-${VERSION_QUALIFIER}"
+fi
+
+if [[ "$WORKFLOW_TYPE" == "snapshot" ]]; then
+    FINAL_VERSION="${STACK_VERSION}-SNAPSHOT"
+fi
+
+mv build/distributions/dependencies-reports/logstash-${FINAL_VERSION}.csv build/distributions/dependencies-${FINAL_VERSION}.csv

 # set required permissions on artifacts and directory
 chmod -R a+r build/*
@@ -73,6 +79,22 @@ release_manager_login
 # ensure the latest image has been pulled
 docker pull docker.elastic.co/infra/release-manager:latest

+echo "+++ :clipboard: Listing DRA artifacts for version [$STACK_VERSION], branch [$RELEASE_BRANCH], workflow [$WORKFLOW_TYPE], QUALIFIER [$VERSION_QUALIFIER]"
+docker run --rm \
+  --name release-manager \
+  -e VAULT_ROLE_ID \
+  -e VAULT_SECRET_ID \
+  --mount type=bind,readonly=false,src="$PWD",target=/artifacts \
+  docker.elastic.co/infra/release-manager:latest \
+  cli list \
+  --project logstash \
+  --branch "${RELEASE_BRANCH}" \
+  --commit "$(git rev-parse HEAD)" \
+  --workflow "${WORKFLOW_TYPE}" \
+  --version "${STACK_VERSION}" \
+  --artifact-set main \
+  --qualifier "${VERSION_QUALIFIER}"
+
 info "Running the release manager ..."

 # collect the artifacts for use with the unified build
@@ -88,8 +110,9 @@ docker run --rm \
   --branch ${RELEASE_BRANCH} \
   --commit "$(git rev-parse HEAD)" \
   --workflow "${WORKFLOW_TYPE}" \
-  --version "${PLAIN_STACK_VERSION}" \
+  --version "${STACK_VERSION}" \
   --artifact-set main \
+  --qualifier "${VERSION_QUALIFIER}" \
   ${DRA_DRY_RUN} | tee rm-output.txt

 # extract the summary URL from a release manager output line like:
@@ -147,11 +147,6 @@ rake artifact:deb artifact:rpm
 set -eo pipefail
 source .buildkite/scripts/common/vm-agent-multi-jdk.sh
 source /etc/os-release
-if [[ "$$(echo $$ID_LIKE | tr '[:upper:]' '[:lower:]')" =~ (rhel|fedora) && "$${VERSION_ID%.*}" -le 7 ]]; then
-  # jruby-9.3.10.0 unavailable on centos-7 / oel-7, see https://github.com/jruby/jruby/issues/7579#issuecomment-1425885324 / https://github.com/jruby/jruby/issues/7695
-  # we only need a working jruby to run the acceptance test framework -- the packages have been prebuilt in a previous stage
-  rbenv local jruby-9.4.5.0
-fi
 ci/acceptance_tests.sh"""),
 }
 steps.append(step)
.buildkite/scripts/health-report-tests/README.md (new file, 18 lines)
@@ -0,0 +1,18 @@
## Description
This package contains integration tests for the Health Report API.
Export `LS_BRANCH` to run on a specific branch. By default, it uses the main branch.

## How to run the Health Report Integration test?
### Prerequisites
Make sure you have Python installed. Install the integration test dependencies with the following command:
```shell
python3 -mpip install -r .buildkite/scripts/health-report-tests/requirements.txt
```

### Run the integration tests
```shell
python3 .buildkite/scripts/health-report-tests/main.py
```

### Troubleshooting
- If you get a `WARNING: pip is configured with locations that require TLS/SSL,...` warning message, make sure you have Python >= 3.12.4 installed.
.buildkite/scripts/health-report-tests/__init__.py (new, empty file)
.buildkite/scripts/health-report-tests/bootstrap.py (new file, 101 lines)
@@ -0,0 +1,101 @@
"""
Health Report Integration test bootstrapper with Python script
    - A script to resolve Logstash version if not provided
    - Download LS docker image and spin up
    - When tests finished, teardown the Logstash
"""
import os
import subprocess
import util
import yaml


class Bootstrap:
    ELASTIC_STACK_VERSIONS_URL = "https://artifacts-api.elastic.co/v1/versions"

    def __init__(self) -> None:
        f"""
        A constructor of the {Bootstrap}.
        Returns:
            Resolves Logstash branch considering provided LS_BRANCH
            Checks out git branch
        """
        logstash_branch = os.environ.get("LS_BRANCH")
        if logstash_branch is None:
            # version is not specified, use the main branch, no need to git checkout
            print(f"LS_BRANCH is not specified, using main branch.")
        else:
            # LS_BRANCH accepts major latest as a major.x or specific branch as X.Y
            if logstash_branch.find(".x") == -1:
                print(f"Using specified branch: {logstash_branch}")
                util.git_check_out_branch(logstash_branch)
            else:
                major_version = logstash_branch.split(".")[0]
                if major_version and major_version.isnumeric():
                    resolved_version = self.__resolve_latest_stack_version_for(major_version)
                    minor_version = resolved_version.split(".")[1]
                    branch = major_version + "." + minor_version
                    print(f"Using resolved branch: {branch}")
                    util.git_check_out_branch(branch)
                else:
                    raise ValueError(f"Invalid value set to LS_BRANCH. Please set it properly (ex: 8.x or 9.0) and "
                                     f"rerun again")

    def __resolve_latest_stack_version_for(self, major_version: str) -> str:
        resolved_version = ""
        response = util.call_url_with_retry(self.ELASTIC_STACK_VERSIONS_URL)
        release_versions = response.json()["versions"]
        for release_version in reversed(release_versions):
            if release_version.find("SNAPSHOT") > 0:
                continue
            if release_version.split(".")[0] == major_version:
                print(f"Resolved latest version for {major_version} is {release_version}.")
                resolved_version = release_version
                break

        if resolved_version == "":
            raise ValueError(f"Cannot resolve latest version for {major_version} major")
        return resolved_version

    def install_plugin(self, plugin_path: str) -> None:
        util.run_or_raise_error(
            ["bin/logstash-plugin", "install", plugin_path],
            f"Failed to install {plugin_path}")

    def build_logstash(self):
        print(f"Building Logstash...")
        util.run_or_raise_error(
            ["./gradlew", "clean", "bootstrap", "assemble", "installDefaultGems"],
            "Failed to build Logstash")
        print(f"Logstash has successfully built.")

    def apply_config(self, config: dict) -> None:
        with open(os.getcwd() + "/.buildkite/scripts/health-report-tests/config/pipelines.yml", 'w') as pipelines_file:
            yaml.dump(config, pipelines_file)

    def run_logstash(self, full_start_required: bool) -> subprocess.Popen:
        # --config.reload.automatic is to make instance active
        # it is helpful when testing crash pipeline cases
        config_path = os.getcwd() + "/.buildkite/scripts/health-report-tests/config"
        process = subprocess.Popen(["bin/logstash", "--config.reload.automatic", "--path.settings", config_path,
                                    "-w 1"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, shell=False)
        if process.poll() is not None:
            print(f"Logstash failed to run, check the config and logs, then rerun.")
            return None

        # Read stdout and stderr in real-time
        logs = []
        for stdout_line in iter(process.stdout.readline, ""):
            logs.append(stdout_line.strip())
            # we don't wait for Logstash fully start as we also test slow pipeline start scenarios
            if full_start_required is False and "Starting pipeline" in stdout_line:
                break
            if full_start_required is True and "Pipeline started" in stdout_line:
                break
            if "Logstash shut down" in stdout_line or "Logstash stopped" in stdout_line:
                print(f"Logstash couldn't spin up.")
                print(logs)
                return None

        print(f"Logstash is running with PID: {process.pid}.")
        return process
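The branch resolution performed in `__init__` can be summarized as a standalone function. A sketch (not part of the file), assuming `release_versions` is the ascending list the artifacts API returns:

```python
# Sketch of the "major.x" resolution Bootstrap performs: "8.x" resolves to the
# newest non-SNAPSHOT 8.* release, truncated to its major.minor branch name.
def resolve_branch(ls_branch: str, release_versions: list) -> str:
    if ".x" not in ls_branch:
        return ls_branch  # a concrete branch such as "8.15" is used as-is
    major = ls_branch.split(".")[0]
    for version in reversed(release_versions):
        if "SNAPSHOT" in version:
            continue
        if version.split(".")[0] == major:
            return ".".join(version.split(".")[:2])
    raise ValueError(f"Cannot resolve latest version for {major} major")

assert resolve_branch("8.x", ["8.14.3", "8.15.0", "8.15.1"]) == "8.15"
assert resolve_branch("9.0", ["8.15.1"]) == "9.0"
```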
@@ -0,0 +1 @@
# Intentionally left blank
.buildkite/scripts/health-report-tests/config_validator.py (new file, 69 lines)
@@ -0,0 +1,69 @@
import yaml
from typing import Any, List, Dict


class ConfigValidator:
    REQUIRED_KEYS = {
        "root": ["name", "config", "conditions", "expectation"],
        "config": ["pipeline.id", "config.string"],
        "conditions": ["full_start_required"],
        "expectation": ["status", "symptom", "indicators"],
        "indicators": ["pipelines"],
        "pipelines": ["status", "symptom", "indicators"],
        "DYNAMIC": ["status", "symptom", "diagnosis", "impacts", "details"],
        "details": ["status"],
        "status": ["state"]
    }

    def __init__(self):
        self.yaml_content = None

    def __has_valid_keys(self, data: any, key_path: str, repeated: bool) -> bool:
        if isinstance(data, str) or isinstance(data, bool):  # we reached values
            return True

        # we have two indicators section and for the next repeated ones, we go deeper
        first_key = next(iter(data))
        data = data[first_key] if repeated and key_path == "indicators" else data

        if isinstance(data, dict):
            # pipeline-id is a DYNAMIC
            required = self.REQUIRED_KEYS.get("DYNAMIC" if repeated and key_path == "indicators" else key_path, [])
            repeated = not repeated if key_path == "indicators" else repeated
            for key in required:
                if key not in data:
                    print(f"Missing key '{key}' in '{key_path}'")
                    return False
                else:
                    dic_keys_result = self.__has_valid_keys(data[key], key, repeated)
                    if dic_keys_result is False:
                        return False
        elif isinstance(data, list):
            for item in data:
                list_keys_result = self.__has_valid_keys(item, key_path, repeated)
                if list_keys_result is False:
                    return False
        return True

    def load(self, file_path: str) -> None:
        """Load the YAML file content into self.yaml_content."""
        self.yaml_content: [Dict[str, Any]] = None
        try:
            with open(file_path, 'r') as file:
                self.yaml_content = yaml.safe_load(file)
        except yaml.YAMLError as exc:
            print(f"Error in YAML file: {exc}")
            self.yaml_content = None

    def is_valid(self) -> bool:
        """Validate the entire YAML structure."""
        if self.yaml_content is None:
            print(f"YAML content is empty.")
            return False

        if not isinstance(self.yaml_content, dict):
            print(f"YAML structure is not as expected, it should start with a Dict.")
            return False

        result = self.__has_valid_keys(self.yaml_content, "root", False)
        return True if result is True else False
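A hypothetical usage of the validator against one of the scenario files added below (assuming the module is on the import path, as `main.py` does):

```python
# Sketch: validate a single scenario file the same way main.py does.
from config_validator import ConfigValidator

validator = ConfigValidator()
validator.load(".buildkite/scripts/health-report-tests/tests/slow-start.yaml")
print("valid" if validator.is_valid() else "invalid")
```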
@@ -0,0 +1,16 @@
"""
A class to provide information about Logstash node stats.
"""

import util


class LogstashHealthReport:
    LOGSTASH_HEALTH_REPORT_URL = "http://localhost:9600/_health_report"

    def __init__(self):
        pass

    def get(self):
        response = util.call_url_with_retry(self.LOGSTASH_HEALTH_REPORT_URL)
        return response.json()
.buildkite/scripts/health-report-tests/main.py (new file, 87 lines)
@@ -0,0 +1,87 @@
"""
Main entry point of the LS health report API integration test suites
"""
import glob
import os
import time
import traceback
import yaml
from bootstrap import Bootstrap
from scenario_executor import ScenarioExecutor
from config_validator import ConfigValidator


class BootstrapContextManager:

    def __init__(self):
        pass

    def __enter__(self):
        print(f"Starting Logstash Health Report Integration test.")
        self.bootstrap = Bootstrap()
        self.bootstrap.build_logstash()

        plugin_path = os.getcwd() + "/qa/support/logstash-integration-failure_injector/logstash-integration" \
                                    "-failure_injector-*.gem"
        matching_files = glob.glob(plugin_path)
        if len(matching_files) == 0:
            raise ValueError(f"Could not find logstash-integration-failure_injector plugin.")

        self.bootstrap.install_plugin(matching_files[0])
        print(f"logstash-integration-failure_injector successfully installed.")
        return self.bootstrap

    def __exit__(self, exc_type, exc_value, exc_traceback):
        if exc_type is not None:
            print(traceback.format_exception(exc_type, exc_value, exc_traceback))


def main():
    with BootstrapContextManager() as bootstrap:
        scenario_executor = ScenarioExecutor()
        config_validator = ConfigValidator()

        working_dir = os.getcwd()
        scenario_files_path = working_dir + "/.buildkite/scripts/health-report-tests/tests/*.yaml"
        scenario_files = glob.glob(scenario_files_path)

        for scenario_file in scenario_files:
            print(f"Validating {scenario_file} scenario file.")
            config_validator.load(scenario_file)
            if config_validator.is_valid() is False:
                print(f"{scenario_file} scenario file is not valid.")
                return
            else:
                print(f"Validation succeeded.")

        has_failed_scenario = False
        for scenario_file in scenario_files:
            with open(scenario_file, 'r') as file:
                # scenario_content: Dict[str, Any] = None
                scenario_content = yaml.safe_load(file)
                print(f"Testing `{scenario_content.get('name')}` scenario.")
                scenario_name = scenario_content['name']

                is_full_start_required = next(sub.get('full_start_required') for sub in
                                              scenario_content.get('conditions') if 'full_start_required' in sub)
                config = scenario_content['config']
                if config is not None:
                    bootstrap.apply_config(config)
                expectations = scenario_content.get("expectation")
                process = bootstrap.run_logstash(is_full_start_required)
                if process is not None:
                    try:
                        scenario_executor.on(scenario_name, expectations)
                    except Exception as e:
                        print(e)
                        has_failed_scenario = True
                    process.terminate()
                    time.sleep(5)  # leave some window to terminate the process

        if has_failed_scenario:
            # intentionally fail due to visibility
            raise Exception("Some of scenarios failed, check the log for details.")


if __name__ == "__main__":
    main()
.buildkite/scripts/health-report-tests/main.sh (new executable file, 17 lines)
@@ -0,0 +1,17 @@
#!/bin/bash

set -euo pipefail

export PATH="/opt/buildkite-agent/.rbenv/bin:/opt/buildkite-agent/.pyenv/bin:/opt/buildkite-agent/.java/bin:$PATH"
export JAVA_HOME="/opt/buildkite-agent/.java"
eval "$(rbenv init -)"
eval "$(pyenv init -)"

echo "--- Installing pip"
sudo apt-get install python3-pip -y

echo "--- Installing dependencies"
python3 -mpip install -r .buildkite/scripts/health-report-tests/requirements.txt

echo "--- Running tests"
python3 .buildkite/scripts/health-report-tests/main.py
.buildkite/scripts/health-report-tests/requirements.txt (new file, 2 lines)
@@ -0,0 +1,2 @@
requests==2.32.3
pyyaml==6.0.2
.buildkite/scripts/health-report-tests/scenario_executor.py (new file, 65 lines)
@@ -0,0 +1,65 @@
"""
A class to execute the given scenario for Logstash Health Report integration test
"""
import time
from logstash_health_report import LogstashHealthReport


class ScenarioExecutor:
    logstash_health_report_api = LogstashHealthReport()

    def __init__(self):
        pass

    def __has_intersection(self, expects, results):
        # we expect expects to be existing in results
        for expect in expects:
            for result in results:
                if result.get('help_url') and "health-report-pipeline-status.html#" not in result.get('help_url'):
                    return False
                if not all(key in result and result[key] == value for key, value in expect.items()):
                    return False
        return True

    def __get_difference(self, differences: list, expectations: dict, reports: dict) -> dict:
        for key in expectations.keys():

            if type(expectations.get(key)) != type(reports.get(key)):
                differences.append(f"Scenario expectation and Health API report structure differs for {key}.")
                return differences

            if isinstance(expectations.get(key), str):
                if expectations.get(key) != reports.get(key):
                    differences.append({key: {"expected": expectations.get(key), "got": reports.get(key)}})
                continue
            elif isinstance(expectations.get(key), dict):
                self.__get_difference(differences, expectations.get(key), reports.get(key))
            elif isinstance(expectations.get(key), list):
                if not self.__has_intersection(expectations.get(key), reports.get(key)):
                    differences.append({key: {"expected": expectations.get(key), "got": reports.get(key)}})
        return differences

    def __is_expected(self, expectations: dict) -> None:
        reports = self.logstash_health_report_api.get()
        differences = self.__get_difference([], expectations, reports)
        if differences:
            print("Differences found in 'expectation' section between YAML content and stats:")
            for diff in differences:
                print(f"Difference: {diff}")
            return False
        else:
            return True

    def on(self, scenario_name: str, expectations: dict) -> None:
        # retriable check the expectations
        attempts = 5
        while self.__is_expected(expectations) is False:
            attempts = attempts - 1
            if attempts == 0:
                break
            time.sleep(1)

        if attempts == 0:
            raise Exception(f"{scenario_name} failed.")
        else:
            print(f"Scenario `{scenario_name}` expectation meets the health report stats.")
@@ -0,0 +1,31 @@
name: "Abnormally terminated pipeline"
config:
  - pipeline.id: abnormally-terminated-pp
    config.string: |
      input { heartbeat { interval => 1 } }
      filter { failure_injector { crash_at => filter } }
      output { stdout {} }
    pipeline.workers: 1
    pipeline.batch.size: 1
conditions:
  - full_start_required: true
expectation:
  status: "red"
  symptom: "1 indicator is unhealthy (`pipelines`)"
  indicators:
    pipelines:
      status: "red"
      symptom: "1 indicator is unhealthy (`abnormally-terminated-pp`)"
      indicators:
        abnormally-terminated-pp:
          status: "red"
          symptom: "The pipeline is unhealthy; 1 area is impacted and 1 diagnosis is available"
          diagnosis:
            - cause: "pipeline is not running, likely because it has encountered an error"
            - action: "view logs to determine the cause of abnormal pipeline shutdown"
          impacts:
            - description: "the pipeline is not currently processing"
            - impact_areas: ["pipeline_execution"]
          details:
            status:
              state: "TERMINATED"
@@ -0,0 +1,29 @@
name: "Successfully terminated pipeline"
config:
  - pipeline.id: normally-terminated-pp
    config.string: |
      input { generator { count => 1 } }
      output { stdout {} }
    pipeline.workers: 1
    pipeline.batch.size: 1
conditions:
  - full_start_required: true
expectation:
  status: "yellow"
  symptom: "1 indicator is concerning (`pipelines`)"
  indicators:
    pipelines:
      status: "yellow"
      symptom: "1 indicator is concerning (`normally-terminated-pp`)"
      indicators:
        normally-terminated-pp:
          status: "yellow"
          symptom: "The pipeline is concerning; 1 area is impacted and 1 diagnosis is available"
          diagnosis:
            - cause: "pipeline has finished running because its inputs have been closed and events have been processed"
            - action: "if you expect this pipeline to run indefinitely, you will need to configure its inputs to continue receiving or fetching events"
          impacts:
            - impact_areas: ["pipeline_execution"]
          details:
            status:
              state: "FINISHED"
.buildkite/scripts/health-report-tests/tests/slow-start.yaml (new file, 30 lines)
@@ -0,0 +1,30 @@
name: "Slow start pipeline"
config:
  - pipeline.id: slow-start-pp
    config.string: |
      input { heartbeat {} }
      filter { failure_injector { degrade_at => [register] } }
      output { stdout {} }
    pipeline.workers: 1
    pipeline.batch.size: 1
conditions:
  - full_start_required: false
expectation:
  status: "yellow"
  symptom: "1 indicator is concerning (`pipelines`)"
  indicators:
    pipelines:
      status: "yellow"
      symptom: "1 indicator is concerning (`slow-start-pp`)"
      indicators:
        slow-start-pp:
          status: "yellow"
          symptom: "The pipeline is concerning; 1 area is impacted and 1 diagnosis is available"
          diagnosis:
            - cause: "pipeline is loading"
            - action: "if pipeline does not come up quickly, you may need to check the logs to see if it is stalled"
          impacts:
            - impact_areas: ["pipeline_execution"]
          details:
            status:
              state: "LOADING"
.buildkite/scripts/health-report-tests/util.py (new file, 36 lines)
@@ -0,0 +1,36 @@
import os
import requests
import subprocess
from requests.adapters import HTTPAdapter, Retry


def call_url_with_retry(url: str, max_retries: int = 5, delay: int = 1) -> requests.Response:
    f"""
    Calls the given {url} with maximum of {max_retries} retries with {delay} delay.
    """
    schema = "https://" if "https://" in url else "http://"
    session = requests.Session()
    # retry on most common failures such as connection timeout(408), etc...
    retries = Retry(total=max_retries, backoff_factor=delay, status_forcelist=[408, 502, 503, 504])
    session.mount(schema, HTTPAdapter(max_retries=retries))
    return session.get(url)


def git_check_out_branch(branch_name: str) -> None:
    f"""
    Checks out specified branch or fails with error if checkout operation fails.
    """
    run_or_raise_error(["git", "checkout", branch_name],
                       "Error occurred while checking out the " + branch_name + " branch")


def run_or_raise_error(commands: list, error_message):
    f"""
    Executes the {list} commands and raises an {Exception} if operation fails.
    """
    result = subprocess.run(commands, env=os.environ.copy(), universal_newlines=True, stdout=subprocess.PIPE)
    if result.returncode != 0:
        full_error_message = (error_message + ", output: " + result.stdout.decode('utf-8')) \
            if result.stdout else error_message
        raise Exception(f"{full_error_message}")
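A brief usage sketch of the retrying helper above (the endpoint is the one `logstash_health_report.py` queries):

```python
# Sketch: call_url_with_retry retries 408/502/503/504 responses with backoff
# before returning the final requests.Response.
from util import call_url_with_retry

response = call_url_with_retry("http://localhost:9600/_health_report")
print(response.status_code, response.json().get("status"))
```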
@@ -15,6 +15,8 @@ steps:
         multiple: true
         default: "${DEFAULT_MATRIX_OS}"
         options:
+          - label: "Windows 2025"
+            value: "windows-2025"
           - label: "Windows 2022"
             value: "windows-2022"
           - label: "Windows 2019"
@@ -1 +1 @@
-jruby-9.3.10.0
+jruby-9.4.9.0
Gemfile.jruby-3.1.lock.release (new file, 1017 lines; diff suppressed because it is too large)
@@ -14,6 +14,7 @@ gem "logstash-output-elasticsearch", ">= 11.14.0"
 gem "polyglot", require: false
 gem "treetop", require: false
 gem "faraday", "~> 1", :require => false # due elasticsearch-transport (elastic-transport) depending faraday '~> 1'
+gem "minitar", "~> 1", :group => :build
 gem "childprocess", "~> 4", :group => :build
 gem "fpm", "~> 1", ">= 1.14.1", :group => :build # compound due to bugfix https://github.com/jordansissel/fpm/pull/1856
 gem "gems", "~> 1", :group => :build
@@ -39,5 +40,7 @@ gem "simplecov", "~> 0.22.0", :group => :development
 gem "simplecov-json", require: false, :group => :development
 gem "jar-dependencies", "= 0.4.1" # Gem::LoadError with jar-dependencies 0.4.2
 gem "murmurhash3", "= 0.1.6" # Pins until version 0.1.7-java is released
 gem "date", "= 3.3.3"
 gem "thwait"
+gem "bigdecimal", "~> 3.1"
+gem "psych", "5.2.2"
@@ -101,6 +101,7 @@ allprojects {
       "--add-opens=java.base/java.lang=ALL-UNNAMED",
       "--add-opens=java.base/java.util=ALL-UNNAMED"
     ]
+    maxHeapSize = "2g"
    //https://stackoverflow.com/questions/3963708/gradle-how-to-display-test-results-in-the-console-in-real-time
    testLogging {
      // set options for log level LIFECYCLE
@@ -32,6 +32,7 @@ spec:
       - resource:logstash-linux-jdk-matrix-pipeline
       - resource:logstash-windows-jdk-matrix-pipeline
       - resource:logstash-benchmark-pipeline
+      - resource:logstash-health-report-tests-pipeline

 # ***********************************
 # Declare serverless IT pipeline
@@ -642,4 +643,57 @@ spec:

 # *******************************
 # SECTION END: Benchmark pipeline
 # *******************************
+
+# ***********************************
+# Declare Health Report Tests pipeline
+# ***********************************
+---
+# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/rre.schema.json
+apiVersion: backstage.io/v1alpha1
+kind: Resource
+metadata:
+  name: logstash-health-report-tests-pipeline
+  description: Buildkite pipeline for the Logstash Health Report Tests
+  links:
+    - title: ':logstash Logstash Health Report Tests (Daily, Auto) pipeline'
+      url: https://buildkite.com/elastic/logstash-health-report-tests-pipeline
+spec:
+  type: buildkite-pipeline
+  owner: group:logstash
+  system: platform-ingest
+  implementation:
+    apiVersion: buildkite.elastic.dev/v1
+    kind: Pipeline
+    metadata:
+      name: logstash-health-report-tests-pipeline
+      description: ':logstash: Logstash Health Report tests :pipeline:'
+    spec:
+      repository: elastic/logstash
+      pipeline_file: ".buildkite/health_report_tests_pipeline.yml"
+      maximum_timeout_in_minutes: 60
+      provider_settings:
+        trigger_mode: none # don't trigger jobs from github activity
+      env:
+        ELASTIC_SLACK_NOTIFICATIONS_ENABLED: 'true'
+        SLACK_NOTIFICATIONS_CHANNEL: '#logstash-build'
+        SLACK_NOTIFICATIONS_ON_SUCCESS: 'false'
+        SLACK_NOTIFICATIONS_SKIP_FOR_RETRIES: 'true'
+      teams:
+        ingest-fp:
+          access_level: MANAGE_BUILD_AND_READ
+        logstash:
+          access_level: MANAGE_BUILD_AND_READ
+        ingest-eng-prod:
+          access_level: MANAGE_BUILD_AND_READ
+        everyone:
+          access_level: READ_ONLY
+      schedules:
+        Daily Health Report tests on main branch:
+          branch: main
+          cronline: 30 20 * * *
+          message: Daily trigger of Health Report Tests Pipeline
+
+# *******************************
+# SECTION END: Health Report Tests pipeline
+# *******************************
@@ -154,7 +154,7 @@ appender.deprecation_rolling.policies.size.size = 100MB
 appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
 appender.deprecation_rolling.strategy.max = 30

-logger.deprecation.name = org.logstash.deprecation, deprecation
+logger.deprecation.name = org.logstash.deprecation
 logger.deprecation.level = WARN
 logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_plain_rolling
 logger.deprecation.additivity = false
@@ -332,7 +332,7 @@
 #
 # Determine where to allocate memory buffers, for plugins that leverage them.
 # Default to direct, optionally can be switched to heap to select Java heap space.
-# pipeline.buffer.type: direct
+# pipeline.buffer.type: heap
 #
 # ------------ X-Pack Settings (not applicable for OSS build)--------------
 #
@@ -15,7 +15,7 @@ RUN go build
 <%# Start image_flavor 'ironbank' %>
 ARG BASE_REGISTRY=registry1.dso.mil
 ARG BASE_IMAGE=ironbank/redhat/ubi/ubi9
-ARG BASE_TAG=9.3
+ARG BASE_TAG=9.5
 ARG LOGSTASH_VERSION=<%= elastic_version %>
 ARG GOLANG_VERSION=1.21.8
@@ -106,7 +106,7 @@ FROM <%= base_image %>

 RUN for iter in {1..10}; do \
 <% if image_flavor == 'wolfi' %>
-    <%= package_manager %> add --no-cache curl bash && \
+    <%= package_manager %> add --no-cache curl bash openssl && \
 <% else -%>
 <% if image_flavor == 'full' || image_flavor == 'oss' -%>
     export DEBIAN_FRONTEND=noninteractive && \
@@ -14,7 +14,7 @@ tags:
 # Build args passed to Dockerfile ARGs
 args:
   BASE_IMAGE: "redhat/ubi/ubi9"
-  BASE_TAG: "9.3"
+  BASE_TAG: "9.5"
   LOGSTASH_VERSION: "<%= elastic_version %>"
   GOLANG_VERSION: "1.21.8"
docs/static/azure-module.asciidoc (vendored, 2 lines changed)
@@ -7,7 +7,7 @@ experimental[]
 <titleabbrev>Azure Module (deprecated)</titleabbrev>
 ++++

-deprecated[7.8.0, "We recommend using the Azure modules in {filebeat-ref}/filebeat-module-azure.html[{Filebeat}] and {metricbeat-ref}/metricbeat-module-azure.html[{metricbeat}], which are compliant with the {ecs-ref}/index.html[Elastic Common Schema (ECS)]"]
+deprecated[7.8.0, "Replaced by the https://www.elastic.co/guide/en/integrations/current/azure-events.html[Azure Logs integration]."]

 The https://azure.microsoft.com/en-us/overview/what-is-azure/[Microsoft Azure]
 module in Logstash helps you easily integrate your Azure activity logs and SQL
docs/static/dead-letter-queues.asciidoc (vendored, 12 lines changed)
@@ -21,9 +21,10 @@ loss in this situation, you can <<configuring-dlq,configure Logstash>> to write
 unsuccessful events to a dead letter queue instead of dropping them.

 NOTE: The dead letter queue is currently supported only for the
-<<plugins-outputs-elasticsearch,{es} output>>. The dead letter queue is used for
-documents with response codes of 400 or 404, both of which indicate an event
+<<plugins-outputs-elasticsearch,{es} output>> and <<conditionals, conditional statements evaluation>>.
+The dead letter queue is used for documents with response codes of 400 or 404, both of which indicate an event
 that cannot be retried.
+It's also used when a conditional evaluation encounters an error.

 Each event written to the dead letter queue includes the original event,
 metadata that describes the reason the event could not be processed, information
@@ -57,7 +58,12 @@ status code per entry to indicate why the action could not be performed.
 If the DLQ is configured, individual indexing failures are routed there.

 Even if you regularly process events, events remain in the dead letter queue.
-The dead letter queue requires <<dlq-clear,manual intervention>> to clear it.
+The dead letter queue requires <<dlq-clear,manual intervention>> to clear it.
+
+[[conditionals-dlq]]
+==== Conditional statements and the dead letter queue
+When a conditional statement encounters an error in processing an event, such as comparing string and integer values,
+the event, as it is at the time of evaluation, is inserted into the dead letter queue.

 [[configuring-dlq]]
 ==== Configuring {ls} to use dead letter queues
docs/static/ls-to-cloud.asciidoc (vendored, 2 lines changed)
@@ -7,7 +7,7 @@ When you configure the Elasticsearch output plugin to use <<plugins-outputs-elas
 Examples:

 * `output {elasticsearch { cloud_id => "<cloud id>" cloud_auth => "<cloud auth>" } }`
-* `output {elasticsearch { cloud_id => "<cloud id>" api_key => "<api key>" } }``
+* `output {elasticsearch { cloud_id => "<cloud id>" api_key => "<api key>" } }`

 {ess-leadin-short}
docs/static/monitoring/monitoring-apis.asciidoc (vendored, 156 lines changed)
@@ -2,13 +2,13 @@
 [[monitoring]]
 == APIs for monitoring {ls}

-{ls} provides monitoring APIs for retrieving runtime metrics
-about {ls}:
+{ls} provides monitoring APIs for retrieving runtime information about {ls}:

 * <<node-info-api>>
 * <<plugins-api>>
 * <<node-stats-api>>
 * <<hot-threads-api>>
+* <<logstash-health-report-api>>


 You can use the root resource to retrieve general information about the Logstash instance, including
@ -1184,3 +1184,155 @@ Example of a human-readable response:
|
|||
org.jruby.internal.runtime.NativeThread.join(NativeThread.java:75)
|
||||
|
||||
--------------------------------------------------
|
||||
|
||||
|
||||
[[logstash-health-report-api]]
|
||||
=== Health report API
|
||||
|
||||
An API that reports the health status of Logstash.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
curl -XGET 'localhost:9600/_health_report?pretty'
|
||||
--------------------------------------------------
|
||||
|
||||
==== Description
|
||||
|
||||
The health API returns a report with the health status of Logstash and the pipelines that are running inside of it.
|
||||
The report contains a list of indicators that compose Logstash functionality.
|
||||
|
||||
Each indicator has a health status of: `green`, `unknown`, `yellow`, or `red`.
|
||||
The indicator will provide an explanation and metadata describing the reason for its current health status.
|
||||
|
||||
The top-level status is controlled by the worst indicator status.
|
||||
|
||||
In the event that an indicator's status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue.
|
||||
Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system.
|
||||
|
||||
Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system.
|
||||
The root cause and remediation steps are encapsulated in a `diagnosis`.
|
||||
A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, and the URL for detailed troubleshooting help.
|
||||
|
||||
NOTE: The health indicators perform root cause analysis of non-green health statuses.
|
||||
This can be computationally expensive when called frequently.
|
||||
|
||||
==== Response body

`status`::
(Optional, string) Health status of {ls}, based on the aggregated status of all indicators. Statuses are:

`green`:::
{ls} is healthy.

`unknown`:::
The health of {ls} could not be determined.

`yellow`:::
The functionality of {ls} is in a degraded state and may need remediation to avoid the health becoming `red`.

`red`:::
{ls} is experiencing an outage or certain features are unavailable for use.

`indicators`::
(object) Information about the health of the {ls} indicators.
+
.Properties of `indicators`
[%collapsible%open]
====
`<indicator>`::
(object) Contains health results for an indicator.
+
.Properties of `<indicator>`
[%collapsible%open]
=======
`status`::
(string) Health status of the indicator. Statuses are:

`green`:::
The indicator is healthy.

`unknown`:::
The health of the indicator could not be determined.

`yellow`:::
The functionality of an indicator is in a degraded state and may need remediation to avoid the health becoming `red`.

`red`:::
The indicator is experiencing an outage or certain features are unavailable for use.

`symptom`::
(string) A message providing information about the current health status.

`details`::
(Optional, object) An object that contains additional information about the indicator that has led to the current health status result.
Each indicator has <<logstash-health-api-response-details, a unique set of details>>.

`impacts`::
(Optional, array) If a non-healthy status is returned, indicators may include a list of impacts that this health status will have on {ls}.
+
.Properties of `impacts`
[%collapsible%open]
========
`severity`::
(integer) How important this impact is to the functionality of {ls}.
A value of 1 is the highest severity, with larger values indicating lower severity.

`description`::
(string) A description of the impact on {ls}.

`impact_areas`::
(array of strings) The areas of {ls} functionality that this impact affects.
Possible values are:
+
--
* `pipeline_execution`
--

========

`diagnosis`::
(Optional, array) If a non-healthy status is returned, indicators may include a list of diagnoses that encapsulate the cause of the health issue and an action to take in order to remediate the problem.
+
.Properties of `diagnosis`
[%collapsible%open]
========
`cause`::
(string) A description of a root cause of this health problem.

`action`::
(string) A brief description of the steps that should be taken to remediate the problem.
A more detailed step-by-step guide to remediate the problem is provided by the `help_url` field.

`help_url`::
(string) A link to the troubleshooting guide for fixing the health problem.
========
=======
====

[role="child_attributes"]
[[logstash-health-api-response-details]]
==== Indicator Details

Each health indicator in the health API returns a set of details that further explains the state of the system.
The contents and structure of the details are unique to each indicator.

[[logstash-health-api-response-details-pipeline]]
===== Pipeline Indicator Details

`pipelines/indicators/<pipeline_id>/details`::
(object) Information about the specified pipeline.
+
.Properties of `pipelines/indicators/<pipeline_id>/details`
[%collapsible%open]
====
`status`::
(object) Details related to the pipeline's current status and run-state.
+
.Properties of `status`
[%collapsible%open]
========
`state`::
(string) The current state of the pipeline, including whether it is `loading`, `running`, `finished`, or `terminated`.
========
====
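As a sketch of reading these details programmatically, the run-state of one pipeline can be extracted with `jq`; the pipeline id `main` and the exact JSON path are assumptions derived from the field layout documented above:

[source,shell]
--------------------------------------------------
# print the run-state of the pipeline with id "main"; the path mirrors
# pipelines/indicators/<pipeline_id>/details/status/state as documented above
curl -s 'localhost:9600/_health_report' \
  | jq '.indicators.pipelines.indicators.main.details.status.state'
--------------------------------------------------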
docs/static/plugin-manager.asciidoc
@@ -112,6 +112,22 @@ bin/logstash-plugin update logstash-input-github <2>
<1> updates all installed plugins
<2> updates only the plugin you specify

[discrete]
[[updating-major]]
==== Major version plugin updates

By default, the plugin manager updates only plugins for which newer _minor_ or _patch_ versions exist, to avoid introducing breaking changes.
If you wish to also include breaking changes, specify `--level=major`.

[source,shell]
----------------------------------
bin/logstash-plugin update --level=major <1>
bin/logstash-plugin update --level=major logstash-input-github <2>
----------------------------------
<1> updates all installed plugins to latest, including major versions with breaking changes
<2> updates only the plugin you specify to latest, including major versions with breaking changes

[discrete]
[[removing-plugins]]
=== Removing plugins
docs/static/releasenotes.asciidoc
@@ -3,6 +3,15 @@

This section summarizes the changes in the following releases:

* <<logstash-8-16-4,Logstash 8.16.4>>
* <<logstash-8-16-3,Logstash 8.16.3>>
* <<logstash-8-16-2,Logstash 8.16.2>>
* <<logstash-8-16-1,Logstash 8.16.1>>
* <<logstash-8-16-0,Logstash 8.16.0>>
* <<logstash-8-15-5,Logstash 8.15.5>>
* <<logstash-8-15-4,Logstash 8.15.4>>
* <<logstash-8-15-3,Logstash 8.15.3>>
* <<logstash-8-15-2,Logstash 8.15.2>>
* <<logstash-8-15-1,Logstash 8.15.1>>
* <<logstash-8-15-0,Logstash 8.15.0>>
* <<logstash-8-14-3,Logstash 8.14.3>>

@@ -66,9 +75,505 @@ This section summarizes the changes in the following releases:
* <<logstash-8-0-0-alpha1,Logstash 8.0.0-alpha1>>
[[logstash-8-16-4]]
=== Logstash 8.16.4 Release Notes

[[notable-8-16-4]]
==== Notable issues fixed

* The plugin manager's `update` command now correctly updates only _minor_ or _patch_ versions of plugins by default to avoid breaking changes.
If you wish to also include breaking changes, you must specify `--level=major` https://github.com/elastic/logstash/pull/16975[#16975]
* The plugin manager no longer has issues installing plugins with embedded jars or depending on snakeyaml https://github.com/elastic/logstash/pull/16925[#16925]
* The plugin manager now correctly supports authenticated proxies by transmitting the username and password from the proxy environment URI https://github.com/elastic/logstash/pull/16957[#16957]
* The buffered-tokenizer, which is used by many plugins to split streams of bytes by a delimiter, now properly resumes at the next delimiter after encountering a buffer-full condition https://github.com/elastic/logstash/pull/17021[#17021]

[[dependencies-8-16-4]]
==== Updates to dependencies

* Update JDK to 21.0.6+7 https://github.com/elastic/logstash/pull/16990[#16990]

[[plugins-8-16-4]]
==== Plugins

*Elastic_integration Filter - 8.16.1*

* Provides guidance in logs when the plugin version mismatches the connected Elasticsearch `major.minor` version https://github.com/elastic/logstash-filter-elastic_integration/pull/253[#253]
* Embeds Ingest Node components from Elasticsearch 8.16
* Compatible with Logstash 8.15+

*Elasticsearch Filter - 3.17.0*

* Added support for custom headers https://github.com/logstash-plugins/logstash-filter-elasticsearch/pull/190[#190]

*Beats Input - 6.9.2*

* Name netty threads according to their purpose and the plugin id https://github.com/logstash-plugins/logstash-input-beats/pull/511[#511]

*Elasticsearch Input - 4.21.1*

* Fix: prevent plugin crash when hits contain illegal structure https://github.com/logstash-plugins/logstash-input-elasticsearch/pull/183[#183]
* When a hit cannot be converted to an event, the input now emits an event tagged with `_elasticsearch_input_failure` with an `[event][original]` containing a JSON-encoded string representation of the entire hit.

* Add support for custom headers https://github.com/logstash-plugins/logstash-input-elasticsearch/pull/217[#217]

*Http Input - 3.10.1*

* Properly name netty threads https://github.com/logstash-plugins/logstash-input-http/pull/191[#191]

* Add improved proactive rate-limiting, rejecting new requests when the queue has been actively blocking for more than 10 seconds https://github.com/logstash-plugins/logstash-input-http/pull/179[#179]

*Tcp Input - 6.4.5*

* Name netty threads with plugin id and their purpose https://github.com/logstash-plugins/logstash-input-tcp/pull/229[#229]

*Snmp Integration - 4.0.6*

* [DOC] Fix typo in snmptrap migration section https://github.com/logstash-plugins/logstash-integration-snmp/pull/74[#74]

*Elasticsearch Output - 11.22.12*

* Properly handle http code 413 (Payload Too Large) https://github.com/logstash-plugins/logstash-output-elasticsearch/pull/1199[#1199]
* Remove irrelevant log warning about elastic stack version https://github.com/logstash-plugins/logstash-output-elasticsearch/pull/1202[#1202]
[[logstash-8-16-3]]
=== Logstash 8.16.3 Release Notes

[[notable-8.16.3]]
==== Notable issues fixed

* Avoid lock contention when ecs_compatibility is explicitly specified https://github.com/elastic/logstash/pull/16786[#16786]
* Ensure that the Jackson read constraints defaults (Maximum Number value length, Maximum String value length, and Maximum Nesting depth) are applied at runtime if they are absent from jvm.options https://github.com/elastic/logstash/pull/16832[#16832]

[[dependencies-8.16.3]]
==== Updates to dependencies

* Update Iron Bank base image to ubi9/9.5 https://github.com/elastic/logstash/pull/16825[#16825]

[[plugins-8.16.3]]
==== Plugins

*Elastic_integration Filter - 8.16.0*

* Aligns with stack major and minor versions https://github.com/elastic/logstash-filter-elastic_integration/pull/210[#210]
* Embeds Ingest Node components from Elasticsearch 8.16
* Compatible with Logstash 8.15+

*Azure_event_hubs Input - 1.5.1*

* Updated multiple Java dependencies https://github.com/logstash-plugins/logstash-input-azure_event_hubs/pull/99[#99]

*Elastic_enterprise_search Integration - 3.0.1*

* Add deprecation log for App Search and Workplace Search. https://github.com/logstash-plugins/logstash-integration-elastic_enterprise_search/pull/22[#22]

*Jdbc Integration - 5.5.2*

* The input plugin's prior behaviour of opening a new database connection for each scheduled run (removed in `v5.4.1`) is restored, ensuring that infrequently-run schedules do not hold open connections to their databases indefinitely, _without_ reintroducing the leak https://github.com/logstash-plugins/logstash-integration-jdbc/pull/130[#130]

*Kafka Integration - 11.5.4*

* Update kafka client to 3.8.1 and transitive dependencies https://github.com/logstash-plugins/logstash-integration-kafka/pull/188[#188]
* Removed `jar-dependencies` dependency https://github.com/logstash-plugins/logstash-integration-kafka/pull/187[#187]

*Snmp Integration - 4.0.5*

* Fix typo resulting in "uninitialized constant" exception for invalid column name https://github.com/logstash-plugins/logstash-integration-snmp/pull/73[#73]
[[logstash-8-16-2]]
=== Logstash 8.16.2 Release Notes

[[notable-8-16-2]]
==== Notable issues fixed

* Reset internal size counter in BufferedTokenizer during flush https://github.com/elastic/logstash/pull/16771[#16771].
Fixes <<known-issue-8-16-1-json_lines,"input buffer full" error>> that could appear with versions 8.16.0 and 8.16.1.
* Ensure overrides to jackson settings are applied during startup https://github.com/elastic/logstash/pull/16758[#16758].

[[dependencies-8-16-2]]
==== Updates to dependencies

* Pin `jar-dependencies` to `0.4.1` and `date` to `3.3.3` to avoid clashes between what's bundled with JRuby and newer versions in Rubygems https://github.com/elastic/logstash/pull/16749[#16749] https://github.com/elastic/logstash/pull/16779[#16779]

==== Plugins

*Elastic_integration Filter - 0.1.17*

* Add `x-elastic-product-origin` header to Elasticsearch requests https://github.com/elastic/logstash-filter-elastic_integration/pull/197[#197]

*Elasticsearch Filter - 3.16.2*

* Add `x-elastic-product-origin` header to Elasticsearch requests https://github.com/logstash-plugins/logstash-filter-elasticsearch/pull/185[#185]

*Elasticsearch Input - 4.20.5*

* Add `x-elastic-product-origin` header to Elasticsearch requests https://github.com/logstash-plugins/logstash-input-elasticsearch/pull/211[#211]

*Jdbc Integration - 5.5.1*

* Document `statement_retry_attempts` and `statement_retry_attempts_wait_time` options https://github.com/logstash-plugins/logstash-integration-jdbc/pull/177[#177]

*Kafka Integration - 11.5.3*

* Update kafka client to 3.7.1 and transitive dependencies https://github.com/logstash-plugins/logstash-integration-kafka/pull/186[#186]

*Logstash Integration - 1.0.4*

* Align output plugin with documentation by producing event-oriented ndjson-compatible payloads instead of a JSON array of events https://github.com/logstash-plugins/logstash-integration-logstash/pull/25[#25]

*Elasticsearch Output - 11.22.10*

* Add `x-elastic-product-origin` header to Elasticsearch requests https://github.com/logstash-plugins/logstash-output-elasticsearch/pull/1195[#1195]
[[logstash-8-16-1]]
=== Logstash 8.16.1 Release Notes

[[known-issues-8-16-1]]
==== Known issue

[[known-issue-8-16-1-json_lines]]
===== "Input buffer full" error with {ls} 8.16.0, 8.16.1, or 8.17.0

If you are using `json_lines` codec 3.2.0 (or later) with {ls} 8.16.0, 8.16.1, or 8.17.0, you may see an error similar to this one, crashing the pipelines:
```
unable to process event. {:message=>"input buffer full", :class=>"Java::JavaLang::IllegalStateException", :backtrace=>["org.logstash.common.BufferedTokenizerExt.extract(BufferedTokenizerExt.java:83)", "usr.share.logstash.vendor.bundle.jruby.$3_dot_1_dot_0.gems.logstash_minus_codec_minus_json_lines_minus_3_dot_2_dot_2.lib.logstash.codecs.json_lines.RUBY$method$decode$0(/usr/share/logstash/vendor/bundle/jruby/3.1.0/gems/logstash-codec-json_lines-3.2.2/lib/logstash/codecs/json_lines.rb:69)", "org.jruby.internal.runtime.methods.CompiledIRMethod.call(CompiledIRMethod.java:165)", "org.jruby.internal.runtime.methods.MixedModeIRMethod.call(MixedModeIRMethod.java:185)",
```
The issue was fixed in https://github.com/elastic/logstash/pull/16760[#16760].

This problem is most likely to be seen when you are using the <<plugins-integrations-logstash,{ls} integration>> plugin to ship data between two {ls} instances, but may appear in other situations, too.

**Workaround for {ls}-to-{ls} communication**

The {ls}-to-{ls} issue can be mitigated by:

* Downgrading the _receiving_ {ls} to `8.16.2`, or any {ls} in the `8.15` series, **_AND/OR_**
* Upgrading the {ls} integration filter of the _sending_ {ls} to version `1.0.4`.

**Workaround for other `json_lines` codec situations**

Other `json_lines` codec issues can be mitigated by:

* Downgrading {ls} to `8.16.2`, or any {ls} in the `8.15` series.

[[notable-8-16-1]]
==== Notable issues fixed

* PipelineBusV2 deadlock proofing: We fixed an issue that could cause a deadlock when the pipeline-to-pipeline feature was in use, causing pipelines (and consequently {ls}) to never terminate https://github.com/elastic/logstash/pull/16680[#16680]

==== Plugins

*Elastic_integration Filter - 0.1.16*

* Reflect the Elasticsearch GeoIP changes into the plugin and sync with the Elasticsearch 8.16 branch https://github.com/elastic/logstash-filter-elastic_integration/pull/170[#170]

*Xml Filter - 4.2.1*

* Patch rexml to improve performance of multi-threaded xml parsing https://github.com/logstash-plugins/logstash-filter-xml/pull/84[#84]

*Beats Input - 6.9.1*

* Upgrade netty to 4.1.115 https://github.com/logstash-plugins/logstash-input-beats/pull/507[#507]

*Http Input - 3.9.2*

* Upgrade netty to 4.1.115 https://github.com/logstash-plugins/logstash-input-http/pull/183[#183]

*Tcp Input - 6.4.4*

* Upgrade netty to 4.1.115 https://github.com/logstash-plugins/logstash-input-tcp/pull/227[#227]

*Http Output - 5.7.1*

* Added new development `rackup` dependency to fix tests
[[logstash-8-16-0]]
=== Logstash 8.16.0 Release Notes

[[known-issues-8-16-0]]
==== Known issues

[[known-issue-8-16-0-shutdown-failure]]
===== {ls} may fail to shut down under some circumstances
{ls} may fail to shut down when you are using <<pipeline-to-pipeline>>.
Check out issue https://github.com/elastic/logstash/issues/16657[#16657] for details.

Workaround: Add `-Dlogstash.pipelinebus.implementation=v1` to `config/jvm.options`.
This change reverts the `PipelineBus` to `v1`, a version that does not exhibit this issue, but may impact performance in pipeline-to-pipeline scenarios.
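For example, a minimal sketch of applying this workaround from a shell, assuming your working directory is the {ls} installation root:

[source,shell]
--------------------------------------------------
# append the workaround flag to the JVM options file
echo '-Dlogstash.pipelinebus.implementation=v1' >> config/jvm.options
--------------------------------------------------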
[[known-issue-8-16-0-json_lines]]
===== "Input buffer full" error with {ls} 8.16.0, 8.16.1, or 8.17.0

If you are using `json_lines` codec 3.2.0 (or later) with {ls} 8.16.0, 8.16.1, or 8.17.0, you may see an error similar to this one, crashing the pipelines:
```
unable to process event. {:message=>"input buffer full", :class=>"Java::JavaLang::IllegalStateException", :backtrace=>["org.logstash.common.BufferedTokenizerExt.extract(BufferedTokenizerExt.java:83)", "usr.share.logstash.vendor.bundle.jruby.$3_dot_1_dot_0.gems.logstash_minus_codec_minus_json_lines_minus_3_dot_2_dot_2.lib.logstash.codecs.json_lines.RUBY$method$decode$0(/usr/share/logstash/vendor/bundle/jruby/3.1.0/gems/logstash-codec-json_lines-3.2.2/lib/logstash/codecs/json_lines.rb:69)", "org.jruby.internal.runtime.methods.CompiledIRMethod.call(CompiledIRMethod.java:165)", "org.jruby.internal.runtime.methods.MixedModeIRMethod.call(MixedModeIRMethod.java:185)",
```
The issue was fixed in https://github.com/elastic/logstash/pull/16760[#16760].

This problem is most likely to be seen when you are using the <<plugins-integrations-logstash,{ls} integration>> plugin to ship data between two {ls} instances, but may appear in other situations, too.

**Workaround for {ls}-to-{ls} communication**

The {ls}-to-{ls} issue can be mitigated by:

* Downgrading the _receiving_ {ls} to `8.16.2`, or any {ls} in the `8.15` series, **_AND/OR_**
* Upgrading the {ls} integration filter of the _sending_ {ls} to version `1.0.4`.

**Workaround for other `json_lines` codec situations**

Other `json_lines` codec issues can be mitigated by:

* Downgrading {ls} to `8.16.2`, or any {ls} in the `8.15` series.
[[health-api-8-16-0]]
==== Announcing the new {ls} Health Report API

The new Health Report API (`GET /_health_report`) is available starting with {ls} `8.16.0`.
This API uses indicators capable of detecting the degraded status of pipelines and
providing actionable insights https://github.com/elastic/logstash/pull/16520[#16520], https://github.com/elastic/logstash/pull/16532[#16532].

**Upgrading from earlier versions.** If your existing automation relies on liveliness scripts that expect the {ls} API status to be unavailable or to return a hardcoded `green` status, you can set a property to preserve the pre-8.16.0 behavior.
To maintain existing behavior for API responses, add the `-Dlogstash.forceApiStatus=green` property to your `config/jvm.options` file.

This setting prevents the new Health API status from affecting the top-level `status` field of existing {ls} API responses, forcing other APIs to return the previous hard-coded `green` value. https://github.com/elastic/logstash/pull/16535[#16535]
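For example, a minimal sketch of opting in to the legacy behavior, assuming your working directory is the {ls} installation root:

[source,shell]
--------------------------------------------------
# force the previous hard-coded "green" top-level status in existing API responses
echo '-Dlogstash.forceApiStatus=green' >> config/jvm.options
--------------------------------------------------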
Check out the <<logstash-health-report-api>> docs for more info.
[[featured-8-16-0]]
==== New features and enhancements

* {ls} now gracefully handles `if` conditionals in pipeline definitions that can't be evaluated (https://github.com/elastic/logstash/pull/16322[#16322]), either by dropping
the event or by sending it to the pipeline's DLQ if enabled. https://github.com/elastic/logstash/pull/16423[#16423]

[[core-8-16-0]]
==== Other changes to Logstash core

* Added deprecation logs for modules `netflow`, `fb_apache` and `azure`. https://github.com/elastic/logstash/pull/16548[#16548]

* Added deprecation logs for users that don't explicitly select a value for `pipeline.buffer.type`, forcing them to proactively make a choice before version `9.0` when this setting will default to heap. https://github.com/elastic/logstash/pull/16498[#16498]

* The flag `--event_api.tags.illegal` was deprecated and will be removed in version 9. This flag remains available throughout all version 8.x releases. Users who rely on this flag to allow non-string assignment to the `tags` field should update their pipeline. https://github.com/elastic/logstash/pull/16507[#16507]

[[dependencies-8.16.0]]
==== Updates to dependencies

* Updated JRuby to 9.4.9.0 https://github.com/elastic/logstash/pull/16638[#16638]

[[plugins-8-16-0]]
==== Plugins

*Cef Codec - 6.2.8*

* [DOC] Added missing documentation of the `raw_data_field` option https://github.com/logstash-plugins/logstash-codec-cef/pull/105[#105]

*Json_lines Codec - 3.2.2*

* Raised the default value of the `decode_size_limit_bytes` option to 512 MB https://github.com/logstash-plugins/logstash-codec-json_lines/pull/46[#46]

* Added the `decode_size_limit_bytes` option to limit the maximum size of JSON lines that can be parsed. https://github.com/logstash-plugins/logstash-codec-json_lines/pull/43[#43]

*Elastic_integration Filter - 0.1.15*

* Use Elasticsearch code from its `8.16` branch and adapt to changes in the Elasticsearch GeoIP processor https://github.com/elastic/logstash-filter-elastic_integration/pull/170[#170]

*Geoip Filter - 7.3.1*

* Fixed a pipeline crash when looking up a database with customised fields https://github.com/logstash-plugins/logstash-filter-geoip/pull/225[#225]

*Azure_event_hubs Input - 1.5.0*

* Updated Azure Event Hub client library to version `3.3.0` https://github.com/logstash-plugins/logstash-input-azure_event_hubs/pull/96[#96]

*Beats Input - 6.9.0*

* Improved plugin's shutdown process and fixed a crash when a connection is terminated while processing messages https://github.com/logstash-plugins/logstash-input-beats/pull/500[#500]

*Http Input - 3.9.1*

* Fixed an issue where the value of `ssl_enabled` during `run` wasn't correctly logged https://github.com/logstash-plugins/logstash-input-http/pull/180[#180]

* Separated Netty boss and worker groups to improve the graceful shutdown https://github.com/logstash-plugins/logstash-input-http/pull/178[#178]

*Tcp Input - 6.4.3*

* Updated dependencies for TCP input https://github.com/logstash-plugins/logstash-input-tcp/pull/224[#224]

*Jdbc Integration - 5.5.0*

* Added support for SQL `DATE` columns to jdbc static and streaming filters https://github.com/logstash-plugins/logstash-integration-jdbc/pull/171[#171]

*Rabbitmq Integration - 7.4.0*

* Removed obsolete `verify_ssl` and `debug` options https://github.com/logstash-plugins/logstash-integration-rabbitmq/pull/60[#60]
[[logstash-8-15-5]]
=== Logstash 8.15.5 Release Notes

[[notable-8-15-5]]
==== Notable issues fixed

* PipelineBusV2 deadlock proofing: We fixed an issue that could cause a deadlock when the pipeline-to-pipeline feature was in use, causing pipelines (and consequently {ls}) to never terminate https://github.com/elastic/logstash/pull/16681[#16681]
* We reverted a change in BufferedTokenizer (https://github.com/elastic/logstash/pull/16482[#16482]) that improved handling of large messages but introduced a double encoding bug https://github.com/elastic/logstash/pull/16687[#16687].

==== Plugins

*Elastic_integration Filter - 0.1.16*

* Reflect the Elasticsearch GeoIP changes into the plugin and sync with the Elasticsearch 8.16 branch https://github.com/elastic/logstash-filter-elastic_integration/pull/170[#170]

*Xml Filter - 4.2.1*

* Patch rexml to improve performance of multi-threaded xml parsing https://github.com/logstash-plugins/logstash-filter-xml/pull/84[#84]

*Tcp Input - 6.4.4*

* Update netty to 4.1.115 https://github.com/logstash-plugins/logstash-input-tcp/pull/227[#227]

*Http Output - 5.7.1*

* Added new development `rackup` dependency to fix tests
[[logstash-8-15-4]]
=== Logstash 8.15.4 Release Notes

[[known-issues-8-15-4]]
==== Known issue

**{ls} may fail to shut down under some circumstances when you are using <<pipeline-to-pipeline>>.**
Check out issue https://github.com/elastic/logstash/issues/16657[#16657] for details.

Workaround: Add `-Dlogstash.pipelinebus.implementation=v1` to `config/jvm.options`.
This change reverts the `PipelineBus` to `v1`, a version that does not exhibit this issue, but may impact performance in pipeline-to-pipeline scenarios.

[[notable-8-15-4]]
==== Notable issues fixed

* Fixed an issue where Logstash could not consume lines correctly when a codec with a delimiter is in use and the input buffer becomes full https://github.com/elastic/logstash/pull/16482[#16482]

[[dependencies-8-15-4]]
==== Updates to dependencies

* Updated JRuby to 9.4.9.0 https://github.com/elastic/logstash/pull/16638[#16638]

[[plugins-8-15-4]]
==== Plugins

*Cef Codec - 6.2.8*

* [DOC] Added `raw_data_field` to docs https://github.com/logstash-plugins/logstash-codec-cef/pull/105[#105]

*Elastic_integration Filter - 0.1.15*

* Fixed a connection failure that occurred when SSL verification mode is disabled over an SSL connection https://github.com/elastic/logstash-filter-elastic_integration/pull/165[#165]

*Geoip Filter - 7.3.1*

* Fixed an issue causing pipelines to crash during lookup when a database has custom fields https://github.com/logstash-plugins/logstash-filter-geoip/pull/225[#225]

*Tcp Input - 6.4.3*

* Updated dependencies https://github.com/logstash-plugins/logstash-input-tcp/pull/224[#224]
[[logstash-8-15-3]]
=== Logstash 8.15.3 Release Notes

[[known-issues-8-15-3]]
==== Known issue

**{ls} may fail to shut down under some circumstances when you are using <<pipeline-to-pipeline>>.**
Check out issue https://github.com/elastic/logstash/issues/16657[#16657] for details.

Workaround: Add `-Dlogstash.pipelinebus.implementation=v1` to `config/jvm.options`.
This change reverts the `PipelineBus` to `v1`, a version that does not exhibit this issue, but may impact performance in pipeline-to-pipeline scenarios.

[[notable-8.15.3]]
==== Notable issues fixed

* Improved the pipeline bootstrap error logs to include the cause's backtrace, giving a hint where the issue occurred https://github.com/elastic/logstash/pull/16495[#16495]

* Fixed Logstash core compatibility issues with `logstash-input-azure_event_hubs` versions `1.4.8` and earlier https://github.com/elastic/logstash/pull/16485[#16485]

==== Plugins

*Elastic_integration Filter - 0.1.14*

* Enabled the use of org.elasticsearch.ingest.common.Processors in Ingest Pipelines, resolving an issue where some integrations would fail to load https://github.com/elastic/logstash-filter-elastic_integration/pull/162[#162]

*Azure_event_hubs Input - 1.4.9*

* Fixed an issue with `getHostContext` method accessibility that prevented the plugin from running https://github.com/logstash-plugins/logstash-input-azure_event_hubs/pull/93[#93]

* Fixed connection placeholder replacement errors with Logstash `8.15.1` and `8.15.2` https://github.com/logstash-plugins/logstash-input-azure_event_hubs/pull/92[#92]

*Kafka Integration - 11.5.2*

* Updated avro to 1.11.4 and confluent kafka to 7.4.7 https://github.com/logstash-plugins/logstash-integration-kafka/pull/184[#184]
[[logstash-8-15-2]]
=== Logstash 8.15.2 Release Notes

[[known-issues-8-15-2]]
==== Known issue

**{ls} may fail to shut down under some circumstances when you are using <<pipeline-to-pipeline>>.**
Check out issue https://github.com/elastic/logstash/issues/16657[#16657] for details.

Workaround: Add `-Dlogstash.pipelinebus.implementation=v1` to `config/jvm.options`.
This change reverts the `PipelineBus` to `v1`, a version that does not exhibit this issue, but may impact performance in pipeline-to-pipeline scenarios.

[[notable-8.15.2]]
==== Notable issues fixed

* Fixed a https://github.com/elastic/logstash/issues/16437[regression] from {ls} 8.15.1 in which {ls} removes all quotes from docker env variables, possibly causing {ls} not to start https://github.com/elastic/logstash/pull/16456[#16456]

==== Plugins

*Beats Input - 6.8.4*

* Fix to populate the `@metadata` fields even if the source's metadata value is `nil` https://github.com/logstash-plugins/logstash-input-beats/pull/502[#502]

*Dead_letter_queue Input - 2.0.1*

* Fix NullPointerException when the plugin closes https://github.com/logstash-plugins/logstash-input-dead_letter_queue/pull/53[#53]

*Elastic_serverless_forwarder Input - 0.1.5*

* [DOC] Fix attributes to accurately set and clear default codec values https://github.com/logstash-plugins/logstash-input-elastic_serverless_forwarder/pull/8[#8]

*Logstash Integration - 1.0.3*

* [DOC] Fix attributes to accurately set and clear default codec values https://github.com/logstash-plugins/logstash-integration-logstash/pull/23[#23]

*Elasticsearch Output - 11.22.9*

* Vendor ECS template for Elasticsearch 9.x in built gem https://github.com/logstash-plugins/logstash-output-elasticsearch/pull/1188[#1188]
* Added ECS template for Elasticsearch 9.x https://github.com/logstash-plugins/logstash-output-elasticsearch/pull/1187[#1187]
[[logstash-8-15-1]]
=== Logstash 8.15.1 Release Notes

[[known-issues-8-15-1]]
==== Known issues

* **{ls} may fail to start under some circumstances.** Single and double quotes are stripped from a pipeline configuration if the configuration includes environment or keystore variable references.
If this situation occurs, {ls} may fail to start or some plugins may use a malformed configuration.
Check out issue https://github.com/elastic/logstash/issues/16437[#16437] for details.
+
Workaround: Downgrade to {ls} 8.15.0, or temporarily avoid using environment and keystore variable references.

* **{ls} may fail to shut down under some circumstances when you are using <<pipeline-to-pipeline>>.**
Check out issue https://github.com/elastic/logstash/issues/16657[#16657] for details.
+
Workaround: Add `-Dlogstash.pipelinebus.implementation=v1` to `config/jvm.options`.
This change reverts the `PipelineBus` to `v1`, a version that does not exhibit this issue, but may impact performance in pipeline-to-pipeline scenarios.

[[notable-8.15.1]]
==== Performance improvements and notable issues fixed

@@ -102,6 +607,15 @@ This section summarizes the changes in the following releases:
[[logstash-8-15-0]]
=== Logstash 8.15.0 Release Notes

[[known-issues-8-15-0]]
==== Known issue

**{ls} may fail to shut down under some circumstances when you are using <<pipeline-to-pipeline>>.**
Check out issue https://github.com/elastic/logstash/issues/16657[#16657] for details.

Workaround: Add `-Dlogstash.pipelinebus.implementation=v1` to `config/jvm.options`.
This change reverts the `PipelineBus` to `v1`, a version that does not exhibit this issue, but may impact performance in pipeline-to-pipeline scenarios.

[[snmp-ga-8.15.0]]
==== Announcing the new {ls} SNMP integration plugin

@@ -2475,4 +2989,4 @@ We have added another flag to the Benchmark CLI to allow passing a data file wit
This feature allows users to run the Benchmark CLI in a custom test case with a custom config and a custom dataset. https://github.com/elastic/logstash/pull/12437[#12437]

==== Plugin releases
Plugins align with release 7.14.0
docs/static/security/api-keys.asciidoc
@@ -249,8 +249,7 @@ POST /_security/api_key
   "name": "logstash_host001", <1>
   "role_descriptors": {
     "logstash_monitoring": { <2>
-      "cluster": ["monitor"],
-      "index": ["read"]
+      "cluster": ["monitor", "manage_logstash_pipelines"]
     }
   }
 }
docs/static/security/es-security.asciidoc
@@ -78,7 +78,7 @@ As always, there's a definite argument for consistency across deployments.
 [[es-sec-plugin]]
 ==== Configure the elasticsearch output

-Use the <<plugins-outputs-elasticsearch,`elasticsearch output`'s>> <<plugins-outputs-elasticsearch-cacert,`cacert` option>> to point to the certificate's location.
+Use the <<plugins-outputs-elasticsearch,`elasticsearch output`'s>> <<plugins-outputs-elasticsearch-ssl_certificate_authorities,`ssl_certificate_authorities` option>> to point to the certificate's location.

 **Example**

@@ -87,7 +87,7 @@ Use the <<plugins-outputs-elasticsearch,`elasticsearch output`'s>> <<plugins-out
 output {
   elasticsearch {
     hosts => ["https://...] <1>
-    cacert => '/etc/logstash/config/certs/ca.crt' <2>
+    ssl_certificate_authorities => ['/etc/logstash/config/certs/ca.crt'] <2>
   }
 }
 -------
docs/static/security/grant-access.asciidoc
@@ -1,6 +1,6 @@
 [discrete]
 [[ls-user-access]]
-=== Granting access to the Logstash indices
+=== Granting access to the indices Logstash creates

 To access the indices Logstash creates, users need the `read` and
 `view_index_metadata` privileges:

@@ -13,14 +13,20 @@ privileges for the Logstash indices. You can create roles from the
 ---------------------------------------------------------------
 POST _security/role/logstash_reader
 {
-  "cluster": ["manage_logstash_pipelines"]
+  "cluster": ["manage_logstash_pipelines"],
+  "indices": [
+    {
+      "names": [ "logstash-*" ],
+      "privileges": ["read","view_index_metadata"]
+    }
+  ]
 }
 ---------------------------------------------------------------

 . Assign your Logstash users the `logstash_reader` role. If the Logstash user
 will be using
 {logstash-ref}/logstash-centralized-pipeline-management.html[centralized pipeline management],
-also assign the `logstash_admin` role. You can create and manage users from the
+also assign the `logstash_system` role. You can create and manage users from the
 **Management > Users** UI in {kib} or through the `user` API:
 +
 [source, sh]

@@ -28,9 +34,9 @@ also assign the `logstash_admin` role. You can create and manage users from the
 POST _security/user/logstash_user
 {
   "password" : "x-pack-test-password",
-  "roles" : [ "logstash_reader", "logstash_admin"], <1>
+  "roles" : [ "logstash_reader", "logstash_system"], <1>
   "full_name" : "Kibana User for Logstash"
 }
 ---------------------------------------------------------------
-<1> `logstash_admin` is a built-in role that provides access to system
-indices for managing configurations.
+<1> `logstash_system` is a built-in role that provides the necessary permissions to
+check the availability of the supported features of {es} cluster.
@@ -54,9 +54,10 @@ section in your Logstash configuration, or a different one. Defaults to
 If your {es} cluster is protected with basic authentication, these settings
 provide the username and password that the Logstash instance uses to
 authenticate for accessing the configuration data. The username you specify here
-should have the built-in `logstash_admin` role and the customized `logstash_writer` role, which provides access to system
-indices for managing configurations. Starting with Elasticsearch version 7.10.0, the
-`logstash_admin` role inherits the `manage_logstash_pipelines` cluster privilege for centralized pipeline management.
+should have the built-in `logstash_admin` and `logstash_system` roles.
+These roles provide access to system indices for managing configurations.
+
+NOTE: Starting with Elasticsearch version 7.10.0, the `logstash_admin` role inherits the `manage_logstash_pipelines` cluster privilege for centralized pipeline management.
+If a user has created their own roles and granted them access to the .logstash index, those roles will continue to work in 7.x but will need to be updated for 8.0.

 `xpack.management.elasticsearch.proxy`::

@@ -143,8 +144,8 @@ If you're using {es} in {ecloud}, you can set your auth credentials here.
 This setting is an alternative to both `xpack.management.elasticsearch.username`
 and `xpack.management.elasticsearch.password`. If `cloud_auth` is configured,
 those settings should not be used.
-The credentials you specify here should be for a user with the `logstash_admin` role, which
-provides access to system indices for managing configurations.
+The credentials you specify here should be for a user with the `logstash_admin` and `logstash_system` roles, which
+provide access to system indices for managing configurations.

 `xpack.management.elasticsearch.api_key`::
docs/static/troubleshoot/health-pipeline-flow-worker-utilization.asciidoc (new file)
@@ -0,0 +1,44 @@
[[health-report-pipeline-flow-worker-utilization]]
=== Health Report Pipeline Flow: Worker Utilization

The Pipeline indicator has a `flow:worker_utilization` probe that is capable of producing one of several diagnoses about blockages in the pipeline.

A pipeline is considered "blocked" when its workers are fully-utilized, because if they are consistently spending 100% of their time processing events, they are unable to pick up new events from the queue.
This can cause back-pressure to cascade to upstream services, which can result in data loss or duplicate processing, depending on the upstream configuration.

The issue typically stems from one or more causes:

* a downstream resource being blocked,
* a plugin consuming more resources than expected, and/or
* insufficient resources being allocated to the pipeline.

To address the issue, observe the <<plugin-flow-rates>> from the <<node-stats-api>>, and identify which plugins have the highest `worker_utilization`.
This tells you which plugins are consuming most of the pipeline's worker resources; a query sketch follows the list below.

* If the offending plugin connects to a downstream service or another pipeline that is exerting back-pressure, the issue needs to be addressed in the downstream service or pipeline.
* If the offending plugin connects to a downstream service with high network latency, throughput for the pipeline may be improved by <<tuning-logstash-settings, allocating more worker resources to the pipeline>>.
* If the offending plugin is a computation-heavy filter such as `grok` or `kv`, its configuration may need to be tuned to eliminate wasted computation.
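As a sketch of that observation step, per-plugin flow metrics can be pulled from the node stats API; the pipeline id `main` and the exact `jq` path are assumptions based on the default API binding and the flow-rate layout:

[source,shell]
--------------------------------------------------
# list each filter of pipeline "main" with its current worker_utilization
curl -s 'localhost:9600/_node/stats/pipelines' \
  | jq '.pipelines.main.plugins.filters[] | {id, worker_utilization: .flow.worker_utilization.current}'
--------------------------------------------------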
[[health-report-pipeline-flow-worker-utilization-diagnosis-blocked-5m]]
==== [[blocked-5m]]Blocked Pipeline (5 minutes)

A pipeline that has been completely blocked for five minutes or more represents a critical blockage to the flow of events through your pipeline that needs to be addressed immediately to avoid or limit data loss.
See above for troubleshooting steps.

[[health-report-pipeline-flow-worker-utilization-diagnosis-nearly-blocked-5m]]
==== [[nearly-blocked-5m]]Nearly Blocked Pipeline (5 minutes)

A pipeline that has been nearly blocked for five minutes or more may be creating intermittent blockage to the flow of events through your pipeline, which can result in the risk of data loss.
See above for troubleshooting steps.

[[health-report-pipeline-flow-worker-utilization-diagnosis-blocked-1m]]
==== [[blocked-1m]]Blocked Pipeline (1 minute)

A pipeline that has been completely blocked for one minute or more represents a high-risk or imminent blockage to the flow of events through your pipeline that likely needs to be addressed soon to avoid or limit data loss.
See above for troubleshooting steps.

[[health-report-pipeline-flow-worker-utilization-diagnosis-nearly-blocked-1m]]
==== [[nearly-blocked-1m]]Nearly Blocked Pipeline (1 minute)

A pipeline that has been nearly blocked for one minute or more may be creating intermittent blockage to the flow of events through your pipeline, which can result in the risk of data loss.
See above for troubleshooting steps.
docs/static/troubleshoot/health-pipeline-status.asciidoc (new file)
@@ -0,0 +1,37 @@
[[health-report-pipeline-status]]
=== Health Report Pipeline Status

The Pipeline indicator has a `status` probe that is capable of producing one of several diagnoses about the pipeline's lifecycle, indicating whether the pipeline is currently running.

[[health-report-pipeline-status-diagnosis-loading]]
==== [[loading]]Loading Pipeline

A pipeline that is loading is not yet processing data, and is considered a temporarily-degraded pipeline state.
Some plugins perform actions or pre-validation that can delay the starting of the pipeline, such as when a plugin pre-establishes a connection to an external service before allowing the pipeline to start.
When these plugins take significant time to start up, the whole pipeline can remain in a loading state for an extended time.

If your pipeline does not come up in a reasonable amount of time, consider checking the Logstash logs to see if the plugin shows evidence of being caught in a retry loop.

[[health-report-pipeline-status-diagnosis-finished]]
==== [[finished]]Finished Pipeline

A Logstash pipeline whose input plugins have all completed will be shut down once events have finished processing.

Many plugins can be configured to run indefinitely, either by listening for new inbound events or by polling for events on a schedule.
A finished pipeline will not produce or process any more events until it is restarted, which will occur if the pipeline's definition is changed and pipeline reloads are enabled.
If you wish to keep your pipeline running, consider configuring its input to run on a schedule or otherwise listen for new events.

[[health-report-pipeline-status-diagnosis-terminated]]
==== [[terminated]]Terminated Pipeline

When a Logstash pipeline's filter or output plugins crash, the entire pipeline is terminated and intervention is required.

A terminated pipeline will not produce or process any more events until it is restarted, which will occur if the pipeline's definition is changed and pipeline reloads are enabled.
Check the logs to determine the cause of the crash, and report the issue to the plugin maintainers.

[[health-report-pipeline-status-diagnosis-unknown]]
==== [[unknown]]Unknown Pipeline

When a Logstash pipeline either cannot be created or has recently been deleted, the health report doesn't know enough to produce a meaningful status.

Check the logs to determine if the pipeline crashed during creation, and report the issue to the plugin maintainers.
@@ -28,3 +28,5 @@ include::ts-logstash.asciidoc[]
 include::ts-plugins-general.asciidoc[]
 include::ts-plugins.asciidoc[]
 include::ts-other-issues.asciidoc[]
+include::health-pipeline-status.asciidoc[]
+include::health-pipeline-flow-worker-utilization.asciidoc[]
@@ -15,6 +15,9 @@
 # specific language governing permissions and limitations
 # under the License.

+# work around https://github.com/jruby/jruby/issues/8579
+require_relative './patches/jar_dependencies'
+
 module LogStash
   module Bundler
     extend self

@@ -264,6 +267,7 @@ module LogStash
     elsif options[:update]
       arguments << "update"
       arguments << expand_logstash_mixin_dependencies(options[:update])
+      arguments << "--#{options[:level] || 'minor'}"
       arguments << "--local" if options[:local]
       arguments << "--conservative" if options[:conservative]
     elsif options[:clean]
|
|||
return nil unless Jars.require?
|
||||
result = Jars.require_jar(*args)
|
||||
if result.is_a? String
|
||||
# JAR_DEBUG=1 will now show theses
|
||||
# JARS_VERBOSE=true will show these
|
||||
Jars.debug { "--- jar coordinate #{args[0..-2].join(':')} already loaded with version #{result} - omit version #{args[-1]}" }
|
||||
Jars.debug { " try to load from #{caller.join("\n\t")}" }
|
||||
return false
|
||||
|
@ -29,3 +29,29 @@ def require_jar(*args)
|
|||
Jars.debug { " register #{args.inspect} - #{result == true}" }
|
||||
result
|
||||
end
|
||||
|
||||
# work around https://github.com/jruby/jruby/issues/8579
|
||||
# the ruby maven 3.9.3 + maven-libs 3.9.9 gems will output unnecessary text we need to trim down during `load_from_maven`
|
||||
# remove everything from "--" until the end of the line
|
||||
# the `[...-5]` is just to remove the color changing characters from the end of the string that exist before "--"
|
||||
require 'jars/installer'
|
||||
|
||||
class ::Jars::Installer
|
||||
def self.load_from_maven(file)
|
||||
Jars.debug { "[load_from_maven] called with arguments: #{file.inspect}" }
|
||||
result = []
|
||||
::File.read(file).each_line do |line|
|
||||
if line.match?(/ --/)
|
||||
Jars.debug { "[load_from_maven] line: #{line.inspect}" }
|
||||
fixed_line = line.strip.gsub(/ --.+?$/, "")[0...-5]
|
||||
Jars.debug { "[load_from_maven] fixed_line: #{fixed_line.inspect}" }
|
||||
dep = ::Jars::Installer::Dependency.new(fixed_line)
|
||||
else
|
||||
dep = ::Jars::Installer::Dependency.new(line)
|
||||
end
|
||||
result << dep if dep && dep.scope == :runtime
|
||||
end
|
||||
Jars.debug { "[load_from_maven] returned: #{result.inspect}" }
|
||||
result
|
||||
end
|
||||
end
|
||||
|
|
|
@@ -55,8 +55,8 @@ def apply_env_proxy_settings(settings)
   scheme = settings[:protocol].downcase
   java.lang.System.setProperty("#{scheme}.proxyHost", settings[:host])
   java.lang.System.setProperty("#{scheme}.proxyPort", settings[:port].to_s)
-  java.lang.System.setProperty("#{scheme}.proxyUsername", settings[:username].to_s)
-  java.lang.System.setProperty("#{scheme}.proxyPassword", settings[:password].to_s)
+  java.lang.System.setProperty("#{scheme}.proxyUser", settings[:username].to_s)
+  java.lang.System.setProperty("#{scheme}.proxyPass", settings[:password].to_s)
 end

 def extract_proxy_values_from_uri(proxy_uri)
@@ -24,7 +24,13 @@ class LogStash::PluginManager::Update < LogStash::PluginManager::Command
   # These are local gems used by LS and needs to be filtered out of other plugin gems
   NON_PLUGIN_LOCAL_GEMS = ["logstash-core", "logstash-core-plugin-api"]

+  SUPPORTED_LEVELS = %w(major minor patch)
+
   parameter "[PLUGIN] ...", "Plugin name(s) to upgrade to latest version", :attribute_name => :plugins_arg
+  option "--level", "LEVEL", "restrict updates to given semantic version level (one of #{SUPPORTED_LEVELS})", :default => "minor" do |given_level|
+    fail("unsupported level `#{given_level}`; expected one of #{SUPPORTED_LEVELS}") unless SUPPORTED_LEVELS.include?(given_level)
+    given_level
+  end
   option "--[no-]verify", :flag, "verify plugin validity before installation", :default => true
   option "--local", :flag, "force local-only plugin update. see bin/logstash-plugin package|unpack", :default => false
   option "--[no-]conservative", :flag, "do a conservative update of plugin's dependencies", :default => true

@@ -82,6 +88,7 @@ class LogStash::PluginManager::Update < LogStash::PluginManager::Command
     # Bundler cannot update and clean gems in one operation so we have to call the CLI twice.
     Bundler.settings.temporary(:frozen => false) do # Unfreeze the bundle when updating gems
       output = LogStash::Bundler.invoke! update: plugins,
+                                          level: level,
                                           rubygems_source: gemfile.gemset.sources,
                                           local: local?,
                                           conservative: conservative?
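A usage sketch of the new option as wired above; the plugin name is hypothetical, and levels outside `major`/`minor`/`patch` are rejected by the validation block shown:

[source,shell]
--------------------------------------------------
# default level is "minor"; restrict to patch-level updates for one plugin
bin/logstash-plugin update --level=patch logstash-filter-grok
--------------------------------------------------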
@@ -12,7 +12,13 @@ if File.exist?(project_versions_yaml_path)
   # we ignore the copy in git and we overwrite an existing file
   # each time we build the logstash-core gem
   original_lines = IO.readlines(project_versions_yaml_path)
-  original_lines << ""
+  # introduce the version qualifier (e.g. beta1, rc1) into the copied yml so it's displayed by Logstash
+  unless ENV['VERSION_QUALIFIER'].to_s.strip.empty?
+    logstash_version_line = original_lines.find {|line| line.match(/^logstash:/) }
+    logstash_version_line.chomp!
+    logstash_version_line << "-#{ENV['VERSION_QUALIFIER']}\n"
+  end
+  original_lines << "\n"
   original_lines << "# This is a copy the project level versions.yml into this gem's root and it is created when the gemspec is evaluated."
   gem_versions_yaml_path = File.expand_path("./versions-gem-copy.yml", File.dirname(__FILE__))
   File.open(gem_versions_yaml_path, 'w') do |new_file|
@@ -57,6 +57,7 @@ def versionMap = (Map) (new Yaml()).load(new File("$projectDir/../versions.yml")

 description = """Logstash Core Java"""

+String logstashCoreVersion = versionMap['logstash-core']
 String jacksonVersion = versionMap['jackson']
 String jacksonDatabindVersion = versionMap['jackson-databind']
 String jrubyVersion = versionMap['jruby']['version']

@@ -183,6 +184,23 @@ artifacts {
   }
 }

+task generateVersionInfoResources(type: DefaultTask) {
+    ext.outDir = layout.buildDirectory.dir("generated-resources/version-info").get()
+
+    inputs.property("version-info:logstash-core", logstashCoreVersion)
+    outputs.dir(ext.outDir)
+
+    doLast {
+        mkdir outDir;
+        def resourceFile = outDir.file('version-info.properties').asFile
+        resourceFile.text = "logstash-core: ${logstashCoreVersion}"
+    }
+}
+sourceSets {
+    main { output.dir(generateVersionInfoResources.outputs.files) }
+}
+processResources.dependsOn generateVersionInfoResources
+
 configurations {
   provided
 }
|
@ -40,6 +40,8 @@ class LogStash::Agent
|
|||
attr_reader :metric, :name, :settings, :dispatcher, :ephemeral_id, :pipeline_bus
|
||||
attr_accessor :logger
|
||||
|
||||
attr_reader :health_observer
|
||||
|
||||
# initialize method for LogStash::Agent
|
||||
# @param params [Hash] potential parameters are:
|
||||
# :name [String] - identifier for the agent
|
||||
|
@ -51,6 +53,9 @@ class LogStash::Agent
|
|||
@auto_reload = setting("config.reload.automatic")
|
||||
@ephemeral_id = SecureRandom.uuid
|
||||
|
||||
java_import("org.logstash.health.HealthObserver")
|
||||
@health_observer ||= HealthObserver.new
|
||||
|
||||
# Mutex to synchronize in the exclusive method
|
||||
# Initial usage for the Ruby pipeline initialization which is not thread safe
|
||||
@webserver_control_lock = Mutex.new
|
||||
|
@ -151,6 +156,31 @@ class LogStash::Agent
|
|||
transition_to_stopped
|
||||
end
|
||||
|
||||
include org.logstash.health.PipelineIndicator::PipelineDetailsProvider
|
||||
def pipeline_details(pipeline_id)
|
||||
logger.trace("fetching pipeline details for `#{pipeline_id}`")
|
||||
pipeline_id = pipeline_id.to_sym
|
||||
|
||||
java_import org.logstash.health.PipelineIndicator
|
||||
|
||||
pipeline_state = @pipelines_registry.states.get(pipeline_id)
|
||||
if pipeline_state.nil?
|
||||
return PipelineIndicator::Details.new(PipelineIndicator::Status::UNKNOWN)
|
||||
end
|
||||
|
||||
pipeline_state.synchronize do |sync_state|
|
||||
status = case
|
||||
when sync_state.loading? then PipelineIndicator::Status::LOADING
|
||||
when sync_state.crashed? then PipelineIndicator::Status::TERMINATED
|
||||
when sync_state.running? then PipelineIndicator::Status::RUNNING
|
||||
when sync_state.finished? then PipelineIndicator::Status::FINISHED
|
||||
else PipelineIndicator::Status::UNKNOWN
|
||||
end
|
||||
|
||||
PipelineIndicator::Details.new(status, sync_state.pipeline&.to_java.collectWorkerUtilizationFlowObservation)
|
||||
end
|
||||
end
|
||||
|
||||
def auto_reload?
|
||||
@auto_reload
|
||||
end
|
||||
|
@ -395,7 +425,13 @@ class LogStash::Agent
|
|||
)
|
||||
end
|
||||
rescue SystemExit, Exception => e
|
||||
logger.error("Failed to execute action", :action => action, :exception => e.class.name, :message => e.message, :backtrace => e.backtrace)
|
||||
error_details = { :action => action, :exception => e.class.name, :message => e.message, :backtrace => e.backtrace }
|
||||
cause = e.cause
|
||||
if cause && e != cause
|
||||
error_details[:cause] = { :exception => cause.class, :message => cause.message }
|
||||
error_details[:cause][:backtrace] = cause.backtrace if cause.backtrace
|
||||
end
|
||||
logger.error('Failed to execute action', error_details)
|
||||
converge_result.add(action, LogStash::ConvergeResult::FailedAction.from_exception(e))
|
||||
end
|
||||
end
|
||||
|
|
|
@@ -18,6 +18,7 @@
 require "logstash/api/service"
 require "logstash/api/commands/system/basicinfo_command"
 require "logstash/api/commands/system/plugins_command"
+require "logstash/api/commands/health_report"
 require "logstash/api/commands/stats"
 require "logstash/api/commands/node"
 require "logstash/api/commands/default_metadata"

@@ -34,6 +35,7 @@ module LogStash
       :plugins_command => ::LogStash::Api::Commands::System::Plugins,
       :stats => ::LogStash::Api::Commands::Stats,
       :node => ::LogStash::Api::Commands::Node,
+      :health_report => ::LogStash::Api::Commands::HealthReport,
       :default_metadata => ::LogStash::Api::Commands::DefaultMetadata
     }
   end
@@ -22,20 +22,14 @@ module LogStash
   module Commands
     class DefaultMetadata < Commands::Base
       def all
-        res = {:host => host,
-               :version => version,
-               :http_address => http_address,
-               :id => service.agent.id,
-               :name => service.agent.name,
-               :ephemeral_id => service.agent.ephemeral_id,
-               :status => "green", # This is hard-coded to mirror x-pack behavior
-               :snapshot => ::BUILD_INFO["build_snapshot"],
+        res = base_info.merge({
+          :status => service.agent.health_observer.status,
           :pipeline => {
             :workers => LogStash::SETTINGS.get("pipeline.workers"),
             :batch_size => LogStash::SETTINGS.get("pipeline.batch.size"),
             :batch_delay => LogStash::SETTINGS.get("pipeline.batch.delay"),
           },
-        }
+        })
         monitoring = {}
         if enabled_xpack_monitoring?
           monitoring = monitoring.merge({

@@ -49,12 +43,24 @@ module LogStash
         res.merge(monitoring.empty? ? {} : {:monitoring => monitoring})
       end

+      def base_info
+        {
+          :host => host,
+          :version => version,
+          :http_address => http_address,
+          :id => service.agent.id,
+          :name => service.agent.name,
+          :ephemeral_id => service.agent.ephemeral_id,
+          :snapshot => ::BUILD_INFO["build_snapshot"],
+        }
+      end
+
       def host
         @@host ||= Socket.gethostname
       end

       def version
-        LOGSTASH_CORE_VERSION
+        LOGSTASH_VERSION
       end

       def http_address
logstash-core/lib/logstash/api/commands/health_report.rb (new file, 31 lines)
@@ -0,0 +1,31 @@
# [Apache License 2.0 header]

require "logstash/api/commands/base"

module LogStash
  module Api
    module Commands
      class HealthReport < Commands::Base

        def all(selected_fields = [])
          service.agent.health_observer.report
        end
      end
    end
  end
end
logstash-core/lib/logstash/api/modules/health_report.rb (new file, 49 lines)
@@ -0,0 +1,49 @@
# [Apache License 2.0 header]

module LogStash
  module Api
    module Modules
      class HealthReport < ::LogStash::Api::Modules::Base

        get "/" do
          payload = health_report.all.then do |health_report_pojo|
            # The app_helper needs a ruby-hash.
            # Manually creating a map of properties works around the issue.
            base_metadata.merge({
              status: health_report_pojo.status,
              symptom: health_report_pojo.symptom,
              indicators: health_report_pojo.indicators,
            })
          end

          respond_with(payload, {exclude_default_metadata: true})
        end

        private

        def health_report
          @health_report ||= factory.build(:health_report)
        end

        def base_metadata
          @factory.build(:default_metadata).base_info
        end
      end
    end
  end
end
@@ -18,6 +18,7 @@
 require "rack"
 require "sinatra/base"
 require "logstash/api/modules/base"
+require "logstash/api/modules/health_report"
 require "logstash/api/modules/node"
 require "logstash/api/modules/node_stats"
 require "logstash/api/modules/plugins"

@@ -123,6 +124,7 @@ module LogStash
     def self.rack_namespaces(agent)
       {
+        "/_health_report" => LogStash::Api::Modules::HealthReport,
         "/_node" => LogStash::Api::Modules::Node,
         "/_stats" => LogStash::Api::Modules::Stats,
         "/_node/stats" => LogStash::Api::Modules::NodeStats,
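With the module mounted at /_health_report, the report becomes reachable through the monitoring API. A minimal sketch of querying it (assuming the default binding of api.http.host=127.0.0.1 and api.http.port=9600; the exact indicator payload depends on the Java health observer):

    require "net/http"
    require "json"

    response = Net::HTTP.get(URI("http://127.0.0.1:9600/_health_report"))
    report = JSON.parse(response)
    report["status"]      # overall status computed by the health observer
    report["symptom"]     # human-readable summary
    report["indicators"]  # per-area detail, e.g. the pipeline indicators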
@@ -67,6 +67,9 @@ module LogStash module Config
       raise LogStash::ConfigLoadingError, I18n.t("logstash.modules.configuration.modules-unavailable", **i18n_opts)
     end

+    specified_and_available_names
+      .each { |mn| deprecation_logger.deprecated("The #{mn} module has been deprecated and will be removed in version 9.") }
+
     specified_and_available_names.each do |module_name|
       connect_fail_args = {}
       begin
@@ -35,10 +35,10 @@ module LogStash
   [
     Setting::Boolean.new("allow_superuser", true),
-    Setting::String.new("node.name", Socket.gethostname),
-    Setting::NullableString.new("path.config", nil, false),
+    Setting::SettingString.new("node.name", Socket.gethostname),
+    Setting::SettingNullableString.new("path.config", nil, false),
     Setting::WritableDirectory.new("path.data", ::File.join(LogStash::Environment::LOGSTASH_HOME, "data")),
-    Setting::NullableString.new("config.string", nil, false),
+    Setting::SettingNullableString.new("config.string", nil, false),
     Setting::Modules.new("modules.cli", LogStash::Util::ModulesSettingArray, []),
     Setting::Modules.new("modules", LogStash::Util::ModulesSettingArray, []),
     Setting.new("modules_list", Array, []),

@@ -50,10 +50,10 @@ module LogStash
     Setting::Boolean.new("config.reload.automatic", false),
     Setting::TimeValue.new("config.reload.interval", "3s"), # in seconds
     Setting::Boolean.new("config.support_escapes", false),
-    Setting::String.new("config.field_reference.escape_style", "none", true, %w(none percent ampersand)),
-    Setting::String.new("event_api.tags.illegal", "rename", true, %w(rename warn)),
+    Setting::SettingString.new("config.field_reference.escape_style", "none", true, %w(none percent ampersand)),
+    Setting::SettingString.new("event_api.tags.illegal", "rename", true, %w(rename warn)),
     Setting::Boolean.new("metric.collect", true),
-    Setting::String.new("pipeline.id", "main"),
+    Setting::SettingString.new("pipeline.id", "main"),
     Setting::Boolean.new("pipeline.system", false),
     Setting::PositiveInteger.new("pipeline.workers", LogStash::Config::CpuCoreStrategy.maximum),
     Setting::PositiveInteger.new("pipeline.batch.size", 125),

@@ -65,32 +65,32 @@ module LogStash
     Setting::CoercibleString.new("pipeline.ordered", "auto", true, ["auto", "true", "false"]),
     Setting::CoercibleString.new("pipeline.ecs_compatibility", "v8", true, %w(disabled v1 v8)),
     Setting.new("path.plugins", Array, []),
-    Setting::NullableString.new("interactive", nil, false),
+    Setting::SettingNullableString.new("interactive", nil, false),
     Setting::Boolean.new("config.debug", false),
-    Setting::String.new("log.level", "info", true, ["fatal", "error", "warn", "debug", "info", "trace"]),
+    Setting::SettingString.new("log.level", "info", true, ["fatal", "error", "warn", "debug", "info", "trace"]),
     Setting::Boolean.new("version", false),
     Setting::Boolean.new("help", false),
    Setting::Boolean.new("enable-local-plugin-development", false),
-    Setting::String.new("log.format", "plain", true, ["json", "plain"]),
+    Setting::SettingString.new("log.format", "plain", true, ["json", "plain"]),
     Setting::Boolean.new("log.format.json.fix_duplicate_message_fields", false),
     Setting::Boolean.new("api.enabled", true).with_deprecated_alias("http.enabled"),
-    Setting::String.new("api.http.host", "127.0.0.1").with_deprecated_alias("http.host"),
+    Setting::SettingString.new("api.http.host", "127.0.0.1").with_deprecated_alias("http.host"),
     Setting::PortRange.new("api.http.port", 9600..9700).with_deprecated_alias("http.port"),
-    Setting::String.new("api.environment", "production").with_deprecated_alias("http.environment"),
-    Setting::String.new("api.auth.type", "none", true, %w(none basic)),
-    Setting::String.new("api.auth.basic.username", nil, false).nullable,
+    Setting::SettingString.new("api.environment", "production").with_deprecated_alias("http.environment"),
+    Setting::SettingString.new("api.auth.type", "none", true, %w(none basic)),
+    Setting::SettingString.new("api.auth.basic.username", nil, false).nullable,
     Setting::Password.new("api.auth.basic.password", nil, false).nullable,
-    Setting::String.new("api.auth.basic.password_policy.mode", "WARN", true, %w[WARN ERROR]),
+    Setting::SettingString.new("api.auth.basic.password_policy.mode", "WARN", true, %w[WARN ERROR]),
     Setting::Numeric.new("api.auth.basic.password_policy.length.minimum", 8),
-    Setting::String.new("api.auth.basic.password_policy.include.upper", "REQUIRED", true, %w[REQUIRED OPTIONAL]),
-    Setting::String.new("api.auth.basic.password_policy.include.lower", "REQUIRED", true, %w[REQUIRED OPTIONAL]),
-    Setting::String.new("api.auth.basic.password_policy.include.digit", "REQUIRED", true, %w[REQUIRED OPTIONAL]),
-    Setting::String.new("api.auth.basic.password_policy.include.symbol", "OPTIONAL", true, %w[REQUIRED OPTIONAL]),
+    Setting::SettingString.new("api.auth.basic.password_policy.include.upper", "REQUIRED", true, %w[REQUIRED OPTIONAL]),
+    Setting::SettingString.new("api.auth.basic.password_policy.include.lower", "REQUIRED", true, %w[REQUIRED OPTIONAL]),
+    Setting::SettingString.new("api.auth.basic.password_policy.include.digit", "REQUIRED", true, %w[REQUIRED OPTIONAL]),
+    Setting::SettingString.new("api.auth.basic.password_policy.include.symbol", "OPTIONAL", true, %w[REQUIRED OPTIONAL]),
     Setting::Boolean.new("api.ssl.enabled", false),
     Setting::ExistingFilePath.new("api.ssl.keystore.path", nil, false).nullable,
     Setting::Password.new("api.ssl.keystore.password", nil, false).nullable,
     Setting::StringArray.new("api.ssl.supported_protocols", nil, true, %w[TLSv1 TLSv1.1 TLSv1.2 TLSv1.3]),
-    Setting::String.new("queue.type", "memory", true, ["persisted", "memory"]),
+    Setting::SettingString.new("queue.type", "memory", true, ["persisted", "memory"]),
     Setting::Boolean.new("queue.drain", false),
     Setting::Bytes.new("queue.page_capacity", "64mb"),
     Setting::Bytes.new("queue.max_bytes", "1024mb"),

@@ -102,16 +102,16 @@ module LogStash
     Setting::Boolean.new("dead_letter_queue.enable", false),
     Setting::Bytes.new("dead_letter_queue.max_bytes", "1024mb"),
     Setting::Numeric.new("dead_letter_queue.flush_interval", 5000),
-    Setting::String.new("dead_letter_queue.storage_policy", "drop_newer", true, ["drop_newer", "drop_older"]),
-    Setting::NullableString.new("dead_letter_queue.retain.age"), # example 5d
+    Setting::SettingString.new("dead_letter_queue.storage_policy", "drop_newer", true, ["drop_newer", "drop_older"]),
+    Setting::SettingNullableString.new("dead_letter_queue.retain.age"), # example 5d
     Setting::TimeValue.new("slowlog.threshold.warn", "-1"),
     Setting::TimeValue.new("slowlog.threshold.info", "-1"),
     Setting::TimeValue.new("slowlog.threshold.debug", "-1"),
     Setting::TimeValue.new("slowlog.threshold.trace", "-1"),
-    Setting::String.new("keystore.classname", "org.logstash.secret.store.backend.JavaKeyStore"),
-    Setting::String.new("keystore.file", ::File.join(::File.join(LogStash::Environment::LOGSTASH_HOME, "config"), "logstash.keystore"), false), # will be populated on
-    Setting::NullableString.new("monitoring.cluster_uuid"),
-    Setting::String.new("pipeline.buffer.type", "direct", true, ["direct", "heap"])
+    Setting::SettingString.new("keystore.classname", "org.logstash.secret.store.backend.JavaKeyStore"),
+    Setting::SettingString.new("keystore.file", ::File.join(::File.join(LogStash::Environment::LOGSTASH_HOME, "config"), "logstash.keystore"), false), # will be populated on
+    Setting::SettingNullableString.new("monitoring.cluster_uuid"),
+    Setting::SettingString.new("pipeline.buffer.type", nil, false, ["direct", "heap"])
     # post_process
   ].each {|setting| SETTINGS.register(setting) }
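A minimal sketch of the renamed setting class in use (assuming the Java-backed Setting::SettingString keeps the semantics of the Ruby class it replaces; the trailing list constrains the allowed values):

    log_level = LogStash::Setting::SettingString.new(
      "log.level", "info", true,
      ["fatal", "error", "warn", "debug", "info", "trace"])
    log_level.value         #=> "info" (the default)
    log_level.set("debug")  #=> "debug"
    log_level.set("noisy")  # expected to raise, "noisy" is not an allowed option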
@@ -65,6 +65,7 @@ module LogStash; class JavaPipeline < AbstractPipeline
     @flushing = java.util.concurrent.atomic.AtomicBoolean.new(false)
     @flushRequested = java.util.concurrent.atomic.AtomicBoolean.new(false)
     @shutdownRequested = java.util.concurrent.atomic.AtomicBoolean.new(false)
+    @crash_detected = Concurrent::AtomicBoolean.new(false)
     @outputs_registered = Concurrent::AtomicBoolean.new(false)

     # @finished_execution signals that the pipeline thread has finished its execution

@@ -87,6 +88,10 @@ module LogStash; class JavaPipeline < AbstractPipeline
     @finished_execution.true?
   end

+  def finished_run?
+    @finished_run.true?
+  end
+
   def ready?
     @ready.value
   end

@@ -229,6 +234,10 @@ module LogStash; class JavaPipeline < AbstractPipeline
     @running.false?
   end

+  def crashed?
+    @crash_detected.true?
+  end
+
   # register_plugins calls #register_plugin on the plugins list and upon exception will call Plugin#do_close on all registered plugins
   # @param plugins [Array[Plugin]] the list of plugins to register
   def register_plugins(plugins)

@@ -305,6 +314,7 @@ module LogStash; class JavaPipeline < AbstractPipeline
   rescue => e
     # WorkerLoop.run() catches all Java Exception class and re-throws as IllegalStateException with the
     # original exception as the cause
+    @crash_detected.make_true
     @logger.error(
       "Pipeline worker error, the pipeline will be stopped",
       default_logging_keys(:error => e.cause.message, :exception => e.cause.class, :backtrace => e.cause.backtrace)

@@ -319,6 +329,7 @@ module LogStash; class JavaPipeline < AbstractPipeline
   begin
     start_inputs
   rescue => e
+    @crash_detected.make_true
     # if there is any exception in starting inputs, make sure we shutdown workers.
     # exception will already be logged in start_inputs
     shutdown_workers

@@ -628,7 +639,7 @@ module LogStash; class JavaPipeline < AbstractPipeline
   case settings.get("pipeline.ordered")
   when "auto"
     if settings.set?("pipeline.workers") && settings.get("pipeline.workers") == 1
-      @logger.warn("'pipeline.ordered' is enabled and is likely less efficient, consider disabling if preserving event order is not necessary")
+      @logger.warn("'pipeline.ordered' is enabled and is likely less efficient, consider disabling if preserving event order is not necessary") unless settings.get("pipeline.system")
       return true
     end
   when "true"
@@ -69,13 +69,13 @@ module LogStash module Modules class LogStashConfig
   # validate the values and replace them in the template.
   case default
   when String
-    get_setting(LogStash::Setting::NullableString.new(name, default.to_s))
+    get_setting(LogStash::Setting::SettingNullableString.new(name, default.to_s))
   when Numeric
     get_setting(LogStash::Setting::Numeric.new(name, default))
   when true, false
     get_setting(LogStash::Setting::Boolean.new(name, default))
   else
-    get_setting(LogStash::Setting::NullableString.new(name, default.to_s))
+    get_setting(LogStash::Setting::SettingNullableString.new(name, default.to_s))
   end
 end
@@ -79,8 +79,13 @@ module Clamp
   new_flag = opts[:new_flag]
   new_value = opts.fetch(:new_value, value)
   passthrough = opts.fetch(:passthrough, false)
+  obsoleted_version = opts[:obsoleted_version]

-  LogStash::DeprecationMessage.instance << "DEPRECATION WARNING: The flag #{option.switches} has been deprecated, please use \"--#{new_flag}=#{new_value}\" instead."
+  dmsg = "DEPRECATION WARNING: The flag #{option.switches} has been deprecated"
+  dmsg += obsoleted_version.nil? ? " and may be removed in a future release" : " and will be removed in version #{obsoleted_version}"
+  dmsg += new_flag.nil? ? "." : ", please use \"--#{new_flag}=#{new_value}\" instead."
+
+  LogStash::DeprecationMessage.instance << dmsg

   if passthrough
     LogStash::SETTINGS.set(option.attribute_name, value)
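Declared with the new option, a flag produces the versioned wording. A sketch with a hypothetical flag name:

    deprecated_option ["--example.flag"], "STRING", "example description",
      :new_flag => "example.new_flag", :passthrough => true,
      :obsoleted_version => "9"
    # => DEPRECATION WARNING: The flag ["--example.flag"] has been deprecated
    #    and will be removed in version 9, please use "--example.new_flag=<value>" instead.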
@@ -46,13 +46,21 @@ module LogStash module PipelineAction
     # The execute method assumes that thread-safe access to the pipeline
     # is managed by the caller.
     def execute(agent, pipelines_registry)
+      attach_health_indicator(agent)
       new_pipeline = LogStash::JavaPipeline.new(@pipeline_config, @metric, agent)
       success = pipelines_registry.create_pipeline(pipeline_id, new_pipeline) do
         new_pipeline.start # block until the pipeline is correctly started or crashed
       end

       LogStash::ConvergeResult::ActionResult.create(self, success)
     end

+    def attach_health_indicator(agent)
+      health_observer = agent.health_observer
+      health_observer.detach_pipeline_indicator(pipeline_id) # just in case ...
+      health_observer.attach_pipeline_indicator(pipeline_id, agent)
+    end
+
     def to_s
       "PipelineAction::Create<#{pipeline_id}>"
     end
@@ -27,10 +27,15 @@ module LogStash module PipelineAction
     def execute(agent, pipelines_registry)
       success = pipelines_registry.delete_pipeline(@pipeline_id)
+      detach_health_indicator(agent) if success

       LogStash::ConvergeResult::ActionResult.create(self, success)
     end

+    def detach_health_indicator(agent)
+      agent.health_observer.detach_pipeline_indicator(pipeline_id)
+    end
+
     def to_s
       "PipelineAction::Delete<#{pipeline_id}>"
     end
@@ -31,10 +31,15 @@ module LogStash module PipelineAction
     end

     success = pipelines_registry.delete_pipeline(@pipeline_id)
+    detach_health_indicator(agent) if success

     LogStash::ConvergeResult::ActionResult.create(self, success)
   end

+  def detach_health_indicator(agent)
+    agent.health_observer.detach_pipeline_indicator(pipeline_id)
+  end
+
   def to_s
     "PipelineAction::StopAndDelete<#{pipeline_id}>"
   end
@@ -28,6 +28,7 @@ module LogStash
       @lock = Monitor.new
     end

+    # a terminated pipeline has either crashed OR finished normally
     def terminated?
       @lock.synchronize do
         # a loading pipeline is never considered terminated

@@ -35,6 +36,20 @@ module LogStash
       end
     end

+    # a finished pipeline finished _normally_, without exception
+    def finished?
+      @lock.synchronize do
+        # a loading pipeline is never considered terminated
+        @loading.false? && @pipeline.finished_run?
+      end
+    end
+
+    def crashed?
+      @lock.synchronize do
+        @pipeline&.crashed?
+      end
+    end
+
     def running?
       @lock.synchronize do
         # not terminated and not loading

@@ -104,6 +119,7 @@ module LogStash
       end
     end

+
     def empty?
       @lock.synchronize do
         @states.empty?
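The three predicates partition the pipeline lifecycle cleanly. Illustrative expectations (a sketch; `state` stands for a PipelineState taken from the registry):

    # a finite pipeline that drained normally
    expect(state.terminated?).to be true
    expect(state.finished?).to be true
    expect(state.crashed?).to be false

    # a pipeline whose worker raised
    expect(state.terminated?).to be true
    expect(state.finished?).to be false
    expect(state.crashed?).to be true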
@@ -9,10 +9,11 @@ module LogStash
   def ecs_compatibility
     @_ecs_compatibility || LogStash::Util.synchronize(self) do
-      @_ecs_compatibility ||= begin
-        # use config_init-set value if present
-        break @ecs_compatibility unless @ecs_compatibility.nil?
+      # use config_init-set value if present
+      @_ecs_compatibility ||= @ecs_compatibility

+      # load default from settings
+      @_ecs_compatibility ||= begin
         pipeline = execution_context.pipeline
         pipeline_settings = pipeline && pipeline.settings
         pipeline_settings ||= LogStash::SETTINGS
@@ -92,10 +92,6 @@ class LogStash::Runner < Clamp::StrictCommand
     :default => LogStash::SETTINGS.get_default("config.field_reference.escape_style"),
     :attribute_name => "config.field_reference.escape_style"

-  option ["--event_api.tags.illegal"], "STRING",
-    I18n.t("logstash.runner.flag.event_api.tags.illegal"),
-    :default => LogStash::SETTINGS.get_default("event_api.tags.illegal"),
-    :attribute_name => "event_api.tags.illegal"
-
   # Module settings
   option ["--modules"], "MODULES",

@@ -267,6 +263,12 @@ class LogStash::Runner < Clamp::StrictCommand
     I18n.t("logstash.runner.flag.http_port"),
     :new_flag => "api.http.port", :passthrough => true # use settings to disambiguate

+  deprecated_option ["--event_api.tags.illegal"], "STRING",
+    I18n.t("logstash.runner.flag.event_api.tags.illegal"),
+    :default => LogStash::SETTINGS.get_default("event_api.tags.illegal"),
+    :attribute_name => "event_api.tags.illegal", :passthrough => true,
+    :obsoleted_version => "9"
+
   # We configure the registry and load any plugin that can register hooks
   # with logstash, this needs to be done before any operation.
   SYSTEM_SETTINGS = LogStash::SETTINGS.clone

@@ -310,9 +312,17 @@ class LogStash::Runner < Clamp::StrictCommand
     if setting("config.debug") && !logger.debug?
       logger.warn("--config.debug was specified, but log.level was not set to 'debug'! No config info will be logged.")
     end
-    if setting("pipeline.buffer.type") != nil
-      configure_pipeline_buffer_type
+    if setting("pipeline.buffer.type") == nil
+      deprecation_logger.deprecated(
+        "'pipeline.buffer.type' setting is not explicitly defined. "\
+        "Before moving to 9.x set it to 'heap' and tune heap size upward, or set it to 'direct' to maintain existing behavior."
+      )
+
+      # set to direct to keep backward compatibility
+      buffer_type_setting = @settings.get_setting("pipeline.buffer.type")
+      buffer_type_setting.set("direct")
     end
+    configure_pipeline_buffer_type

     while (msg = LogStash::DeprecationMessage.instance.shift)
       deprecation_logger.deprecated msg

@@ -340,8 +350,8 @@ class LogStash::Runner < Clamp::StrictCommand
     # Add local modules to the registry before everything else
     LogStash::Modules::Util.register_local_modules(LogStash::Environment::LOGSTASH_HOME)

-    # Set up the Jackson defaults
-    LogStash::Util::Jackson.set_jackson_defaults(logger)
+    # Verify the Jackson defaults
+    LogStash::Util::Jackson.verify_jackson_overrides

     @dispatcher = LogStash::EventDispatcher.new(self)
     LogStash::PLUGIN_REGISTRY.hooks.register_emitter(self.class, @dispatcher)
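Until 9.x, the new deprecation notice can be silenced by pinning the setting explicitly, e.g. in logstash.yml:

    pipeline.buffer.type: direct   # today's behavior; set "heap" to prepare for 9.x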
@@ -86,7 +86,10 @@ module LogStash
     end

     def register(setting)
-      return setting.map { |s| register(s) } if setting.kind_of?(Array)
+      # Method #with_deprecated_alias returns a collection containing a couple of other settings.
+      # The Ruby implementation returns an Array while the Java implementation returns a List,
+      # hence the following type check before going one layer deeper.
+      return setting.map { |s| register(s) } if setting.kind_of?(Array) || setting.kind_of?(java.util.List)

       if @settings.key?(setting.name)
         raise ArgumentError.new("Setting \"#{setting.name}\" has already been registered as #{setting.inspect}")

@@ -151,7 +154,7 @@ module LogStash
     def to_hash
       hash = {}
       @settings.each do |name, setting|
-        next if setting.kind_of? Setting::DeprecatedAlias
+        next if (setting.kind_of? Setting::DeprecatedAlias) || (setting.kind_of? Java::org.logstash.settings.DeprecatedAlias)
         hash[name] = setting.value
       end
       hash

@@ -244,54 +247,73 @@ module LogStash
   class Setting
     include LogStash::Settings::LOGGABLE_PROXY

-    attr_reader :name, :default
+    attr_reader :wrapped_setting

     def initialize(name, klass, default = nil, strict = true, &validator_proc)
-      @name = name
       unless klass.is_a?(Class)
-        raise ArgumentError.new("Setting \"#{@name}\" must be initialized with a class (received #{klass})")
+        raise ArgumentError.new("Setting \"#{name}\" must be initialized with a class (received #{klass})")
       end
+      setting_builder = Java::org.logstash.settings.BaseSetting.create(name)
+                            .defaultValue(default)
+                            .strict(strict)
+      if validator_proc
+        setting_builder = setting_builder.validator(validator_proc)
+      end
+
+      @wrapped_setting = setting_builder.build()
+
       @klass = klass
       @validator_proc = validator_proc
-      @value = nil
-      @value_is_set = false
-      @strict = strict

-      validate(default) if @strict
-      @default = default
+      validate(default) if strict?
     end

+    def default
+      @wrapped_setting.default
+    end
+
+    def name
+      @wrapped_setting.name
+    end
+
+    def initialize_copy(original)
+      @wrapped_setting = original.wrapped_setting.clone
+    end
+
+    # To be used only internally
+    def update_wrapper(wrapped_setting)
+      @wrapped_setting = wrapped_setting
+    end
+
     def value
-      @value_is_set ? @value : default
+      @wrapped_setting.value()
     end

     def set?
-      @value_is_set
+      @wrapped_setting.set?
     end

     def strict?
-      @strict
+      @wrapped_setting.strict?
     end

     def set(value)
-      validate(value) if @strict
-      @value = value
-      @value_is_set = true
-      @value
+      validate(value) if strict?
+      @wrapped_setting.set(value)
+      @wrapped_setting.value
     end

     def reset
-      @value = nil
-      @value_is_set = false
+      @wrapped_setting.reset
     end

     def to_hash
       {
-        "name" => @name,
+        "name" => @wrapped_setting.name,
         "klass" => @klass,
-        "value" => @value,
-        "value_is_set" => @value_is_set,
-        "default" => @default,
+        "value" => @wrapped_setting.value,
+        "value_is_set" => @wrapped_setting.set?,
+        "default" => @wrapped_setting.default,
         # Proc#== will only return true if it's the same obj
         # so there's no point in comparing it
         # also there's no use case atm to return the proc

@@ -301,7 +323,7 @@ module LogStash
     end

     def inspect
-      "<#{self.class.name}(#{name}): #{value.inspect}" + (@value_is_set ? '' : ' (DEFAULT)') + ">"
+      "<#{self.class.name}(#{name}): #{value.inspect}" + (@wrapped_setting.set? ? '' : ' (DEFAULT)') + ">"
     end

     def ==(other)

@@ -323,58 +345,65 @@ module LogStash
     end

     def format(output)
-      effective_value = self.value
-      default_value = self.default
-      setting_name = self.name
-
-      if default_value == value # print setting and its default value
-        output << "#{setting_name}: #{effective_value.inspect}" unless effective_value.nil?
-      elsif default_value.nil? # print setting and warn it has been set
-        output << "*#{setting_name}: #{effective_value.inspect}"
-      elsif effective_value.nil? # default setting not set by user
-        output << "#{setting_name}: #{default_value.inspect}"
-      else # print setting, warn it has been set, and show default value
-        output << "*#{setting_name}: #{effective_value.inspect} (default: #{default_value.inspect})"
-      end
+      @wrapped_setting.format(output)
+    end
+
+    def clone(*args)
+      copy = self.dup
+      copy.update_wrapper(@wrapped_setting.clone())
+      copy
     end

     protected
     def validate(input)
       if !input.is_a?(@klass)
-        raise ArgumentError.new("Setting \"#{@name}\" must be a #{@klass}. Received: #{input} (#{input.class})")
+        raise ArgumentError.new("Setting \"#{@wrapped_setting.name}\" must be a #{@klass}. Received: #{input} (#{input.class})")
       end

       if @validator_proc && !@validator_proc.call(input)
-        raise ArgumentError.new("Failed to validate setting \"#{@name}\" with value: #{input}")
+        raise ArgumentError.new("Failed to validate setting \"#{@wrapped_setting.name}\" with value: #{input}")
       end
     end

     class Coercible < Setting
       def initialize(name, klass, default = nil, strict = true, &validator_proc)
-        @name = name
         unless klass.is_a?(Class)
-          raise ArgumentError.new("Setting \"#{@name}\" must be initialized with a class (received #{klass})")
+          raise ArgumentError.new("Setting \"#{name}\" must be initialized with a class (received #{klass})")
         end

         @klass = klass
         @validator_proc = validator_proc
-        @value = nil
-        @value_is_set = false

+        # needed to have the name method accessible when invoking validate
+        @wrapped_setting = Java::org.logstash.settings.BaseSetting.create(name)
+                               .defaultValue(default)
+                               .strict(strict)
+                               .build()
+
         if strict
           coerced_default = coerce(default)
           validate(coerced_default)
-          @default = coerced_default
+          updated_default = coerced_default
         else
-          @default = default
+          updated_default = default
         end
+
+        # default value must be coerced to the right type before being set
+        setting_builder = Java::org.logstash.settings.BaseSetting.create(name)
+                              .defaultValue(updated_default)
+                              .strict(strict)
+        if validator_proc
+          setting_builder = setting_builder.validator(validator_proc)
+        end
+
+        @wrapped_setting = setting_builder.build()
       end

       def set(value)
         coerced_value = coerce(value)
         validate(coerced_value)
-        @value = coerce(coerced_value)
-        @value_is_set = true
-        @value
+        @wrapped_setting.set(coerced_value)
+        coerced_value
       end

       def coerce(value)

@@ -383,22 +412,7 @@ module LogStash
     end
     ### Specific settings #####

-    class Boolean < Coercible
-      def initialize(name, default, strict = true, &validator_proc)
-        super(name, Object, default, strict, &validator_proc)
-      end
-
-      def coerce(value)
-        case value
-        when TrueClass, "true"
-          true
-        when FalseClass, "false"
-          false
-        else
-          raise ArgumentError.new("could not coerce #{value} into a boolean")
-        end
-      end
-    end
+    java_import org.logstash.settings.Boolean

     class Numeric < Coercible
       def initialize(name, default = nil, strict = true)

@@ -509,27 +523,10 @@ module LogStash
         @validator_class.validate(value)
       end
     end

+    java_import org.logstash.settings.SettingString

-    class String < Setting
-      def initialize(name, default = nil, strict = true, possible_strings = [])
-        @possible_strings = possible_strings
-        super(name, ::String, default, strict)
-      end
-
-      def validate(value)
-        super(value)
-        unless @possible_strings.empty? || @possible_strings.include?(value)
-          raise ArgumentError.new("Invalid value \"#{name}: #{value}\". Options are: #{@possible_strings.inspect}")
-        end
-      end
-    end
-
-    class NullableString < String
-      def validate(value)
-        return if value.nil?
-        super(value)
-      end
-    end
+    java_import org.logstash.settings.SettingNullableString

     class Password < Coercible
       def initialize(name, default = nil, strict = true)

@@ -733,15 +730,15 @@ module LogStash
       protected
       def validate(input)
         if !input.is_a?(@klass)
-          raise ArgumentError.new("Setting \"#{@name}\" must be a #{@klass}. Received: #{input} (#{input.class})")
+          raise ArgumentError.new("Setting \"#{@wrapped_setting.name}\" must be a #{@klass}. Received: #{input} (#{input.class})")
         end

         unless input.all? {|el| el.kind_of?(@element_class) }
-          raise ArgumentError.new("Values of setting \"#{@name}\" must be #{@element_class}. Received: #{input.map(&:class)}")
+          raise ArgumentError.new("Values of setting \"#{@wrapped_setting.name}\" must be #{@element_class}. Received: #{input.map(&:class)}")
         end

         if @validator_proc && !@validator_proc.call(input)
-          raise ArgumentError.new("Failed to validate setting \"#{@name}\" with value: #{input}")
+          raise ArgumentError.new("Failed to validate setting \"#{@wrapped_setting.name}\" with value: #{input}")
         end
       end
     end

@@ -782,7 +779,7 @@ module LogStash
       return unless invalid_value.any?

       raise ArgumentError,
-        "Failed to validate the setting \"#{@name}\" value(s): #{invalid_value.inspect}. Valid options are: #{@possible_strings.inspect}"
+        "Failed to validate the setting \"#{@wrapped_setting.name}\" value(s): #{invalid_value.inspect}. Valid options are: #{@possible_strings.inspect}"
     end
   end

@@ -792,9 +789,9 @@ module LogStash
     end

     def set(value)
-      @value = coerce(value)
-      @value_is_set = true
-      @value
+      coerced_value = coerce(value)
+      @wrapped_setting.set(coerced_value)
+      coerced_value
     end

     def coerce(value)

@@ -810,6 +807,8 @@ module LogStash
       end
     end

+    java_import org.logstash.settings.NullableSetting
+
     # @see Setting#nullable
     # @api internal
     class Nullable < SimpleDelegator

@@ -839,8 +838,7 @@ module LogStash
       @canonical_proxy = canonical_proxy

       clone = @canonical_proxy.canonical_setting.clone
-      clone.instance_variable_set(:@name, alias_name)
-      clone.instance_variable_set(:@default, nil)
+      clone.update_wrapper(clone.wrapped_setting.deprecate(alias_name))

       super(clone)
     end
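A usage sketch of the facade ("example.flag" is a hypothetical setting name; the behavior assumes the wrapped Java BaseSetting mirrors the removed Ruby state handling):

    setting = LogStash::Setting.new("example.flag", Object, "fallback")
    setting.set?    #=> false
    setting.value   #=> "fallback" (served by the wrapped Java setting)
    setting.set("custom")
    setting.set?    #=> true
    setting.reset
    setting.value   #=> "fallback"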
@@ -18,76 +18,13 @@
 module LogStash
   module Util
     module Jackson
-      def self.set_jackson_defaults(logger)
-        JacksonStreamReadConstraintsDefaults.new(logger).configure
+      def self.verify_jackson_overrides
+        java_import org.logstash.ObjectMappers
+
+        ObjectMappers::getConfiguredStreamReadConstraints().validateIsGlobalDefault()
       end
-
-      class JacksonStreamReadConstraintsDefaults
-
-        java_import com.fasterxml.jackson.core.StreamReadConstraints
-
-        PROPERTY_MAX_STRING_LENGTH = 'logstash.jackson.stream-read-constraints.max-string-length'.freeze
-        PROPERTY_MAX_NUMBER_LENGTH = 'logstash.jackson.stream-read-constraints.max-number-length'.freeze
-        PROPERTY_MAX_NESTING_DEPTH = 'logstash.jackson.stream-read-constraints.max-nesting-depth'.freeze
-
-        def initialize(logger)
-          @logger = logger
-        end
-
-        public
-
-        def configure
-          max_string_len = get_default_value_override!(PROPERTY_MAX_STRING_LENGTH)
-          max_num_len = get_default_value_override!(PROPERTY_MAX_NUMBER_LENGTH)
-          max_nesting_depth = get_default_value_override!(PROPERTY_MAX_NESTING_DEPTH)
-
-          if max_string_len || max_num_len || max_nesting_depth
-            begin
-              override_default_stream_read_constraints(max_string_len, max_num_len, max_nesting_depth)
-            rescue java.lang.IllegalArgumentException => e
-              raise LogStash::ConfigurationError, "Invalid `logstash.jackson.*` system properties configuration: #{e.message}"
-            end
-          end
-        end
-
-        private
-
-        def get_default_value_override!(property)
-          value = get_property_value(property)
-          return if value.nil?
-
-          begin
-            int_value = java.lang.Integer.parseInt(value)
-
-            if int_value < 1
-              raise LogStash::ConfigurationError, "System property '#{property}' must be bigger than zero. Received: #{int_value}"
-            end
-
-            @logger.info("Jackson default value override `#{property}` configured to `#{int_value}`")
-
-            int_value
-          rescue java.lang.NumberFormatException => _e
-            raise LogStash::ConfigurationError, "System property '#{property}' must be a positive integer value. Received: #{value}"
-          end
-        end
-
-        def get_property_value(name)
-          java.lang.System.getProperty(name)
-        end
-
-        def override_default_stream_read_constraints(max_string_len, max_num_len, max_nesting_depth)
-          builder = new_stream_read_constraints_builder
-          builder.maxStringLength(max_string_len) if max_string_len
-          builder.maxNumberLength(max_num_len) if max_num_len
-          builder.maxNestingDepth(max_nesting_depth) if max_nesting_depth
-
-          StreamReadConstraints.overrideDefaultStreamReadConstraints(builder.build)
-        end
-
-        def new_stream_read_constraints_builder
-          StreamReadConstraints::builder
-        end
-      end
     end
   end
 end
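The tunables still enter through system properties (the property names come from the removed Ruby code; the values below are illustrative). With validation moved to the Java side, overrides are expected to be in place before ObjectMappers initializes, e.g. via config/jvm.options:

    -Dlogstash.jackson.stream-read-constraints.max-string-length=200000000
    -Dlogstash.jackson.stream-read-constraints.max-number-length=10000
    -Dlogstash.jackson.stream-read-constraints.max-nesting-depth=1000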
@@ -33,8 +33,8 @@ module LogStash::Util::SettingsHelper
   # The `path.settings` and `path.logs` can not be defined in "logstash/environment" since the `Environment::LOGSTASH_HOME` doesn't
   # exist unless launched via "bootstrap/environment"
   def self.pre_process
-    LogStash::SETTINGS.register(LogStash::Setting::String.new("path.settings", ::File.join(LogStash::Environment::LOGSTASH_HOME, "config")))
-    LogStash::SETTINGS.register(LogStash::Setting::String.new("path.logs", ::File.join(LogStash::Environment::LOGSTASH_HOME, "logs")))
+    LogStash::SETTINGS.register(LogStash::Setting::SettingString.new("path.settings", ::File.join(LogStash::Environment::LOGSTASH_HOME, "config")))
+    LogStash::SETTINGS.register(LogStash::Setting::SettingString.new("path.logs", ::File.join(LogStash::Environment::LOGSTASH_HOME, "logs")))
   end

   # Ensure that any settings are re-calculated after loading yaml
@@ -51,7 +51,7 @@ module ::LogStash::Util::SubstitutionVariables
   # If the value does not match the pattern, the 'value' param returns as-is
   # When setting refine to true, substituted value will be cleaned against escaped single/double quotes
   # and generates array if resolved substituted value is array string
-  def replace_placeholders(value, refine)
+  def replace_placeholders(value, refine = false)
     if value.kind_of?(::LogStash::Util::Password)
       interpolated = replace_placeholders(value.value, refine)
       return ::LogStash::Util::Password.new(interpolated)

@@ -87,15 +87,33 @@ module ::LogStash::Util::SubstitutionVariables
     # ENV ${var} value may carry single quote or escaped double quote
     # or single/double quoted entries in array string, needs to be refined
-    refined_value = placeholder_value.gsub(/[\\"\\']/, '')
+    refined_value = strip_enclosing_char(strip_enclosing_char(placeholder_value, "'"), '"')
     if refined_value.start_with?('[') && refined_value.end_with?(']')
       # remove square brackets, split by comma and cleanup leading/trailing whitespace
-      refined_value[1..-2].split(',').map(&:strip)
+      refined_array = refined_value[1..-2].split(',').map(&:strip)
+      refined_array.each_with_index do |str, index|
+        refined_array[index] = strip_enclosing_char(strip_enclosing_char(str, "'"), '"')
+      end
+      refined_array
     else
       refined_value
     end
   end # def replace_placeholders

+  private
+
+  # removes remove_char from string_value if string_value is wrapped with remove_char
+  def strip_enclosing_char(string_value, remove_char)
+    return string_value unless string_value.is_a?(String)
+    return string_value if string_value.empty?
+
+    if string_value.start_with?(remove_char) && string_value.end_with?(remove_char)
+      string_value[1..-2] # Remove the first and last characters
+    else
+      string_value
+    end
+  end
+
   class << self
     private
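Illustrative behavior of the refined substitution (hypothetical ENV values):

    ENV["ES_HOSTS"] = "['es01:9200', \"es02:9200\"]"
    replace_placeholders("${ES_HOSTS}", true)   #=> ["es01:9200", "es02:9200"]

    ENV["QUEUE_TYPE"] = "'persisted'"
    replace_placeholders("${QUEUE_TYPE}", true) #=> "persisted"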
@@ -141,7 +141,7 @@ en:
       logstash's configuration before you choose to restart a running system.
     tags-illegal-warning: >-
       Setting `event_api.tags.illegal` to `warn` allows illegal values in the reserved `tags` field, which may crash pipeline unexpectedly.
-      This flag value is deprecated and may be removed in a future release.
+      This flag is deprecated and will be removed in version 9.
     # YAML named reference to the logstash.runner.configuration
     # so we can later alias it from logstash.agent.configuration
     configuration: &runner_configuration
@@ -17,7 +17,13 @@ if File.exist?(project_versions_yaml_path)
   # we ignore the copy in git and we overwrite an existing file
   # each time we build the logstash-core gem
   original_lines = IO.readlines(project_versions_yaml_path)
-  original_lines << ""
+  # introduce the version qualifier (e.g. beta1, rc1) into the copied yml so it's displayed by Logstash
+  unless ENV['VERSION_QUALIFIER'].to_s.strip.empty?
+    logstash_version_line = original_lines.find {|line| line.match(/^logstash:/) }
+    logstash_version_line.chomp!
+    logstash_version_line << "-#{ENV['VERSION_QUALIFIER']}\n"
+  end
+  original_lines << "\n"
   original_lines << "# This is a copy the project level versions.yml into this gem's root and it is created when the gemspec is evaluated."
   gem_versions_yaml_path = File.expand_path("./versions-gem-copy.yml", File.dirname(__FILE__))
   File.open(gem_versions_yaml_path, 'w') do |new_file|

@@ -57,7 +63,7 @@ Gem::Specification.new do |gem|
   gem.add_runtime_dependency "sinatra", '~> 4'
   gem.add_runtime_dependency 'puma', '~> 6.3', '>= 6.4.2'
   gem.add_runtime_dependency 'ruby-maven-libs', '~> 3', '>= 3.8.9'

   gem.add_runtime_dependency "jar-dependencies", '= 0.4.1' # Pin to `0.4.1` until https://github.com/jruby/jruby/issues/7262 is resolved

   gem.add_runtime_dependency "treetop", "~> 1" #(MIT license)

@@ -66,7 +72,7 @@ Gem::Specification.new do |gem|
   gem.add_runtime_dependency "thwait"

   # filetools and rakelib
-  gem.add_runtime_dependency "minitar", "~> 0.8"
+  gem.add_runtime_dependency "minitar", "~> 1"
   gem.add_runtime_dependency "rubyzip", "~> 1"
   gem.add_runtime_dependency "thread_safe", "~> 0.3.6" #(Apache 2.0 license)
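The qualifier propagates into the copied YAML. Illustrative effect (version number hypothetical): building with VERSION_QUALIFIER=beta1 rewrites the line

    logstash: 9.0.0

in versions-gem-copy.yml to

    logstash: 9.0.0-beta1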
@@ -35,8 +35,8 @@ describe LogStash::Api::Commands::DefaultMetadata do
   before :all do
     registerIfNot(LogStash::Setting::Boolean.new("xpack.monitoring.enabled", false))
     registerIfNot(LogStash::Setting::ArrayCoercible.new("xpack.monitoring.elasticsearch.hosts", String, ["http://localhost:9200"]))
-    registerIfNot(LogStash::Setting::NullableString.new("xpack.monitoring.elasticsearch.username", "logstash_TEST system"))
-    registerIfNot(LogStash::Setting::NullableString.new("xpack.monitoring.elasticsearch.username", "logstash_TEST system"))
+    registerIfNot(LogStash::Setting::SettingNullableString.new("xpack.monitoring.elasticsearch.username", "logstash_TEST system"))
+    registerIfNot(LogStash::Setting::SettingNullableString.new("xpack.monitoring.elasticsearch.username", "logstash_TEST system"))
   end

   after :each do
@@ -443,9 +443,8 @@ describe LogStash::JavaPipeline do
     LogStash::PLUGIN_REGISTRY.add(:output, "spec_sampler_output", PipelineHelpers::SpecSamplerOutput)
   end

-  describe "given a pipeline executing an event that would trigger an evaluation error" do
+  context "given a pipeline executing an event that would trigger an evaluation error" do
     let(:pipeline) do
-      settings.set_value("queue.drain", true)
       LogStash::JavaPipeline.new(
         org.logstash.config.ir.PipelineConfig.new(
           LogStash::Config::Source::Local, :main,

@@ -470,11 +469,50 @@ describe LogStash::JavaPipeline do
       pipeline.close
     end

-    subject {results.length > 1 ? results : results.first}
+    describe "when DLQ is disabled" do
+      let(:settings) do
+        s = super()
+        s.set_value("queue.drain", true)
+        s
+      end

-    it "should raise an error without killing the pipeline" do
-      expect(subject).to be nil
-      expect(pipeline.last_error_evaluation_received).to match(/no implicit conversion of nil into Integer/)
+      subject {results.length > 1 ? results : results.first}
+
+      it "should raise an error without killing the pipeline" do
+        expect(subject).to be nil
+        expect(pipeline.last_error_evaluation_received).to match(/no implicit conversion of nil into Integer/)
+      end
+    end
+
+    describe "when DLQ is enabled" do
+      let(:dlq_path) { Dir.mktmpdir }
+
+      let(:settings) do
+        s = super()
+        s.set_value("queue.drain", true)
+        s.set_value("pipeline.id", "test_dlq")
+        s.set_value("dead_letter_queue.enable", true)
+        s.set_value("path.dead_letter_queue", dlq_path)
+        s
+      end
+
+      after do
+        FileUtils.rm_rf(settings.get_value("path.dead_letter_queue"))
+      end
+
+      subject {results.length > 1 ? results : results.first}
+
+      it "should raise an error without killing the pipeline and insert the event into DLQ" do
+        expect(subject).to be nil
+        expect(pipeline.last_error_evaluation_received).to match(/no implicit conversion of nil into Integer/)
+        dlq_path = java.nio.file.Paths.get(settings.get_value("path.dead_letter_queue"), "test_dlq")
+        dlq_reader = org.logstash.common.io.DeadLetterQueueReader.new(dlq_path)
+        entry = dlq_reader.pollEntry(40)
+        expect(entry).to_not be_nil
+        expect(entry.reason).to match(/condition evaluation error.*no implicit conversion of nil into Integer/)
+        expect(entry.plugin_id).to eq("if-statement")
+        expect(entry.plugin_type).to eq("if-statement")
+      end
     end
   end

@@ -549,6 +587,7 @@ describe LogStash::JavaPipeline do
     # wait until there is no more worker thread since we have a single worker that should have died
     wait(5).for {subject.worker_threads.any?(&:alive?)}.to be_falsey
+    expect(subject.crashed?).to be true

     # at this point the input plugin should have been asked to stop
     wait(5).for {dummyinput.stop?}.to be_truthy

@@ -576,6 +615,7 @@ describe LogStash::JavaPipeline do
     # wait until there is no more worker thread since we have a single worker that should have died
     wait(5).for {subject.worker_threads.any?(&:alive?)}.to be_falsey
+    expect(subject.crashed?).to be true

     # at this point the input plugin should have been asked to stop
     wait(5).for {dummyinput.stop?}.to be_truthy

@@ -741,6 +781,7 @@ describe LogStash::JavaPipeline do
       expect(input).to receive(:do_close).once
       pipeline.start
       pipeline.shutdown
+      expect(pipeline.crashed?).to be false
     end
   end
 end
@@ -30,6 +30,7 @@ describe LogStash::PipelineAction::Create do
   before do
     clear_data_dir
+    allow(agent).to receive(:health_observer).and_return(double("HealthObserver").as_null_object)
   end

   subject { described_class.new(pipeline_config, metric) }

@@ -66,6 +67,11 @@ describe LogStash::PipelineAction::Create do
     it "returns a successful execution status" do
       expect(subject.execute(agent, pipelines)).to be_truthy
     end

+    it "attaches an indicator to the agent's health observer" do
+      expect(agent.health_observer).to receive(:attach_pipeline_indicator).with(:main, agent)
+      subject.execute(agent, pipelines)
+    end
   end

   context "when the pipeline doesn't start" do
logstash-core/spec/logstash/pipeline_action/delete_spec.rb (new file, 78 lines)
@@ -0,0 +1,78 @@
# [Apache License 2.0 header]

require "spec_helper"
require_relative "../../support/helpers"
require_relative "../../support/matchers"
require "logstash/pipelines_registry"
require "logstash/pipeline_action/delete"
require "logstash/inputs/generator"


describe LogStash::PipelineAction::Delete do
  let(:pipeline_config) { "input { dummyblockinginput {} } output { null {} }" }
  let(:pipeline_id) { :main }
  let(:pipeline) { mock_java_pipeline_from_string(pipeline_config) }
  let(:pipelines) do
    LogStash::PipelinesRegistry.new.tap do |chm|
      chm.create_pipeline(pipeline_id, pipeline) { true }
    end
  end
  let(:agent) { double("agent") }

  subject { described_class.new(pipeline_id) }

  before do
    clear_data_dir
    allow(agent).to receive(:health_observer).and_return(double("HealthObserver").as_null_object)
    pipeline.start
  end

  after do
    pipeline.shutdown
  end

  it "returns the pipeline_id" do
    expect(subject.pipeline_id).to eq(:main)
  end

  context "when the pipeline is still running" do
    it 'fails to delete the pipeline' do
      action_result = subject.execute(agent, pipelines)
      expect(action_result).to_not be_successful

      expect(pipelines.get_pipeline(pipeline_id)).to_not be_nil
    end
  end

  context "when the pipeline has completed" do
    let(:pipeline_config) { "input { generator { count => 1 } } output { null {} }" }

    before(:each) do
      sleep(0.1) until pipelines.non_running_pipelines.keys.include?(pipeline_id)
    end

    it 'deletes the pipeline' do
      action_result = subject.execute(agent, pipelines)
      expect(action_result).to be_successful

      expect(pipelines.get_pipeline(pipeline_id)).to be_nil
      expect(agent.health_observer).to have_received(:detach_pipeline_indicator).with(pipeline_id)
    end
  end
end
@@ -0,0 +1,79 @@
# [Apache License 2.0 header]

require "spec_helper"
require_relative "../../support/helpers"
require_relative "../../support/matchers"
require "logstash/pipelines_registry"
require "logstash/pipeline_action/delete"
require "logstash/inputs/generator"


describe LogStash::PipelineAction::StopAndDelete do
  let(:pipeline_config) { "input { dummyblockinginput {} } output { null {} }" }
  let(:pipeline_id) { :main }
  let(:pipeline) { mock_java_pipeline_from_string(pipeline_config) }
  let(:pipelines) do
    LogStash::PipelinesRegistry.new.tap do |chm|
      chm.create_pipeline(pipeline_id, pipeline) { true }
    end
  end
  let(:agent) { double("agent") }

  subject { described_class.new(pipeline_id) }

  before do
    clear_data_dir
    allow(agent).to receive(:health_observer).and_return(double("HealthObserver").as_null_object)
    pipeline.start
  end

  after do
    pipeline.shutdown
  end

  it "returns the pipeline_id" do
    expect(subject.pipeline_id).to eq(:main)
  end

  context "when the pipeline is still running" do
    it 'stops and deletes the pipeline' do
      action_result = subject.execute(agent, pipelines)
      expect(action_result).to be_successful

      expect(pipelines.get_pipeline(pipeline_id)).to be_nil
      expect(agent.health_observer).to have_received(:detach_pipeline_indicator).with(pipeline_id)
    end
  end

  context "when the pipeline has completed" do
    let(:pipeline_config) { "input { generator { count => 1 } } output { null {} }" }

    before(:each) do
      sleep(0.1) until pipelines.non_running_pipelines.keys.include?(pipeline_id)
    end

    it 'deletes the pipeline' do
      action_result = subject.execute(agent, pipelines)
      expect(action_result).to be_successful

      expect(pipelines.get_pipeline(pipeline_id)).to be_nil

      expect(agent.health_observer).to have_received(:detach_pipeline_indicator).with(pipeline_id)
    end
  end
end
@@ -23,7 +23,7 @@ describe LogStash::QueueFactory do
   let(:settings_array) do
     [
       LogStash::Setting::WritableDirectory.new("path.queue", Stud::Temporary.pathname),
-      LogStash::Setting::String.new("queue.type", "memory", true, ["persisted", "memory"]),
+      LogStash::Setting::SettingString.new("queue.type", "memory", true, ["persisted", "memory"]),
       LogStash::Setting::Bytes.new("queue.page_capacity", "8mb"),
       LogStash::Setting::Bytes.new("queue.max_bytes", "64mb"),
       LogStash::Setting::Numeric.new("queue.max_events", 0),

@@ -31,7 +31,7 @@ describe LogStash::QueueFactory do
       LogStash::Setting::Numeric.new("queue.checkpoint.writes", 1024),
       LogStash::Setting::Numeric.new("queue.checkpoint.interval", 1000),
      LogStash::Setting::Boolean.new("queue.checkpoint.retry", false),
-      LogStash::Setting::String.new("pipeline.id", pipeline_id),
+      LogStash::Setting::SettingString.new("pipeline.id", pipeline_id),
       LogStash::Setting::PositiveInteger.new("pipeline.batch.size", 125),
       LogStash::Setting::PositiveInteger.new("pipeline.workers", LogStash::Config::CpuCoreStrategy.maximum)
     ]
@@ -265,8 +265,38 @@ describe LogStash::Runner do

     context "when deprecated :http.host is defined by the user" do
       let(:args) { ["--http.host", "localhost", "-e", pipeline_string] }
+      let(:events) { [] }
+
+      before(:each) do
+        java_import org.apache.logging.log4j.LogManager
+        logger = LogManager.getLogger("org.logstash.settings.DeprecatedAlias")
+        deprecated_logger = LogManager.getLogger("org.logstash.deprecation.settings.DeprecatedAlias")
+
+        @custom_appender = CustomAppender.new(events).tap { |appender| appender.start }
+
+        java_import org.apache.logging.log4j.Level
+        logger.addAppender(@custom_appender)
+        deprecated_logger.addAppender(@custom_appender)
+        # the level has to be set after appending, as it was "error" for some reason
+        logger.setLevel(Level::INFO)
+        deprecated_logger.setLevel(Level::INFO)
+
+        expect(@custom_appender.started?).to be_truthy
+      end
+
+      after(:each) do
+        events.clear
+        java_import org.apache.logging.log4j.LogManager
+        logger = LogManager.getLogger("org.logstash.settings.DeprecatedAlias")
+        deprecated_logger = LogManager.getLogger("org.logstash.deprecation.settings.DeprecatedAlias")
+        # The logger's AbstractConfiguration contains a cache of appenders, keyed by class name. The cache
+        # is only updated if the key is absent, so to make subsequent add_appender calls effective we have
+        # to clean up on teardown; otherwise the new appender instance is simply not used by the logger.
+        logger.remove_appender(@custom_appender)
+        deprecated_logger.remove_appender(@custom_appender)
+      end
+
       it "creates an Agent whose `api.http.host` uses the provided value and provides helpful deprecation message" do
         expect(deprecation_logger_stub).to receive(:deprecated).with(a_string_including "`http.host` is a deprecated alias for `api.http.host`")
         expect(runner_deprecation_logger_stub).to receive(:deprecated).with(a_string_including 'The flag ["--http.host"] has been deprecated')
         expect(LogStash::Agent).to receive(:new) do |settings|
           expect(settings.set?("api.http.host")).to be(true)
@@ -274,6 +304,9 @@ describe LogStash::Runner do
         end

         subject.run("bin/logstash", args)
+
+        expect(events).not_to be_empty
+        expect(events[0]).to match(/`http.host` is a deprecated alias for `api.http.host`/)
       end
     end

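Note: the `CustomAppender` referenced in the setup above is a test helper whose implementation is not shown in this diff; it records every formatted log message into the shared `events` array. A minimal sketch of such a Log4j2 appender, with a class name of our own choosing, might look like this:

    import org.apache.logging.log4j.core.LogEvent;
    import org.apache.logging.log4j.core.appender.AbstractAppender;
    import org.apache.logging.log4j.core.config.Property;

    import java.util.List;

    // Minimal in-memory appender for tests: each log event's formatted
    // message is appended to a caller-provided list so specs can assert on it.
    public class ListAppender extends AbstractAppender {
        private final List<String> events;

        public ListAppender(final List<String> events) {
            super("ListAppender", null, null, true, Property.EMPTY_ARRAY);
            this.events = events;
        }

        @Override
        public void append(final LogEvent event) {
            events.add(event.getMessage().getFormattedMessage());
        }
    }

Like the appender used in these specs, it must be start-ed (a method inherited from Log4j's AbstractLifeCycle) before the logger routes events to it.
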
@@ -380,6 +413,32 @@ describe LogStash::Runner do
       end
     end

+    context "event_api.tags.illegal" do
+      let(:runner_deprecation_logger_stub) { double("DeprecationLogger(Runner)").as_null_object }
+      let(:args) { ["--event_api.tags.illegal", "warn", "-e", pipeline_string] }
+      before(:each) { allow(runner).to receive(:deprecation_logger).and_return(runner_deprecation_logger_stub) }
+      DEPRECATED_MSG = "The flag [\"--event_api.tags.illegal\"] has been deprecated and will be removed in version 9"
+
+      it "gives deprecation message when setting to `warn`" do
+        expect(runner_deprecation_logger_stub).to receive(:deprecated)
-          .with(a_string_including "This flag is deprecated and will be removed in version 9")
+          .with(a_string_including DEPRECATED_MSG)
+        subject.run("bin/logstash", args)
+      end
+
+      it "gives deprecation message when setting to `rename`" do
+        expect(runner_deprecation_logger_stub).to receive(:deprecated)
+          .with(a_string_including DEPRECATED_MSG)
+        subject.run("bin/logstash", args)
+      end
+
+      it "does not give deprecation message when unset" do
+        expect(runner_deprecation_logger_stub).not_to receive(:deprecated)
+          .with(a_string_including DEPRECATED_MSG)
+        subject.run("bin/logstash", ["-e", pipeline_string])
+      end
+    end
+
     context "when :pipeline_workers is not defined by the user" do
       it "should not pass the value to the pipeline" do
         expect(LogStash::Agent).to receive(:new) do |settings|

@@ -545,8 +604,8 @@ describe LogStash::Runner do
     subject { LogStash::Runner.new("") }
     let(:args) { ["-e", "input {} output {}"] }

-    it 'should be set' do
-      expect(LogStash::Util::Jackson).to receive(:set_jackson_defaults)
+    it 'should be verified' do
+      expect(LogStash::Util::Jackson).to receive(:verify_jackson_overrides)
       subject.run(args)
     end
   end

@@ -20,13 +20,13 @@ require "logstash/settings"

 describe LogStash::Setting::Nullable do
   let(:setting_name) { "this.that" }
-  let(:normal_setting) { LogStash::Setting::String.new(setting_name, nil, false, possible_strings) }
+  let(:normal_setting) { LogStash::Setting::SettingString.new(setting_name, nil, false, possible_strings) }
   let(:possible_strings) { [] } # empty means any string passes

   subject(:nullable_setting) { normal_setting.nullable }

   it 'is a kind of Nullable' do
-    expect(nullable_setting).to be_a_kind_of(described_class)
+    expect(nullable_setting).to be_a_kind_of(LogStash::Setting::NullableSetting)
   end

   it "retains the wrapped setting's name" do
@@ -56,14 +56,14 @@ describe LogStash::Setting::Nullable do
   context 'to an invalid wrong-type value' do
     let(:candidate_value) { 127 } # wrong type, expects String
     it 'is an invalid setting' do
-      expect { nullable_setting.validate_value }.to raise_error(ArgumentError, a_string_including("Setting \"#{setting_name}\" must be a "))
+      expect { nullable_setting.validate_value }.to raise_error(java.lang.ClassCastException, a_string_including("class java.lang.Long cannot be cast to class java.lang.String"))
     end
   end
   context 'to an invalid value not in the allow-list' do
     let(:possible_strings) { %w(this that) }
     let(:candidate_value) { "another" } # valid type, but not an allowed value
     it 'is an invalid setting' do
-      expect { nullable_setting.validate_value }.to raise_error(ArgumentError, a_string_including("Invalid value"))
+      expect { nullable_setting.validate_value }.to raise_error(java.lang.IllegalArgumentException, a_string_including("Invalid value"))
     end
   end
   context 'to a valid value' do

@@ -25,15 +25,44 @@ describe LogStash::Setting::SettingWithDeprecatedAlias do
   let(:default_value) { "DeFaUlT" }

   let(:settings) { LogStash::Settings.new }
-  let(:canonical_setting) { LogStash::Setting::String.new(canonical_setting_name, default_value, true) }
+  let(:canonical_setting) { LogStash::Setting::SettingString.new(canonical_setting_name, default_value, true) }
+
+  let(:events) { [] }

   before(:each) do
+    java_import org.apache.logging.log4j.LogManager
+    logger = LogManager.getLogger("org.logstash.settings.DeprecatedAlias")
+    deprecated_logger = LogManager.getLogger("org.logstash.deprecation.settings.DeprecatedAlias")
+
+    @custom_appender = CustomAppender.new(events).tap { |appender| appender.start }
+
+    java_import org.apache.logging.log4j.Level
+    logger.addAppender(@custom_appender)
+    deprecated_logger.addAppender(@custom_appender)
+    # the level has to be set after appending, as it was "error" for some reason
+    logger.setLevel(Level::INFO)
+    deprecated_logger.setLevel(Level::INFO)
+
+    expect(@custom_appender.started?).to be_truthy
+
     allow(LogStash::Settings).to receive(:logger).and_return(double("SettingsLogger").as_null_object)
     allow(LogStash::Settings).to receive(:deprecation_logger).and_return(double("SettingsDeprecationLogger").as_null_object)

     settings.register(canonical_setting.with_deprecated_alias(deprecated_setting_name))
   end

+  after(:each) do
+    events.clear
+    java_import org.apache.logging.log4j.LogManager
+    logger = LogManager.getLogger("org.logstash.settings.DeprecatedAlias")
+    deprecated_logger = LogManager.getLogger("org.logstash.deprecation.settings.DeprecatedAlias")
+    # The logger's AbstractConfiguration contains a cache of appenders, keyed by class name. The cache
+    # is only updated if the key is absent, so to make subsequent add_appender calls effective we have
+    # to clean up on teardown; otherwise the new appender instance is simply not used by the logger.
+    logger.remove_appender(@custom_appender)
+    deprecated_logger.remove_appender(@custom_appender)
+  end
+
   shared_examples '#validate_value success' do
     context '#validate_value' do
       it "returns without raising" do
@@ -57,6 +86,7 @@ describe LogStash::Setting::SettingWithDeprecatedAlias do
       it 'does not emit a deprecation warning' do
         expect(LogStash::Settings.deprecation_logger).to_not receive(:deprecated).with(a_string_including(deprecated_setting_name))
         settings.get_setting(deprecated_setting_name).observe_post_process
+        expect(events).to be_empty
       end
     end
   end
@@ -66,6 +96,7 @@ describe LogStash::Setting::SettingWithDeprecatedAlias do

     before(:each) do
       settings.set(deprecated_setting_name, value)
+      settings.get_setting(deprecated_setting_name).observe_post_process
     end

     it 'resolves to the value provided for the deprecated alias' do
@@ -73,15 +104,15 @@ describe LogStash::Setting::SettingWithDeprecatedAlias do
     end

     it 'logs a deprecation warning' do
-      expect(LogStash::Settings.deprecation_logger).to have_received(:deprecated).with(a_string_including(deprecated_setting_name))
+      expect(events[0]).to include(deprecated_setting_name)
     end

     include_examples '#validate_value success'

     context "#observe_post_process" do
       it 're-emits the deprecation warning' do
-        expect(LogStash::Settings.deprecation_logger).to receive(:deprecated).with(a_string_including(deprecated_setting_name))
         settings.get_setting(deprecated_setting_name).observe_post_process
+        expect(events[0]).to include(deprecated_setting_name)
       end
     end
@@ -105,6 +136,38 @@ describe LogStash::Setting::SettingWithDeprecatedAlias do
       expect { settings.get_setting(canonical_setting_name).deprecated_alias.validate_value }.to_not raise_error
     end
   end
+
+  context 'obsoleted version' do
+    before(:each) do
+      settings.register(subject.with_deprecated_alias(deprecated_name))
+    end
+
+    describe "ruby string setting" do
+      let(:new_value) { "ironman" }
+      let(:old_value) { "iron man" }
+      let(:canonical_name) { "iron.setting" }
+      let(:deprecated_name) { "iron.oxide.setting" }
+      subject { LogStash::Setting::SettingString.new(canonical_name, old_value, true) }
+
+      it 'logs a deprecation warning with target remove version' do
+        settings.set(deprecated_name, new_value)
+        settings.get_setting(deprecated_name).observe_post_process
+        expect(events.length).to be 2
+        expect(events[1]).to include(deprecated_name)
+      end
+    end
+    describe "java boolean setting" do
+      let(:new_value) { false }
+      let(:old_value) { true }
+      let(:canonical_name) { "bool.setting" }
+      let(:deprecated_name) { "boo.setting" }
+      subject { LogStash::Setting::Boolean.new(canonical_name, old_value, true) }
+
+      it 'does not raise error' do
+        expect { settings.set(deprecated_name, new_value) }.to_not raise_error
+      end
+    end
+  end
 end

 context "when only the canonical setting is set" do
@@ -117,15 +180,16 @@ describe LogStash::Setting::SettingWithDeprecatedAlias do
   end

   it 'does not produce a relevant deprecation warning' do
-    expect(LogStash::Settings.deprecation_logger).to_not have_received(:deprecated).with(a_string_including(deprecated_setting_name))
     settings.get_setting(deprecated_setting_name).observe_post_process
+    expect(events).to be_empty
   end

   include_examples '#validate_value success'

   context "#observe_post_process" do
     it 'does not emit a deprecation warning' do
       expect(LogStash::Settings.deprecation_logger).to_not receive(:deprecated).with(a_string_including(deprecated_setting_name))
       settings.get_setting(deprecated_setting_name).observe_post_process
+      expect(events).to be_empty
     end
   end
 end
@@ -139,15 +203,15 @@ describe LogStash::Setting::SettingWithDeprecatedAlias do
   context '#validate_value' do
     it "raises helpful exception" do
       expect { settings.get_setting(canonical_setting_name).validate_value }
-        .to raise_exception(ArgumentError, a_string_including("Both `#{canonical_setting_name}` and its deprecated alias `#{deprecated_setting_name}` have been set. Please only set `#{canonical_setting_name}`"))
+        .to raise_exception(java.lang.IllegalStateException, a_string_including("Both `#{canonical_setting_name}` and its deprecated alias `#{deprecated_setting_name}` have been set. Please only set `#{canonical_setting_name}`"))
     end
   end
 end

 context 'Settings#get on deprecated alias' do
   it 'produces a WARN-level message to the logger' do
-    expect(LogStash::Settings.logger).to receive(:warn).with(a_string_including "setting `#{canonical_setting_name}` has been queried by its deprecated alias `#{deprecated_setting_name}`")
     settings.get(deprecated_setting_name)
+    expect(events[0]).to include("setting `#{canonical_setting_name}` has been queried by its deprecated alias `#{deprecated_setting_name}`")
   end
 end
 end

@@ -18,13 +18,14 @@
 require "spec_helper"
 require "logstash/settings"

-describe LogStash::Setting::String do
+# Mirrored in java class org.logstash.settings.SettingStringTest
+describe LogStash::Setting::SettingString do
   let(:possible_values) { ["a", "b", "c"] }
   subject { described_class.new("mytext", possible_values.first, true, possible_values) }
   describe "#set" do
     context "when a value is given outside of possible_values" do
       it "should raise an ArgumentError" do
-        expect { subject.set("d") }.to raise_error(ArgumentError)
+        expect { subject.set("d") }.to raise_error(java.lang.IllegalArgumentException)
       end
     end
     context "when a value is given within possible_values" do

@@ -62,6 +62,28 @@ describe LogStash::Settings do
     end
   end

+  describe "#to_hash" do
+    let(:java_deprecated_alias) { LogStash::Setting::Boolean.new("java.actual", true).with_deprecated_alias("java.deprecated") }
+    let(:ruby_deprecated_alias) { LogStash::Setting::PortRange.new("ruby.actual", 9600..9700).with_deprecated_alias("ruby.deprecated") }
+    let(:non_deprecated) { LogStash::Setting::Boolean.new("plain_setting", false) }
+
+    before :each do
+      subject.register(java_deprecated_alias)
+      subject.register(ruby_deprecated_alias)
+      subject.register(non_deprecated)
+    end
+
+    it "filters out deprecated alias settings" do
+      generated_settings_hash = subject.to_hash
+
+      expect(generated_settings_hash).not_to have_key("java.deprecated")
+      expect(generated_settings_hash).not_to have_key("ruby.deprecated")
+      expect(generated_settings_hash).to have_key("java.actual")
+      expect(generated_settings_hash).to have_key("ruby.actual")
+      expect(generated_settings_hash).to have_key("plain_setting")
+    end
+  end
+
   describe "#get_subset" do
     let(:numeric_setting_1) { LogStash::Setting.new("num.1", Numeric, 1) }
     let(:numeric_setting_2) { LogStash::Setting.new("num.2", Numeric, 2) }
@@ -132,8 +154,8 @@ describe LogStash::Settings do
     settings.on_post_process do
       settings.set("baz", "bot")
     end
-    settings.register(LogStash::Setting::String.new("foo", "bar"))
-    settings.register(LogStash::Setting::String.new("baz", "somedefault"))
+    settings.register(LogStash::Setting::SettingString.new("foo", "bar"))
+    settings.register(LogStash::Setting::SettingString.new("baz", "somedefault"))
     settings.post_process
   end
@@ -161,7 +183,7 @@ describe LogStash::Settings do
   context "transient settings" do
     subject do
       settings = described_class.new
-      settings.register(LogStash::Setting::String.new("exist", "bonsoir"))
+      settings.register(LogStash::Setting::SettingString.new("exist", "bonsoir"))
       settings
     end
@@ -215,9 +237,9 @@ describe LogStash::Settings do

     subject do
       settings = described_class.new
-      settings.register(LogStash::Setting::String.new("interpolated_env", "missing"))
-      settings.register(LogStash::Setting::String.new("with_dot_env", "missing"))
-      settings.register(LogStash::Setting::String.new("interpolated_store", "missing"))
+      settings.register(LogStash::Setting::SettingString.new("interpolated_env", "missing"))
+      settings.register(LogStash::Setting::SettingString.new("with_dot_env", "missing"))
+      settings.register(LogStash::Setting::SettingString.new("interpolated_store", "missing"))
       settings
     end

@@ -43,6 +43,40 @@ describe FileWatch::BufferedTokenizer do
     expect(subject.extract("\n\n\n")).to eq(["", "", ""])
   end

+  describe 'flush' do
+    let(:data) { "content without a delimiter" }
+    before(:each) do
+      subject.extract(data)
+    end
+
+    it "emits the contents of the buffer" do
+      expect(subject.flush).to eq(data)
+    end
+
+    it "resets the state of the buffer" do
+      subject.flush
+      expect(subject).to be_empty
+    end
+
+    context 'with decode_size_limit_bytes' do
+      subject { FileWatch::BufferedTokenizer.new("\n", 100) }
+
+      it "validates size limit" do
+        expect { FileWatch::BufferedTokenizer.new("\n", -101) }.to raise_error(java.lang.IllegalArgumentException, "Size limit must be positive")
+        expect { FileWatch::BufferedTokenizer.new("\n", 0) }.to raise_error(java.lang.IllegalArgumentException, "Size limit must be positive")
+      end
+
+      it "emits the contents of the buffer" do
+        expect(subject.flush).to eq(data)
+      end
+
+      it "resets the state of the buffer" do
+        subject.flush
+        expect(subject).to be_empty
+      end
+    end
+  end

   context 'with delimiter' do
     subject { FileWatch::BufferedTokenizer.new(delimiter) }

@@ -1,113 +0,0 @@
-# Licensed to Elasticsearch B.V. under one or more contributor
-# license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright
-# ownership. Elasticsearch B.V. licenses this file to you under
-# the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-require 'spec_helper'
-
-describe LogStash::Util::Jackson do
-  it 'configures the read constraints defaults' do
-    read_constraints_defaults = double('read_constraints_defaults')
-    expect(read_constraints_defaults).to receive(:configure)
-
-    expect(LogStash::Util::Jackson::JacksonStreamReadConstraintsDefaults).to receive(:new).and_return(read_constraints_defaults)
-
-    LogStash::Util::Jackson.set_jackson_defaults(double('logger').as_null_object)
-  end
-end
-
-describe LogStash::Util::Jackson::JacksonStreamReadConstraintsDefaults do
-  let(:logger) { double('logger') }
-
-  subject { described_class.new(logger) }
-
-  shared_examples 'stream read constraint property' do |property|
-    let(:property) { property }
-    let(:value) { nil }
-    let(:builder) { double('builder') }
-    let(:builder_set_value_method) { expected_builder_set_value_method(property) }
-
-    before(:each) do
-      allow(logger).to receive(:info)
-
-      allow(builder).to receive(:build).and_return(com.fasterxml.jackson.core.StreamReadConstraints::builder.build)
-      allow(builder).to receive(builder_set_value_method).with(value.to_i)
-
-      allow(subject).to receive(:new_stream_read_constraints_builder).and_return(builder)
-      allow(subject).to receive(:get_property_value) do |name|
-        if name == property
-          value.to_s
-        else
-          nil
-        end
-      end
-    end
-
-    context 'with valid number' do
-      let(:value) { '10' }
-      it 'does not raise an error and sets the value' do
-        expect { subject.configure }.to_not raise_error
-        expect(builder).to have_received(builder_set_value_method).with(value.to_i)
-      end
-    end
-
-    context 'with non-number value' do
-      let(:value) { 'foo' }
-      it 'raises an error and does not set value' do
-        expect { subject.configure }.to raise_error(LogStash::ConfigurationError, /System property '#{property}' must be a positive integer value. Received: #{value}/)
-        expect(builder).to_not have_received(builder_set_value_method)
-      end
-    end
-
-    context 'with zeroed value' do
-      let(:value) { '0' }
-      it 'raises an error and does not set value' do
-        expect { subject.configure }.to raise_error(LogStash::ConfigurationError, /System property '#{property}' must be bigger than zero. Received: #{value}/)
-        expect(builder).to_not have_received(builder_set_value_method)
-      end
-    end
-
-    context 'with negative value' do
-      let(:value) { '-1' }
-      it 'raises an error and does not set value' do
-        expect { subject.configure }.to raise_error(LogStash::ConfigurationError, /System property '#{property}' must be bigger than zero. Received: #{value}/)
-        expect(builder).to_not have_received(builder_set_value_method)
-      end
-    end
-
-    def expected_builder_set_value_method(property)
-      case property
-      when LogStash::Util::Jackson::JacksonStreamReadConstraintsDefaults::PROPERTY_MAX_STRING_LENGTH
-        return :maxStringLength
-      when LogStash::Util::Jackson::JacksonStreamReadConstraintsDefaults::PROPERTY_MAX_NUMBER_LENGTH
-        return :maxNumberLength
-      when LogStash::Util::Jackson::JacksonStreamReadConstraintsDefaults::PROPERTY_MAX_NESTING_DEPTH
-        return :maxNestingDepth
-      else
-        raise 'Invalid system property value'
-      end
-    end
-  end
-
-  [
-    LogStash::Util::Jackson::JacksonStreamReadConstraintsDefaults::PROPERTY_MAX_STRING_LENGTH,
-    LogStash::Util::Jackson::JacksonStreamReadConstraintsDefaults::PROPERTY_MAX_NUMBER_LENGTH,
-    LogStash::Util::Jackson::JacksonStreamReadConstraintsDefaults::PROPERTY_MAX_NESTING_DEPTH,
-  ].each { |system_property|
-    context "#{system_property}" do
-      it_behaves_like "stream read constraint property", system_property
-    end
-  }
-end

@@ -0,0 +1,63 @@
+# Licensed to Elasticsearch B.V. under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch B.V. licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+require "spec_helper"
+require "logstash/util/substitution_variables"
+
+describe LogStash::Util::SubstitutionVariables do
+
+  subject { Class.new { extend LogStash::Util::SubstitutionVariables } }
+
+  context "ENV or Keystore ${VAR} with single/double quotes" do
+    # single or double quotes come from the ENV/Keystore ${VAR} value
+    let(:xpack_monitoring_host) { '"http://node1:9200"' }
+    let(:xpack_monitoring_hosts) { "'[\"http://node3:9200\", \"http://node4:9200\"]'" }
+    let(:xpack_management_pipeline_id) { '"*"' }
+    let(:config_string) {
+      "'input {
+        stdin { }
+        beats { port => 5040 }
+      }
+      output {
+        elasticsearch {
+          hosts => [\"https://es:9200\"]
+          user => \"elastic\"
+          password => 'changeme'
+        }
+      }'"
+    }
+
+    # this happens mostly when running LS with docker
+    it "strips out quotes" do
+      expect(subject.send(:strip_enclosing_char, xpack_monitoring_host, '"')).to eql('http://node1:9200')
+      expect(subject.send(:strip_enclosing_char, xpack_monitoring_hosts, "'")).to eql('["http://node3:9200", "http://node4:9200"]')
+      expect(subject.send(:strip_enclosing_char, xpack_management_pipeline_id, '"')).to eql('*')
+      # make sure we keep the quotes enclosing the hosts, user and password params
+      expect(subject.send(:strip_enclosing_char, config_string, "'")).to eql('input {
+        stdin { }
+        beats { port => 5040 }
+      }
+      output {
+        elasticsearch {
+          hosts => ["https://es:9200"]
+          user => "elastic"
+          password => \'changeme\'
+        }
+      }')
+    end
+  end
+end

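The `strip_enclosing_char` helper exercised above only removes the quote character when it encloses the entire value; quotes appearing inside the value survive untouched. A sketch of that rule in isolation (a Java port written for illustration only; the real helper is the private Ruby method invoked via `subject.send` above):

    // Strip the given quote character only when it wraps the whole value;
    // quotes inside the value are preserved.
    static String stripEnclosingChar(final String value, final char quote) {
        if (value.length() >= 2
                && value.charAt(0) == quote
                && value.charAt(value.length() - 1) == quote) {
            return value.substring(1, value.length() - 1);
        }
        return value;
    }
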
@@ -55,7 +55,13 @@ describe LogStash::WebServer do
   end

   let(:logger) { LogStash::Logging::Logger.new("testing") }
-  let(:agent) { OpenStruct.new({:webserver => webserver_block, :http_address => "127.0.0.1", :id => "myid", :name => "myname"}) }
+  let(:agent) { OpenStruct.new({
+    webserver: webserver_block,
+    http_address: "127.0.0.1",
+    id: "myid",
+    name: "myname",
+    health_observer: org.logstash.health.HealthObserver.new,
+  }) }
   let(:webserver_block) { OpenStruct.new({}) }

   subject(:webserver) { LogStash::WebServer.new(logger, agent, webserver_options) }

@@ -26,6 +26,7 @@ import java.io.InputStream;
 import java.io.PrintStream;
 import java.nio.file.Path;
 import java.nio.file.Paths;
+import java.util.Properties;

 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
@@ -45,6 +46,25 @@ import javax.annotation.Nullable;
  */
 public final class Logstash implements Runnable, AutoCloseable {

+    public static final String VERSION_FULL;
+    public static final String VERSION_MAJOR;
+    public static final String VERSION_MINOR;
+    public static final String VERSION_PATCH;
+
+    static {
+        final Properties properties = new Properties();
+        try {
+            properties.load(Logstash.class.getResourceAsStream("/version-info.properties"));
+            VERSION_FULL = properties.getProperty("logstash-core");
+            final String[] versions = VERSION_FULL.split("\\.");
+            VERSION_MAJOR = versions[0];
+            VERSION_MINOR = versions[1];
+            VERSION_PATCH = versions[2];
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
     private static final Logger LOGGER = LogManager.getLogger(Logstash.class);

     /**

@@ -42,6 +42,7 @@ import java.io.IOException;
 import java.math.BigDecimal;
 import java.math.BigInteger;
 import java.util.HashMap;

 import org.apache.logging.log4j.core.jackson.Log4jJsonObjectMapper;
 import org.jruby.RubyBignum;
 import org.jruby.RubyBoolean;
@@ -52,12 +53,27 @@ import org.jruby.RubyString;
 import org.jruby.RubySymbol;
 import org.jruby.ext.bigdecimal.RubyBigDecimal;
 import org.logstash.ext.JrubyTimestampExtLibrary;
+import org.logstash.jackson.StreamReadConstraintsUtil;
 import org.logstash.log.RubyBasicObjectSerializer;

 public final class ObjectMappers {

     static final String RUBY_SERIALIZERS_MODULE_ID = "RubySerializers";

+    static final StreamReadConstraintsUtil CONFIGURED_STREAM_READ_CONSTRAINTS;
+
+    static {
+        // The StreamReadConstraintsUtil needs to load the configured constraints from system
+        // properties and apply them _statically_, before any object mappers are initialized.
+        CONFIGURED_STREAM_READ_CONSTRAINTS = StreamReadConstraintsUtil.fromSystemProperties();
+        CONFIGURED_STREAM_READ_CONSTRAINTS.applyAsGlobalDefault();
+    }
+
+    public static StreamReadConstraintsUtil getConfiguredStreamReadConstraints() {
+        return CONFIGURED_STREAM_READ_CONSTRAINTS;
+    }
+
     private static final SimpleModule RUBY_SERIALIZERS =
         new SimpleModule(RUBY_SERIALIZERS_MODULE_ID)
             .addSerializer(RubyString.class, new RubyStringSerializer())

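The static initializer matters because Jackson factories capture the default StreamReadConstraints at construction time; overriding the global default after an ObjectMapper already exists has no effect on it. A sketch of what applying a global default looks like with Jackson's public 2.15+ API (the numeric limits below are placeholders, not Logstash's configured values):

    import com.fasterxml.jackson.core.StreamReadConstraints;

    final class GlobalReadConstraints {
        // Must run before any JsonFactory/ObjectMapper is instantiated,
        // otherwise those instances keep the previously captured defaults.
        static void apply() {
            StreamReadConstraints.overrideDefaultStreamReadConstraints(
                StreamReadConstraints.builder()
                    .maxStringLength(200_000_000) // placeholder
                    .maxNumberLength(10_000)      // placeholder
                    .maxNestingDepth(1_000)       // placeholder
                    .build());
        }
    }
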
@@ -590,13 +590,18 @@ public final class Queue implements Closeable {
      * @throws IOException if an IO error occurs
      */
     public synchronized Batch nonBlockReadBatch(int limit) throws IOException {
+        final SerializedBatchHolder serializedBatchHolder;
         lock.lock();
         try {
             Page p = nextReadPage();
-            return (isHeadPage(p) && p.isFullyRead()) ? null : readPageBatch(p, limit, 0L);
+            if (isHeadPage(p) && p.isFullyRead()) {
+                return null;
+            }
+            serializedBatchHolder = readPageBatch(p, limit, 0L);
         } finally {
             lock.unlock();
         }
+        return serializedBatchHolder.deserialize();
     }

     /**
@@ -607,7 +612,11 @@ public final class Queue implements Closeable {
      * @throws QueueRuntimeException if queue is closed
      * @throws IOException if an IO error occurs
      */
-    public synchronized Batch readBatch(int limit, long timeout) throws IOException {
+    public Batch readBatch(int limit, long timeout) throws IOException {
+        return readSerializedBatch(limit, timeout).deserialize();
+    }
+
+    private synchronized SerializedBatchHolder readSerializedBatch(int limit, long timeout) throws IOException {
         lock.lock();

         try {
@@ -618,7 +627,7 @@ public final class Queue implements Closeable {
     }

     /**
-     * read a {@link Batch} from the given {@link Page}. If the page is a head page, try to maximize the
+     * read a {@link SerializedBatchHolder} from the given {@link Page}. If the page is a head page, try to maximize the
      * batch size by waiting for writes.
      * @param p the {@link Page} to read from.
      * @param limit size limit of the batch to read.
@@ -626,7 +635,7 @@ public final class Queue implements Closeable {
      * @return {@link Batch} with read elements or null if nothing was read
      * @throws IOException if an IO error occurs
      */
-    private Batch readPageBatch(Page p, int limit, long timeout) throws IOException {
+    private SerializedBatchHolder readPageBatch(Page p, int limit, long timeout) throws IOException {
         int left = limit;
         final List<byte[]> elements = new ArrayList<>(limit);

@@ -678,7 +687,7 @@ public final class Queue implements Closeable {
             removeUnreadPage(p);
         }

-        return new Batch(elements, firstSeqNum, this);
+        return new SerializedBatchHolder(elements, firstSeqNum);
     }

     /**
@@ -894,4 +903,18 @@ public final class Queue implements Closeable {
         final long pMaxSeq = pMinSeq + (long) page.getElementCount();
         return seqNum >= pMinSeq && seqNum < pMaxSeq;
     }
+
+    class SerializedBatchHolder {
+        private final List<byte[]> elements;
+        private final long firstSeqNum;
+
+        private SerializedBatchHolder(List<byte[]> elements, long firstSeqNum) {
+            this.elements = elements;
+            this.firstSeqNum = firstSeqNum;
+        }
+
+        private Batch deserialize() {
+            return new Batch(elements, firstSeqNum, Queue.this);
+        }
+    }
 }

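The net effect of this refactor is a shorter critical section: the lock now only covers copying the serialized elements out of the page, while the comparatively expensive Batch deserialization runs after lock.unlock(). The general shape of the pattern, reduced to a sketch with hypothetical reader/decoder types:

    import java.util.List;
    import java.util.concurrent.locks.Lock;
    import java.util.concurrent.locks.ReentrantLock;

    final class LockScopeSketch {
        private final Lock lock = new ReentrantLock();

        interface RawReader { List<byte[]> read(); }
        interface Decoder<T> { T decode(List<byte[]> raw); }

        <T> T readAndDecode(RawReader reader, Decoder<T> decoder) {
            final List<byte[]> raw;
            lock.lock();
            try {
                raw = reader.read();    // cheap: collect the serialized elements
            } finally {
                lock.unlock();
            }
            return decoder.decode(raw); // expensive: done outside the lock
        }
    }

Keeping deserialization outside the lock reduces contention between the reader and writers that need the same lock to append to the queue.
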
@@ -23,14 +23,18 @@ package org.logstash.common;
 import org.jruby.Ruby;
 import org.jruby.RubyArray;
 import org.jruby.RubyClass;
+import org.jruby.RubyEncoding;
 import org.jruby.RubyObject;
 import org.jruby.RubyString;
 import org.jruby.anno.JRubyClass;
 import org.jruby.anno.JRubyMethod;
 import org.jruby.runtime.ThreadContext;
 import org.jruby.runtime.builtin.IRubyObject;
+import org.jruby.util.ByteList;
 import org.logstash.RubyUtil;
+
+import java.nio.charset.Charset;

 @JRubyClass(name = "BufferedTokenizer")
 public class BufferedTokenizerExt extends RubyObject {
@@ -40,10 +44,13 @@ public class BufferedTokenizerExt extends RubyObject {
         freeze(RubyUtil.RUBY.getCurrentContext());

     private @SuppressWarnings("rawtypes") RubyArray input = RubyUtil.RUBY.newArray();
+    private StringBuilder headToken = new StringBuilder();
     private RubyString delimiter = NEW_LINE;
     private int sizeLimit;
     private boolean hasSizeLimit;
     private int inputSize;
+    private boolean bufferFullErrorNotified = false;
+    private String encodingName;

     public BufferedTokenizerExt(final Ruby runtime, final RubyClass metaClass) {
         super(runtime, metaClass);
@@ -55,7 +62,11 @@ public class BufferedTokenizerExt extends RubyObject {
             this.delimiter = args[0].convertToString();
         }
         if (args.length == 2) {
-            this.sizeLimit = args[1].convertToInteger().getIntValue();
+            final int sizeLimit = args[1].convertToInteger().getIntValue();
+            if (sizeLimit <= 0) {
+                throw new IllegalArgumentException("Size limit must be positive");
+            }
+            this.sizeLimit = sizeLimit;
             this.hasSizeLimit = true;
         }
         this.inputSize = 0;
@@ -76,23 +87,76 @@ public class BufferedTokenizerExt extends RubyObject {
     @JRubyMethod
     @SuppressWarnings("rawtypes")
     public RubyArray extract(final ThreadContext context, IRubyObject data) {
+        RubyEncoding encoding = (RubyEncoding) data.convertToString().encoding(context);
+        encodingName = encoding.getEncoding().getCharsetName();
         final RubyArray entities = data.convertToString().split(delimiter, -1);
+        if (!bufferFullErrorNotified) {
+            input.clear();
+            input.concat(entities);
+        } else {
+            // after a full buffer signal
+            if (input.isEmpty()) {
+                // After a buffer-full error, the remaining part of the line, up to the next delimiter,
+                // has to be consumed, unless the input buffer still contains fragments of
+                // subsequent tokens.
+                entities.shift(context);
+                input.concat(entities);
+            } else {
+                // merge the last element of the input with the first of the incoming data segment
+                if (!entities.isEmpty()) {
+                    RubyString last = ((RubyString) input.pop(context));
+                    RubyString nextFirst = ((RubyString) entities.shift(context));
+                    entities.unshift(last.concat(nextFirst));
+                    input.concat(entities);
+                }
+            }
+        }
+
         if (hasSizeLimit) {
-            final int entitiesSize = ((RubyString) entities.first()).size();
+            if (bufferFullErrorNotified) {
+                bufferFullErrorNotified = false;
+                if (input.isEmpty()) {
+                    return RubyUtil.RUBY.newArray();
+                }
+            }
+            final int entitiesSize = ((RubyString) input.first()).size();
             if (inputSize + entitiesSize > sizeLimit) {
-                throw new IllegalStateException("input buffer full");
+                bufferFullErrorNotified = true;
+                headToken = new StringBuilder();
+                String errorMessage = String.format("input buffer full, consumed token which exceeded the sizeLimit %d; inputSize: %d, entitiesSize %d", sizeLimit, inputSize, entitiesSize);
+                inputSize = 0;
+                input.shift(context); // consume the token fragment that triggered the buffer-full condition
+                throw new IllegalStateException(errorMessage);
             }
             this.inputSize = inputSize + entitiesSize;
         }
-        input.append(entities.shift(context));
-        if (entities.isEmpty()) {
+
+        if (input.getLength() < 2) {
+            // this is a specialization which avoids adding to and removing from the input accumulator
+            // when it contains just one element
+            headToken.append(input.shift(context)); // remove head
             return RubyUtil.RUBY.newArray();
         }
-        entities.unshift(input.join(context));
-        input.clear();
-        input.append(entities.pop(context));
-        inputSize = ((RubyString) input.first()).size();
-        return entities;
+
+        if (headToken.length() > 0) {
+            // if there is a pending token part, merge it with the first token segment present
+            // in the accumulator, and clear the pending token part.
+            headToken.append(input.shift(context)); // append buffer to first element and
+            // create a new RubyString with the data's specified encoding
+            RubyString encodedHeadToken = toEncodedRubyString(context, headToken.toString());
+            input.unshift(encodedHeadToken); // reinsert it into the array
+            headToken = new StringBuilder();
+        }
+        headToken.append(input.pop(context)); // put the leftovers in headToken for later
+        inputSize = headToken.length();
+        return input;
     }

+    private RubyString toEncodedRubyString(ThreadContext context, String input) {
+        // Depends on encodingName being set by the extract method; could potentially raise if not set.
+        RubyString result = RubyUtil.RUBY.newString(new ByteList(input.getBytes(Charset.forName(encodingName))));
+        result.force_encoding(context, RubyUtil.RUBY.newString(encodingName));
+        return result;
+    }
+
     /**
@@ -104,14 +168,30 @@ public class BufferedTokenizerExt extends RubyObject {
      */
     @JRubyMethod
     public IRubyObject flush(final ThreadContext context) {
-        final IRubyObject buffer = input.join(context);
-        input.clear();
-        return buffer;
+        final IRubyObject buffer = RubyUtil.toRubyObject(headToken.toString());
+        headToken = new StringBuilder();
+        inputSize = 0;
+
+        // create a new RubyString with the encoding of the last data seen, if one exists
+        RubyString encodedHeadToken;
+        if (encodingName != null) {
+            encodedHeadToken = toEncodedRubyString(context, buffer.toString());
+        } else {
+            // When used with the TCP input, flush can be invoked on socket connection before any
+            // invocation of extract, leaving the encoding name unassigned.
+            // In such a case the headToken must also be empty.
+            if (!buffer.toString().isEmpty()) {
+                throw new IllegalStateException("invoked flush with unassigned encoding but non-empty head token, this shouldn't happen");
+            }
+            encodedHeadToken = (RubyString) buffer;
+        }
+
+        return encodedHeadToken;
     }

     @JRubyMethod(name = "empty?")
     public IRubyObject isEmpty(final ThreadContext context) {
-        return input.empty_p();
+        return RubyUtil.RUBY.newBoolean(headToken.toString().isEmpty() && (inputSize == 0));
     }

 }

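Taken together, the new fields implement a recoverable buffer-full protocol: the extract call that overflows sizeLimit throws IllegalStateException exactly once, remembers the condition in bufferFullErrorNotified, and the following call silently discards the rest of the oversized line up to the next delimiter before resuming. A caller-side sketch against a hypothetical plain-Java tokenizer with the same contract (the JRuby ext above returns a RubyArray instead):

    import java.util.List;

    final class TokenizerClient {
        interface Tokenizer { List<String> extract(String chunk); }

        void feed(Tokenizer tokenizer, String chunk) {
            try {
                tokenizer.extract(chunk).forEach(this::process);
            } catch (IllegalStateException bufferFull) {
                // The offending fragment has already been consumed internally;
                // the next extract() resumes with the next complete token.
                System.err.println("dropped oversized token: " + bufferFull.getMessage());
            }
        }

        private void process(String token) { /* handle one token */ }
    }
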
@@ -7,7 +7,10 @@ import org.logstash.Event;
  * */
 public class ConditionalEvaluationError extends RuntimeException {
     private static final long serialVersionUID = -8633589068902565868L;
-    private final Event failedEvent;
+
+    // This class is serializable because it inherits from Throwable; however, it is not expected
+    // to ever be transmitted on the wire or stored in some binary storage.
+    private final transient Event failedEvent;

     ConditionalEvaluationError(Throwable cause, Event failedEvent) {
         super(cause);

@@ -35,12 +35,16 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.Optional;
 import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Predicate;
 import java.util.function.Supplier;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
+import java.util.stream.Stream;

 import com.google.common.annotations.VisibleForTesting;
@@ -82,6 +86,7 @@ import org.logstash.config.ir.compiler.ConditionalEvaluationError;
 import org.logstash.execution.queue.QueueWriter;
 import org.logstash.ext.JRubyAbstractQueueWriteClientExt;
 import org.logstash.ext.JRubyWrappedWriteClientExt;
+import org.logstash.health.PipelineIndicator;
 import org.logstash.instrument.metrics.AbstractMetricExt;
 import org.logstash.instrument.metrics.AbstractNamespacedMetricExt;
 import org.logstash.instrument.metrics.FlowMetric;
@@ -161,12 +166,13 @@ public class AbstractPipelineExt extends RubyBasicObject {

     private QueueReadClientBase filterQueueClient;

-    private ArrayList<FlowMetric> flowMetrics = new ArrayList<>();
+    private transient final ScopedFlowMetrics scopedFlowMetrics = new ScopedFlowMetrics();
     private @SuppressWarnings("rawtypes") RubyArray inputs;
     private @SuppressWarnings("rawtypes") RubyArray filters;
     private @SuppressWarnings("rawtypes") RubyArray outputs;

     private String lastErrorEvaluationReceived = "";
+    private transient DeadLetterQueueWriter javaDlqWriter;

     public AbstractPipelineExt(final Ruby runtime, final RubyClass metaClass) {
         super(runtime, metaClass);
@@ -180,10 +186,32 @@ public class AbstractPipelineExt extends RubyBasicObject {
         @Override
         public void notify(ConditionalEvaluationError err) {
             lastErrorEvaluationReceived = err.getCause().getMessage();
-            LOGGER.warn("{}. Event was dropped, enable debug logging to see the event's payload.", lastErrorEvaluationReceived);
+            if (isDLQEnabled()) {
+                LOGGER.warn("{}. Failing event was sent to dead letter queue", lastErrorEvaluationReceived);
+            } else {
+                LOGGER.warn("{}. Event was dropped, enable debug logging to see the event's payload", lastErrorEvaluationReceived);
+            }
             LOGGER.debug("Event generating the fault: {}", err.failedEvent().toMap().toString());

             // log the exception at debug level
             if (LOGGER.isDebugEnabled()) {
                 debugLogStackTrace(err);
             }
+
+            if (isDLQEnabled()) {
+                try {
+                    javaDlqWriter.writeEntry(err.failedEvent(), "if-statement", "if-statement", "condition evaluation error, " + lastErrorEvaluationReceived);
+                } catch (IOException ioex) {
+                    LOGGER.error("Can't write in DLQ", ioex);
+                }
+            }
         }
+
+        private boolean isDLQEnabled() {
+            return javaDlqWriter != null;
+        }

         private void debugLogStackTrace(ConditionalEvaluationError err) {
             try (StringWriter sw = new StringWriter(); PrintWriter pw = new PrintWriter(sw)) {
                 err.printStackTrace(pw);
                 LOGGER.debug("{}", sw);
@@ -372,7 +400,7 @@ public class AbstractPipelineExt extends RubyBasicObject {
     public final IRubyObject dlqWriter(final ThreadContext context) {
         if (dlqWriter == null) {
             if (dlqEnabled(context).isTrue()) {
-                final DeadLetterQueueWriter javaDlqWriter = createDeadLetterQueueWriterFromSettings(context);
+                javaDlqWriter = createDeadLetterQueueWriterFromSettings(context);
                 dlqWriter = JavaUtil.convertJavaToUsableRubyObject(context.runtime, javaDlqWriter);
             } else {
                 dlqWriter = RubyUtil.DUMMY_DLQ_WRITER_CLASS.callMethod(context, "new");
@@ -530,6 +558,7 @@ public class AbstractPipelineExt extends RubyBasicObject {
     @JRubyMethod(name = "initialize_flow_metrics")
     public final IRubyObject initializeFlowMetrics(final ThreadContext context) {
         if (metric.collector(context).isNil()) { return context.nil; }
+        if (!getSetting(context, "metric.collect").isTrue()) { return context.nil; }

         final UptimeMetric uptimeMetric = initOrGetUptimeMetric(context, buildNamespace(), UPTIME_IN_MILLIS_KEY);
         final Metric<Number> uptimeInPreciseMillis = uptimeMetric.withUnitsPrecise(MILLISECONDS);
@@ -540,34 +569,34 @@ public class AbstractPipelineExt extends RubyBasicObject {

         final LongCounter eventsInCounter = initOrGetCounterMetric(context, eventsNamespace, IN_KEY);
         final FlowMetric inputThroughput = createFlowMetric(INPUT_THROUGHPUT_KEY, eventsInCounter, uptimeInPreciseSeconds);
-        this.flowMetrics.add(inputThroughput);
+        this.scopedFlowMetrics.register(ScopedFlowMetrics.Scope.WORKER, inputThroughput);
         storeMetric(context, flowNamespace, inputThroughput);

         final LongCounter eventsFilteredCounter = initOrGetCounterMetric(context, eventsNamespace, FILTERED_KEY);
         final FlowMetric filterThroughput = createFlowMetric(FILTER_THROUGHPUT_KEY, eventsFilteredCounter, uptimeInPreciseSeconds);
-        this.flowMetrics.add(filterThroughput);
+        this.scopedFlowMetrics.register(ScopedFlowMetrics.Scope.WORKER, filterThroughput);
         storeMetric(context, flowNamespace, filterThroughput);

         final LongCounter eventsOutCounter = initOrGetCounterMetric(context, eventsNamespace, OUT_KEY);
         final FlowMetric outputThroughput = createFlowMetric(OUTPUT_THROUGHPUT_KEY, eventsOutCounter, uptimeInPreciseSeconds);
-        this.flowMetrics.add(outputThroughput);
+        this.scopedFlowMetrics.register(ScopedFlowMetrics.Scope.WORKER, outputThroughput);
         storeMetric(context, flowNamespace, outputThroughput);

         final TimerMetric queuePushWaitInMillis = initOrGetTimerMetric(context, eventsNamespace, PUSH_DURATION_KEY);
         final FlowMetric backpressureFlow = createFlowMetric(QUEUE_BACKPRESSURE_KEY, queuePushWaitInMillis, uptimeInPreciseMillis);
-        this.flowMetrics.add(backpressureFlow);
+        this.scopedFlowMetrics.register(ScopedFlowMetrics.Scope.WORKER, backpressureFlow);
         storeMetric(context, flowNamespace, backpressureFlow);

         final TimerMetric durationInMillis = initOrGetTimerMetric(context, eventsNamespace, DURATION_IN_MILLIS_KEY);
         final FlowMetric concurrencyFlow = createFlowMetric(WORKER_CONCURRENCY_KEY, durationInMillis, uptimeInPreciseMillis);
-        this.flowMetrics.add(concurrencyFlow);
+        this.scopedFlowMetrics.register(ScopedFlowMetrics.Scope.WORKER, concurrencyFlow);
         storeMetric(context, flowNamespace, concurrencyFlow);

         final int workerCount = getSetting(context, SettingKeyDefinitions.PIPELINE_WORKERS).convertToInteger().getIntValue();
         final UpScaledMetric percentScaledDurationInMillis = new UpScaledMetric(durationInMillis, 100);
         final UpScaledMetric availableWorkerTimeInMillis = new UpScaledMetric(uptimeInPreciseMillis, workerCount);
         final FlowMetric utilizationFlow = createFlowMetric(WORKER_UTILIZATION_KEY, percentScaledDurationInMillis, availableWorkerTimeInMillis);
-        this.flowMetrics.add(utilizationFlow);
+        this.scopedFlowMetrics.register(ScopedFlowMetrics.Scope.WORKER, utilizationFlow);
         storeMetric(context, flowNamespace, utilizationFlow);

         initializePqFlowMetrics(context, flowNamespace, uptimeMetric);
@@ -577,10 +606,22 @@ public class AbstractPipelineExt extends RubyBasicObject {

     @JRubyMethod(name = "collect_flow_metrics")
     public final IRubyObject collectFlowMetrics(final ThreadContext context) {
-        this.flowMetrics.forEach(FlowMetric::capture);
+        this.scopedFlowMetrics.captureAll();
         return context.nil;
     }

+    // short-term: limits the scope of what is included in the flow observations
+    public final PipelineIndicator.FlowObservation collectWorkerUtilizationFlowObservation() {
+        return this.collectFlowObservation(WORKER_UTILIZATION_KEY.asJavaString()::equals);
+    }
+
+    public final PipelineIndicator.FlowObservation collectFlowObservation(final Predicate<String> filter) {
+        Map<String, Map<String, Double>> collect = this.scopedFlowMetrics.getFlowMetrics(ScopedFlowMetrics.Scope.WORKER).stream()
+                .filter(fm -> filter.test(fm.getName()))
+                .collect(Collectors.toUnmodifiableMap(FlowMetric::getName, FlowMetric::getValue));
+        return new PipelineIndicator.FlowObservation(collect);
+    }
+
     private static FlowMetric createFlowMetric(final RubySymbol name,
                                                final Metric<? extends Number> numeratorMetric,
                                                final Metric<? extends Number> denominatorMetric) {
@@ -648,12 +689,13 @@ public class AbstractPipelineExt extends RubyBasicObject {

         final Supplier<NumberGauge> eventsGaugeMetricSupplier = () -> initOrGetNumberGaugeMetric(context, queueNamespace, EVENTS_KEY).orElse(null);
         final FlowMetric growthEventsFlow = createFlowMetric(QUEUE_PERSISTED_GROWTH_EVENTS_KEY, eventsGaugeMetricSupplier, () -> uptimeInPreciseSeconds);
-        this.flowMetrics.add(growthEventsFlow);
+        this.scopedFlowMetrics.register(ScopedFlowMetrics.Scope.WORKER, growthEventsFlow);
         storeMetric(context, flowNamespace, growthEventsFlow);

         final Supplier<NumberGauge> queueSizeInBytesMetricSupplier = () -> initOrGetNumberGaugeMetric(context, queueCapacityNamespace, QUEUE_SIZE_IN_BYTES_KEY).orElse(null);
         final FlowMetric growthBytesFlow = createFlowMetric(QUEUE_PERSISTED_GROWTH_BYTES_KEY, queueSizeInBytesMetricSupplier, () -> uptimeInPreciseSeconds);
-        this.flowMetrics.add(growthBytesFlow);
+        this.scopedFlowMetrics.register(ScopedFlowMetrics.Scope.WORKER, growthBytesFlow);
+
         storeMetric(context, flowNamespace, growthBytesFlow);
     }
 }
@@ -682,7 +724,7 @@ public class AbstractPipelineExt extends RubyBasicObject {
         final LongCounter eventsOut = initOrGetCounterMetric(context, eventsNamespace, OUT_KEY);

         final FlowMetric throughputFlow = createFlowMetric(PLUGIN_THROUGHPUT_KEY, eventsOut, uptimeInPreciseSeconds);
-        this.flowMetrics.add(throughputFlow);
+        this.scopedFlowMetrics.register(ScopedFlowMetrics.Scope.PLUGIN, throughputFlow);

         final RubySymbol[] flowNamespace = buildNamespace(PLUGINS_KEY, INPUTS_KEY, RubyUtil.RUBY.newString(id).intern(), FLOW_KEY);
         storeMetric(context, flowNamespace, throughputFlow);
@@ -695,12 +737,12 @@ public class AbstractPipelineExt extends RubyBasicObject {
         final TimerMetric durationInMillis = initOrGetTimerMetric(context, eventsNamespace, DURATION_IN_MILLIS_KEY);
         final LongCounter counterEvents = initOrGetCounterMetric(context, eventsNamespace, IN_KEY);
         final FlowMetric workerCostPerEvent = createFlowMetric(WORKER_MILLIS_PER_EVENT_KEY, durationInMillis, counterEvents);
-        this.flowMetrics.add(workerCostPerEvent);
+        this.scopedFlowMetrics.register(ScopedFlowMetrics.Scope.PLUGIN, workerCostPerEvent);

         final UpScaledMetric percentScaledDurationInMillis = new UpScaledMetric(durationInMillis, 100);
         final UpScaledMetric availableWorkerTimeInMillis = new UpScaledMetric(uptimeInPreciseMillis, workerCount);
         final FlowMetric workerUtilization = createFlowMetric(WORKER_UTILIZATION_KEY, percentScaledDurationInMillis, availableWorkerTimeInMillis);
-        this.flowMetrics.add(workerUtilization);
+        this.scopedFlowMetrics.register(ScopedFlowMetrics.Scope.PLUGIN, workerUtilization);

         final RubySymbol[] flowNamespace = buildNamespace(PLUGINS_KEY, key, RubyUtil.RUBY.newString(id).intern(), FLOW_KEY);
         storeMetric(context, flowNamespace, workerCostPerEvent);
@@ -861,4 +903,33 @@ public class AbstractPipelineExt extends RubyBasicObject {
     public final RubyString getLastErrorEvaluationReceived(final ThreadContext context) {
         return RubyString.newString(context.runtime, lastErrorEvaluationReceived);
     }
+
+    private static class ScopedFlowMetrics {
+        enum Scope {
+            WORKER,
+            PLUGIN
+        }
+        private final Map<Scope, List<FlowMetric>> flowsByScope = new ConcurrentHashMap<>();
+
+        void register(final Scope scope, final FlowMetric metric) {
+            flowsByScope.compute(scope, (s, scopedFlows) -> {
+                if (scopedFlows == null) {
+                    return List.of(metric);
+                } else {
+                    final ArrayList<FlowMetric> mutable = new ArrayList<>(scopedFlows.size() + 1);
+                    mutable.addAll(scopedFlows);
+                    mutable.add(metric);
+                    return List.copyOf(mutable);
+                }
+            });
+        }
+
+        void captureAll() {
+            flowsByScope.values().stream().flatMap(List::stream).forEach(FlowMetric::capture);
+        }
+
+        List<FlowMetric> getFlowMetrics(final Scope scope) {
+            return flowsByScope.getOrDefault(scope, List.of());
+        }
+    }
 }

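ScopedFlowMetrics uses a copy-on-write idiom: register builds a fresh immutable list inside ConcurrentHashMap.compute and publishes it atomically, so captureAll and getFlowMetrics can iterate without any locking. Registration only happens at pipeline initialization while capture runs periodically, which is exactly the read-heavy profile this trade-off favors. The same idiom in isolation, with generic element types for illustration:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Copy-on-write registry: each insert publishes a new immutable list,
    // so readers iterate a stable snapshot without synchronization.
    final class CopyOnWriteRegistry<K, V> {
        private final Map<K, List<V>> byKey = new ConcurrentHashMap<>();

        void register(final K key, final V value) {
            byKey.compute(key, (k, existing) -> {
                if (existing == null) {
                    return List.of(value);
                }
                final List<V> grown = new ArrayList<>(existing.size() + 1);
                grown.addAll(existing);
                grown.add(value);
                return List.copyOf(grown);
            });
        }

        List<V> snapshot(final K key) {
            return byKey.getOrDefault(key, List.of());
        }
    }
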
@@ -22,6 +22,7 @@ package org.logstash.ext;

 import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.BlockingQueue;

 import org.jruby.Ruby;
 import org.jruby.RubyClass;
 import org.jruby.RubyNumeric;

@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.logstash.health;
+
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.databind.SerializerProvider;
+import com.fasterxml.jackson.databind.annotation.JsonSerialize;
+
+import java.io.IOException;
+import java.util.Map;
+
+@JsonSerialize(using = ApiHealthReport.JsonSerializer.class)
+public class ApiHealthReport {
+    private final MultiIndicator.Report delegate;
+
+    public ApiHealthReport(final MultiIndicator.Report delegate) {
+        this.delegate = delegate;
+    }
+
+    public Status getStatus() {
+        return delegate.status();
+    }
+
+    public String getSymptom() {
+        return delegate.symptom();
+    }
+
+    public Map<String, Indicator.Report> getIndicators() {
+        return delegate.indicators();
+    }
+
+    public static class JsonSerializer extends com.fasterxml.jackson.databind.JsonSerializer<ApiHealthReport> {
+        @Override
+        public void serialize(final ApiHealthReport apiHealthReport,
+                              final JsonGenerator jsonGenerator,
+                              final SerializerProvider serializerProvider) throws IOException {
+            jsonGenerator.writeStartObject();
+            jsonGenerator.writeObjectField("status", apiHealthReport.getStatus());
+            jsonGenerator.writeObjectField("symptom", apiHealthReport.getSymptom());
+            jsonGenerator.writeObjectField("indicators", apiHealthReport.getIndicators());
+            jsonGenerator.writeEndObject();
+        }
+    }
+}

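Because the serializer is attached through @JsonSerialize(using = ApiHealthReport.JsonSerializer.class), any Jackson ObjectMapper picks it up without extra module registration. A usage sketch (how the MultiIndicator.Report delegate gets built is elsewhere in this changeset and elided here):

    import com.fasterxml.jackson.databind.ObjectMapper;

    import java.io.IOException;

    final class ApiHealthReportJson {
        // The @JsonSerialize annotation on ApiHealthReport routes serialization
        // through its nested JsonSerializer, yielding
        // {"status":...,"symptom":"...","indicators":{...}}.
        static String toJson(final ApiHealthReport report) throws IOException {
            return new ObjectMapper().writeValueAsString(report);
        }
    }
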
Some files were not shown because too many files have changed in this diff.