Mirror of https://github.com/elastic/logstash.git (synced 2025-04-22 13:47:21 -04:00)

Comparing 32 commits
6861ea613a
7d75395e77
9f71e03312
6b8968476d
3467751f90
bd7e40885c
04d193d613
ff37802a77
a5b7d2bfad
a42c71a904
0ba5330b5f
8949dc77b6
f9d6b42a7e
8fa13707fa
224421f3e9
550e935835
7afb42c13a
78fb379282
9878e2737a
9cdb8b839f
7277a369a1
304b8b25e6
1401565c48
581ac90ec6
e673e0d36f
b1773f8c02
1f12208528
ed535a822a
c2195bc1ee
ceddff3ab3
18583787b3
ba5af17681

56 changed files with 2067 additions and 286 deletions
@@ -17,6 +17,9 @@ steps:
           source .buildkite/scripts/common/vm-agent.sh
           ci/unit_tests.sh ruby
+        retry:
+          automatic:
+            - limit: 3

       - label: ":java: Java unit tests"
         key: "java-unit-tests"
@@ -28,6 +31,9 @@ steps:
           source .buildkite/scripts/common/vm-agent.sh
           ci/unit_tests.sh java
+        retry:
+          automatic:
+            - limit: 3

       - label: ":lab_coat: Integration Tests / part 1"
         key: "integration-tests-part-1"
@@ -36,6 +42,9 @@ steps:
           source .buildkite/scripts/common/vm-agent.sh
           ci/integration_tests.sh split 0
+        retry:
+          automatic:
+            - limit: 3

       - label: ":lab_coat: Integration Tests / part 2"
         key: "integration-tests-part-2"
@@ -44,6 +53,9 @@ steps:
           source .buildkite/scripts/common/vm-agent.sh
           ci/integration_tests.sh split 1
+        retry:
+          automatic:
+            - limit: 3

       - label: ":lab_coat: IT Persistent Queues / part 1"
         key: "integration-tests-qa-part-1"
@@ -53,6 +65,9 @@ steps:
           source .buildkite/scripts/common/vm-agent.sh
           export FEATURE_FLAG=persistent_queues
           ci/integration_tests.sh split 0
+        retry:
+          automatic:
+            - limit: 3

       - label: ":lab_coat: IT Persistent Queues / part 2"
         key: "integration-tests-qa-part-2"
@@ -62,6 +77,9 @@ steps:
           source .buildkite/scripts/common/vm-agent.sh
           export FEATURE_FLAG=persistent_queues
           ci/integration_tests.sh split 1
+        retry:
+          automatic:
+            - limit: 3

       - label: ":lab_coat: x-pack unit tests"
         key: "x-pack-unit-tests"
@@ -70,6 +88,9 @@ steps:
           source .buildkite/scripts/common/vm-agent.sh
           x-pack/ci/unit_tests.sh
+        retry:
+          automatic:
+            - limit: 3

       - label: ":lab_coat: x-pack integration"
         key: "integration-tests-x-pack"
@@ -78,6 +99,9 @@ steps:
           source .buildkite/scripts/common/vm-agent.sh
           x-pack/ci/integration_tests.sh
+        retry:
+          automatic:
+            - limit: 3

   - group: "Acceptance Phase"
     depends_on: "testing-phase"
@@ -88,6 +112,9 @@ steps:
           set -euo pipefail

           source .buildkite/scripts/common/vm-agent.sh && ci/docker_acceptance_tests.sh {{matrix}}
+        retry:
+          automatic:
+            - limit: 3
         matrix:
         - "full"
         - "oss"

Gemfile.jruby-3.1.lock.release (new file, 1015 lines)
File diff suppressed because it is too large.

@@ -9,7 +9,7 @@ gem "paquet", "~> 0.2"
 gem "pleaserun", "~>0.0.28", require: false
 gem "rake", "~> 13", require: false
 gem "ruby-progressbar", "~> 1", require: false
-gem "ruby-maven-libs", "~> 3", ">= 3.8.9"
+gem "ruby-maven-libs", "~> 3", ">= 3.9.6.1"
 gem "logstash-output-elasticsearch", ">= 11.14.0"
 gem "polyglot", require: false
 gem "treetop", require: false
@@ -20,11 +20,16 @@ gem "gems", "~> 1", :group => :build
 gem "octokit", "~> 4.25", :group => :build
 gem "rubyzip", "~> 1", :group => :build
 gem "stud", "~> 0.0.22", :group => :build
+# remove fileutils declaration when start using Ruby 3.2+, by default includes `fileutils-v1.7.0`
+# (https://git.ruby-lang.org/ruby.git/commit/?h=ruby_3_2&id=05caafb4731c796890027cafedaac59dc108a23a)
+# note that the reason to use 1.7.0 is due to https://github.com/logstash-plugins/logstash-integration-aws/issues/28
+gem "fileutils", "~> 1.7"
+
 gem "rubocop", :group => :development
 gem "belzebuth", :group => :development
 gem "benchmark-ips", :group => :development
 gem "ci_reporter_rspec", "~> 1", :group => :development
 gem "rexml", "3.2.6", :group => :development
 gem "flores", "~> 0.0.8", :group => :development
 gem "json-schema", "~> 2", :group => :development
 gem "logstash-devutils", "~> 2.6.0", :group => :development

NOTICE.TXT (190 changed lines)

@@ -90,7 +90,7 @@ Copyright © 2019 Red Hat, Inc. All rights reserved. “Red Hat,” is a registe
 All other trademarks are the property of their respective owners.

 ==========
-Notice for: addressable-2.8.6
+Notice for: addressable-2.8.7
 ----------

 Copyright © Bob Aman
@@ -298,7 +298,7 @@ See the License for the specific language governing permissions and
 limitations under the License.

 ==========
-Notice for: amazing_print-1.5.0
+Notice for: amazing_print-1.6.0
 ----------

 MIT License
@@ -722,7 +722,7 @@ Notice for: aws-eventstream-1.3.0
 limitations under the License.

 ==========
-Notice for: aws-partitions-1.883.0
+Notice for: aws-partitions-1.925.0
 ----------

@@ -929,7 +929,7 @@ Notice for: aws-partitions-1.883.0
 limitations under the License.

 ==========
-Notice for: aws-sdk-cloudfront-1.87.0
+Notice for: aws-sdk-cloudfront-1.89.0
 ----------

@@ -1136,7 +1136,7 @@ Notice for: aws-sdk-cloudfront-1.87.0
 limitations under the License.

 ==========
-Notice for: aws-sdk-cloudwatch-1.85.0
+Notice for: aws-sdk-cloudwatch-1.90.0
 ----------

@@ -1343,7 +1343,7 @@ Notice for: aws-sdk-cloudwatch-1.85.0
 limitations under the License.

 ==========
-Notice for: aws-sdk-core-3.191.0
+Notice for: aws-sdk-core-3.194.2
 ----------

@@ -1550,7 +1550,7 @@ Notice for: aws-sdk-core-3.191.0
 limitations under the License.

 ==========
-Notice for: aws-sdk-kms-1.77.0
+Notice for: aws-sdk-kms-1.80.0
 ----------

@@ -1757,7 +1757,7 @@ Notice for: aws-sdk-kms-1.77.0
 limitations under the License.

 ==========
-Notice for: aws-sdk-resourcegroups-1.58.0
+Notice for: aws-sdk-resourcegroups-1.60.0
 ----------

@@ -1964,7 +1964,7 @@ Notice for: aws-sdk-resourcegroups-1.58.0
 limitations under the License.

 ==========
-Notice for: aws-sdk-s3-1.143.0
+Notice for: aws-sdk-s3-1.149.1
 ----------

@@ -2171,7 +2171,7 @@ Notice for: aws-sdk-s3-1.143.0
 limitations under the License.

 ==========
-Notice for: aws-sdk-sns-1.71.0
+Notice for: aws-sdk-sns-1.74.0
 ----------

@@ -2378,7 +2378,7 @@ Notice for: aws-sdk-sns-1.71.0
 limitations under the License.

 ==========
-Notice for: aws-sdk-sqs-1.70.0
+Notice for: aws-sdk-sqs-1.73.0
 ----------

@@ -2837,7 +2837,7 @@ OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 SUCH DAMAGE.

 ==========
-Notice for: bigdecimal-3.1.6
+Notice for: bigdecimal-3.1.8
 ----------

 # source: https://github.com/ruby/bigdecimal/blob/v3.1.4/LICENSE
@@ -2899,7 +2899,7 @@ You can redistribute it and/or modify it under either the terms of the
 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 PURPOSE.
 ==========
-Notice for: bindata-2.4.15
+Notice for: bindata-2.5.0
 ----------

 Copyright (C) 2007-2012 Dion Mendel. All rights reserved.
@@ -3086,7 +3086,7 @@ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 ==========
-Notice for: com.fasterxml.jackson.core:jackson-annotations-2.15.2
+Notice for: com.fasterxml.jackson.core:jackson-annotations-2.15.4
 ----------

 This copy of Jackson JSON processor annotations is licensed under the
@@ -3303,7 +3303,7 @@ http://www.apache.org/licenses/LICENSE-2.0
 limitations under the License.

 ==========
-Notice for: com.fasterxml.jackson.core:jackson-core-2.15.2
+Notice for: com.fasterxml.jackson.core:jackson-core-2.15.4
 ----------

 # Jackson JSON processor
@@ -3530,7 +3530,7 @@ from the source code management (SCM) system project uses.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==========
-Notice for: com.fasterxml.jackson.core:jackson-databind-2.15.2
+Notice for: com.fasterxml.jackson.core:jackson-databind-2.15.4
 ----------

 # Jackson JSON processor
@@ -3981,7 +3981,7 @@ http://www.apache.org/licenses/LICENSE-2.0
 See the License for the specific language governing permissions and
 limitations under the License.
 ==========
-Notice for: com.fasterxml.jackson.module:jackson-module-afterburner-2.15.2
+Notice for: com.fasterxml.jackson.module:jackson-module-afterburner-2.15.4
 ----------

 This copy of Jackson JSON processor databind module is licensed under the
@@ -5140,7 +5140,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.

 ==========
-Notice for: dalli-3.2.7
+Notice for: dalli-3.2.8
 ----------

 Copyright (c) Peter M. Goldstein, Mike Perham
@@ -5589,7 +5589,7 @@ source: https://github.com/elastic/enterprise-search-ruby/blob/v7.16.0/LICENSE
 See the License for the specific language governing permissions and
 limitations under the License.
 ==========
-Notice for: elastic-transport-8.3.1
+Notice for: elastic-transport-8.3.2
 ----------

 source: https://github.com/elastic/elastic-transport-ruby/blob/v8.3.0/LICENSE
@@ -5797,7 +5797,7 @@ source: https://github.com/elastic/elastic-transport-ruby/blob/v8.3.0/LICENSE
 See the License for the specific language governing permissions and
 limitations under the License.
 ==========
-Notice for: elasticsearch-7.17.10
+Notice for: elasticsearch-7.17.11
 ----------

 source: https://github.com/elastic/elasticsearch-ruby/blob/v5.0.4/elasticsearch-api/LICENSE.txt
@@ -5817,7 +5817,7 @@ See the License for the specific language governing permissions and
 limitations under the License.

 ==========
-Notice for: elasticsearch-api-7.17.10
+Notice for: elasticsearch-api-7.17.11
 ----------

 source: https://github.com/elastic/elasticsearch-ruby/blob/v5.0.4/elasticsearch-transport/LICENSE.txt
@@ -5837,7 +5837,7 @@ See the License for the specific language governing permissions and
 limitations under the License.

 ==========
-Notice for: elasticsearch-transport-7.17.10
+Notice for: elasticsearch-transport-7.17.11
 ----------

 source: https://github.com/elastic/elasticsearch-ruby/blob/v5.0.4/elasticsearch/LICENSE.txt
@@ -5885,7 +5885,7 @@ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 ==========
-Notice for: et-orbi-1.2.7
+Notice for: et-orbi-1.2.11
 ----------

 source: https://github.com/floraison/et-orbi/blob/v1.2.7/LICENSE.txt
@@ -6157,7 +6157,33 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.

 ==========
-Notice for: fugit-1.9.0
+Notice for: fileutils-1.7.2
+----------
+
+Copyright (C) 1993-2013 Yukihiro Matsumoto. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
+==========
+Notice for: fugit-1.11.0
 ----------

 source: https://github.com/floraison/fugit/blob/v1.5.2/LICENSE.txt
@@ -6292,7 +6318,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


 ==========
-Notice for: http-cookie-1.0.5
+Notice for: http-cookie-1.0.6
 ----------

 source: https://github.com/sparklemotion/http-cookie/blob/v1.0.3/LICENSE.txt
@@ -6380,7 +6406,7 @@ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 IN THE SOFTWARE.

 ==========
-Notice for: i18n-1.14.1
+Notice for: i18n-1.14.5
 ----------

 source: https://github.com/svenfuchs/i18n/blob/v0.6.9/MIT-LICENSE
@@ -6865,7 +6891,7 @@ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 of your accepting any such warranty or additional liability.

 ==========
-Notice for: jrjackson-0.4.18
+Notice for: jrjackson-0.4.20
 ----------

 https://github.com/guyboertje/jrjackson/blob/v0.4.6/README.md
@@ -9357,7 +9383,7 @@ See the License for the specific language governing permissions and
 limitations under the License.

 ==========
-Notice for: jruby-openssl-0.14.2
+Notice for: jruby-openssl-0.14.6
 ----------

 source: https://github.com/jruby/jruby-openssl/blob/v0.9.21/LICENSE.txt
@@ -9417,7 +9443,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==========
-Notice for: json-2.7.1
+Notice for: json-2.7.2
 ----------

 source: https://github.com/tmattia/json-generator/blob/v0.1.0/LICENSE.txt
@@ -9445,7 +9471,7 @@ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 ==========
-Notice for: jwt-2.7.1
+Notice for: jwt-2.8.2
 ----------

 source: https://github.com/jwt/ruby-jwt/blob/v2.2.2/LICENSE
@@ -9594,7 +9620,7 @@ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 ==========
-Notice for: method_source-1.0.0
+Notice for: method_source-1.1.0
 ----------

 source: https://github.com/banister/method_source/blob/v0.9.0/README.markdown#license
@@ -9730,7 +9756,7 @@ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 ==========
-Notice for: multipart-post-2.3.0
+Notice for: multipart-post-2.4.1
 ----------

 source: https://github.com/nicksieger/multipart-post/blob/v2.0.0/README.md#license
@@ -9870,7 +9896,7 @@ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 ==========
-Notice for: net-imap-0.4.9.1
+Notice for: net-imap-0.4.14
 ----------

 # source: https://github.com/ruby/net-imap/blob/v0.3.7/LICENSE.txt
@@ -10014,7 +10040,7 @@ LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 SUCH DAMAGE.
 ==========
-Notice for: net-smtp-0.4.0.1
+Notice for: net-smtp-0.5.0
 ----------

 # source: https://github.com/ruby/net-smtp/blob/v0.4.0/LICENSE.txt
@@ -10042,7 +10068,7 @@ LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 SUCH DAMAGE.
 ==========
-Notice for: nio4r-2.7.0
+Notice for: nio4r-2.7.3
 ----------

 Released under the MIT license.
@@ -10057,7 +10083,7 @@ The above copyright notice and this permission notice shall be included in all c
 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 ==========
-Notice for: nokogiri-1.16.0
+Notice for: nokogiri-1.16.6
 ----------

 source: https://github.com/sparklemotion/nokogiri/blob/v1.8.2/LICENSE.md
@@ -10303,7 +10329,7 @@ Copyright (C) 1999-2019 by Shigeru Chiba, All rights reserved.
 This software is distributed under the Mozilla Public License Version 1.1, the GNU Lesser General Public License Version 2.1 or later, or the Apache License Version 2.0.

 ==========
-Notice for: org.jruby:jruby-core-9.4.5.0
+Notice for: org.jruby:jruby-core-9.4.7.0
 ----------

 JRuby is Copyright (c) 2007-2018 The JRuby project
@@ -10586,7 +10612,7 @@ Eclipse Public License - v 2.0

 You may add additional accurate notices of copyright ownership.
 ==========
-Notice for: org.logstash:jvm-options-parser-8.13.0
+Notice for: org.logstash:jvm-options-parser-8.14.2
 ----------

 Copyright (c) 2022 Elasticsearch B.V. <http://www.elastic.co>
@@ -10774,7 +10800,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.

 ==========
-Notice for: public_suffix-5.0.4
+Notice for: public_suffix-5.0.5
 ----------

 Copyright (c) 2009-2018 Simone Carletti <weppos@weppos.net>
@@ -10889,7 +10915,7 @@ OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 SUCH DAMAGE.

 ==========
-Notice for: rack-2.2.8
+Notice for: rack-2.2.9
 ----------

 The MIT License (MIT)
@@ -10940,7 +10966,7 @@ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 ==========
-Notice for: rake-13.1.0
+Notice for: rake-13.2.1
 ----------

 Copyright (c) Jim Weirich
@@ -11017,6 +11043,82 @@ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 SUCH DAMAGE.
 ==========
+Notice for: ruby-maven-libs-3.9.6.1
+----------
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
+
+"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
+3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
+4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
+(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
+(b) You must cause any modified files to carry prominent notices stating that You changed the files; and
+(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
+(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
+You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
+6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
+7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
+8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
+9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Standard License Header
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+==========
 Notice for: ruby-progressbar-1.13.0
 ----------

@@ -11185,7 +11287,7 @@ See the License for the specific language governing permissions and
 limitations under the License.

 ==========
-Notice for: sequel-5.76.0
+Notice for: sequel-5.80.0
 ----------

 Copyright (c) 2007-2008 Sharon Rosner
@@ -11752,7 +11854,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.

 ==========
-Notice for: tzinfo-data-1.2023.4
+Notice for: tzinfo-data-1.2024.1
 ----------

 Copyright (c) 2005-2018 Philip Ross
@@ -11776,7 +11878,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.

 ==========
-Notice for: webhdfs-0.10.2
+Notice for: webhdfs-0.11.0
 ----------

 Copyright (C) 2012 Fluentd Project

@@ -314,6 +314,8 @@
 # * json
 #
 # log.format: plain
+# log.format.json.fix_duplicate_message_fields: false
+#
 # path.logs:
 #
 # ------------ Other Settings --------------

@@ -71,6 +71,7 @@ var validSettings = []string{
 	"http.port", // DEPRECATED: prefer `api.http.port`
 	"log.level",
 	"log.format",
+	"log.format.json.fix_duplicate_message_fields",
 	"modules",
 	"metric.collect",
 	"path.logs",

docs/static/config-management.asciidoc (vendored, 3 changed lines)

@@ -6,8 +6,7 @@ manage updates to your configuration over time.

 The topics in this section describe Logstash configuration management features
 only. For information about other config management tools, such as Puppet and
-Chef, see the documentation for those projects. Also take a look at the
-https://forge.puppet.com/elastic/logstash[Logstash Puppet module documentation].
+Chef, see the documentation for those projects.

 :edit_url!:
 include::management/centralized-pipelines.asciidoc[]

docs/static/ea-integrations.asciidoc (vendored, 13 changed lines)

@@ -80,3 +80,16 @@ output { <3>
 <1> Use `filter-elastic_integration` as the first filter in your pipeline
 <2> You can use additional filters as long as they follow `filter-elastic_integration`
 <3> Sample config to output data to multiple destinations
+
+[discrete]
+[[es-tips]]
+==== Using `filter-elastic_integration` with `output-elasticsearch`
+
+Elastic {integrations} are designed to work with {logstash-ref}/plugins-outputs-elasticsearch.html#plugins-outputs-elasticsearch-data-streams[data streams] and {logstash-ref}/plugins-outputs-elasticsearch.html#_compatibility_with_the_elastic_common_schema_ecs[ECS-compatible] output.
+Be sure that these features are enabled in the {logstash-ref}/plugins-outputs-elasticsearch.html[`output-elasticsearch`] plugin.
+
+* Set {logstash-ref}/plugins-outputs-elasticsearch.html#plugins-outputs-elasticsearch-data_stream[`data-stream`] to `true`. +
+(Check out {logstash-ref}/plugins-outputs-elasticsearch.html#plugins-outputs-elasticsearch-data-streams[Data streams] for additional data streams settings.)
+* Set {logstash-ref}/plugins-outputs-elasticsearch.html#plugins-outputs-elasticsearch-ecs_compatibility[`ecs-compatibility`] to `v1` or `v8`.
+
+Check out the {logstash-ref}/plugins-outputs-elasticsearch.html[`output-elasticsearch` plugin] docs for additional settings.

docs/static/persistent-queues.asciidoc (vendored, 2 changed lines)

@@ -61,7 +61,7 @@ The total of `queue.max_bytes` for _all_ queues should be
 lower than the capacity of your disk.
 +
 TIP: If you are using persistent queues to protect against data loss, but don't
-require much buffering, you can set `queue.max_bytes` to a smaller value.
+require much buffering, you can set `queue.max_bytes` to a smaller value as long as it is not less than the value of `queue.page_capacity`.
 A smaller value produces smaller queues and improves queue performance.

 `queue.checkpoint.acks`:: Sets the number of acked events before forcing a checkpoint.

docs/static/releasenotes.asciidoc (vendored, 150 changed lines)

@@ -3,6 +3,11 @@

 This section summarizes the changes in the following releases:

+* <<logstash-8-14-2,Logstash 8.14.2>>
+* <<logstash-8-14-1,Logstash 8.14.1>>
+* <<logstash-8-14-0,Logstash 8.14.0>>
+* <<logstash-8-13-4,Logstash 8.13.4>>
+* <<logstash-8-13-3,Logstash 8.13.3>>
 * <<logstash-8-13-2,Logstash 8.13.2>>
 * <<logstash-8-13-1,Logstash 8.13.1>>
 * <<logstash-8-13-0,Logstash 8.13.0>>
@@ -58,6 +63,149 @@ This section summarizes the changes in the following releases:
 * <<logstash-8-0-0-alpha1,Logstash 8.0.0-alpha1>>


+[[logstash-8-14-2]]
+=== Logstash 8.14.2 Release Notes
+
+[[notable-8.14.2]]
+==== Notable issues fixed
+
+* Fixes a regression from Logstash 8.7.0 that prevented pipelines from starting when they included plugins with unicode ids https://github.com/elastic/logstash/pull/15971[#15971]
+* Fixes a regression from Logstash 8.12.0 that prevented pipelines from starting when they included a geoip filter that used the managed databases feature after the databases had been updated https://github.com/elastic/logstash/pull/16222[#16222]
+* Fixes an issue with the dead-letter queue that could cause it to emit superfluous warning messages when age-based retention was enabled while determining whether a fully-consumed segment that had already been removed was also expired https://github.com/elastic/logstash/pull/16204[#16204]
+
+==== Plugins
+
+*Fluent Codec - 3.4.3*
+
+* Fix: reduce overhead of unpacking packforward-payloads by reusing a single instance https://github.com/logstash-plugins/logstash-codec-fluent/pull/32[#32]
+
+*Elastic_integration Filter - 0.1.10*
+
+* Fixes handling of array-type event fields by treating them as lists https://github.com/elastic/logstash-filter-elastic_integration/pull/146[#146]
+* Syncs with Elasticsearch 8.14, including support for new user-provided GeoIP database types `ConnectionType`, `Domain` and `Isp` https://github.com/elastic/logstash-filter-elastic_integration/pull/147[#147]
+
+*Elasticsearch Input - 4.20.3*
+
+* [DOC] Update link to bypass redirect, resolving directly to correct content https://github.com/logstash-plugins/logstash-input-elasticsearch/pull/206[#206]
+
+*Elasticsearch Output - 11.22.7*
+
+* [DOC] `ssl_key` requires PKCS#8 format https://github.com/logstash-plugins/logstash-output-elasticsearch/pull/1181[#1181]
+
+
+[[logstash-8-14-1]]
+=== Logstash 8.14.1 Release Notes
+
+* Fixes JSON serialization of payloads that are not UTF-8, eliminating an issue where the Elasticsearch Output could get stuck in a retry loop (#16072) https://github.com/elastic/logstash/pull/16168[#16168]
+* Fixes Persistent Queue bug in which a PQ configured with `queue.max_bytes` equal to its `queue.page_capacity` could become permanently blocked when _precisely_ full https://github.com/elastic/logstash/pull/16178[#16178]
+* Fixes a regression in multi-local pipeline loader that caused variable-references in a configured `pipelines.yml` to not be replaced by their values in the environment and/or keystore https://github.com/elastic/logstash/pull/16201[#16201]
+
+
+==== Plugins
+
+*Elastic_integration Filter - 0.1.9*
+
+- [DOC] Removes Tech Preview label and adds link to extending integrations topic in LSR https://github.com/elastic/logstash-filter-elastic_integration/pull/142[#142]
+
+
+*Azure_event_hubs Input - 1.4.7*
+
+* [DOCS] Clarify examples for single and multiple event hubs https://github.com/logstash-plugins/logstash-input-azure_event_hubs/pull/90[#90]
+
+* [DOCS] Add outbound port requirements for Event Hub https://github.com/logstash-plugins/logstash-input-azure_event_hubs/pull/88[#88]
+
+*Jdbc Integration - 5.4.11*
+
+* Fixes an issue in which any one instance of a JDBC input plugin using `jdbc_default_timezone` changes the behaviour of plugin instances that do _not_ use `jdbc_default_timezone`, ensuring that timezone offsets remain consistent for each instance of the plugin _as configured_ https://github.com/logstash-plugins/logstash-integration-jdbc/pull/151[#151]
+* Fixes an exception that could occur while reloading `jdbc_static` databases when the underlying connection to the remote has been broken https://github.com/logstash-plugins/logstash-integration-jdbc/pull/165[#165]
+
+*Kafka Integration - 11.4.2*
+
+* Add default client_id of logstash to kafka output https://github.com/logstash-plugins/logstash-integration-kafka/pull/169[#169]
+
+*Http Output - 5.6.1*
+
+* Added body logging for non 2xx responses https://github.com/logstash-plugins/logstash-output-http/pull/142[#142]
+
+*Tcp Output - 6.2.1*
+
+* Document correct default plugin codec https://github.com/logstash-plugins/logstash-output-tcp/pull/54[#54]
+
+
+[[logstash-8-14-0]]
+=== Logstash 8.14.0 Release Notes
+[[featured-8-14-0]]
+==== Announcing the new SNMP integration plugin (Technical Preview)
+
+The new {logstash-ref}/plugins-integrations-snmp.html#plugins-integrations-snmp-migration[`logstash-integration-snmp`] plugin is now available in _Technical Preview_, and can be installed on {ls} 8.13.
+
+experimental[]
+
+The `logstash-integration-snmp` plugin combines our
+classic `logstash-input-snmp` and `logstash-input-snmptrap` plugins into a single Ruby gem at v4.0.0.
+Current 1.x versions of the `input-snmp` plugin are bundled with {ls} by default, and will soon be replaced by the 4.0.0+ version contained in this new integration.
+
+If you want to try the new `integration-snmp` plugin while it is in Technical Preview, run `bin/logstash-plugin install logstash-integration-snmp`.
+
+IMPORTANT: Before you install the new integration, be aware of {logstash-ref}/plugins-integrations-snmp.html#plugins-integrations-snmp-migration[behavioral and mapping differences] between current stand-alone plugins and the new versions included in `integration-snmp`.
+
+[[notable-8.14.0]]
+==== Enhancements and notable issues fixed
+
+* Fixed a bug that created duplicated `io.netty.allocator.maxOrder` system property when using environment variable `LS_JAVA_OPTS` in Docker https://github.com/elastic/logstash/pull/16079[#16079]
+
+==== Plugins
+
+*Jdbc Integration - 5.4.10*
+
+* [DOC] Added database-specific considerations https://github.com/logstash-plugins/logstash-integration-jdbc/pull/167[#167]
+
+*Kafka Integration - 11.4.1*
+
+* Added `message_headers` option to set headers of record for Kafka output https://github.com/logstash-plugins/logstash-integration-kafka/pull/162[#162]
+
+[[dependencies-8.14.0]]
+==== Updates to dependencies
+
+* Update JRuby to 9.4.7.0 https://github.com/elastic/logstash/pull/16125[#16125]
+
+
+[[logstash-8-13-4]]
+=== Logstash 8.13.4 Release Notes
+
+No user-facing changes in Logstash core.
+
+==== Plugins
+
+*Elasticsearch Output - 11.22.6*
+
+* [DOC] Logstash output.elasticsearch index can be alias or datastream https://github.com/logstash-plugins/logstash-output-elasticsearch/pull/1179[#1179]
+
+
+[[logstash-8-13-3]]
+=== Logstash 8.13.3 Release Notes
+
+No user-facing changes in Logstash core.
+
+==== Plugins
+
+*Beats Input - 6.8.3*
+
+* Updated netty to 4.1.109 https://github.com/logstash-plugins/logstash-input-beats/pull/495[#495]
+
+*Http Input - 3.8.1*
+
+* Updated netty to 4.1.109 https://github.com/logstash-plugins/logstash-input-http/pull/173[#173]
+
+*Tcp Input - 6.4.2*
+
+* Updated netty to 4.1.109 https://github.com/logstash-plugins/logstash-input-tcp/pull/220[#220]
+
+*Multiline Codec - 3.1.2*
+
+* Fixed a race condition in periodic runner that blocks clean up process and pipeline shutdown https://github.com/logstash-plugins/logstash-codec-multiline/pull/72[#72]
+
+
 [[logstash-8-13-2]]
 === Logstash 8.13.2 Release Notes

@@ -2154,4 +2302,4 @@ We have added another flag to the Benchmark CLI to allow passing a data file wit
 This feature allows users to run the Benchmark CLI in a custom test case with a custom config and a custom dataset. https://github.com/elastic/logstash/pull/12437[#12437]

 ==== Plugin releases
-Plugins align with release 7.14.0
+Plugins align with release 7.14.0

@@ -230,6 +230,9 @@ With this command, Logstash concatenates three config files, `/tmp/one`, `/tmp/t
 Specify if Logstash should write its own logs in JSON form (one event per line) or in plain text
 (using Ruby's Object#inspect). The default is "plain".

+*`--log.format.json.fix_duplicate_message_fields ENABLED`*::
+Avoid `message` field collision using JSON log format. Possible values are `false` (default) and `true`.
+
 *`--path.settings SETTINGS_DIR`*::
 Set the directory containing the `logstash.yml` <<logstash-settings-file,settings file>> as well
 as the log4j logging configuration. This can also be set through the LS_SETTINGS_DIR environment variable.

docs/static/settings-file.asciidoc (vendored, 4 changed lines)

@@ -336,6 +336,10 @@ The log level. Valid options are:
 | The log format. Set to `json` to log in JSON format, or `plain` to use `Object#.inspect`.
 | `plain`

+| `log.format.json.fix_duplicate_message_fields`
+| When the log format is `json` avoid collision of field names in log lines.
+| `false`
+
 | `path.logs`
 | The directory where Logstash will write its log to.
 | `LOGSTASH_HOME/logs`

docs/static/tab-widgets/install-agent.asciidoc (vendored, 20 changed lines)

@@ -1,10 +1,5 @@
 // tag::fleet-managed[]
-. When the **Add Agent flyout** appears, stay on the **Enroll in fleet** tab
-+
---
-[role="screenshot"]
-image::../monitoring/images/integration-agent-add.png[Add agent flyout in {kib}]
---
+. When the **Add Agent flyout** appears, stay on the **Enroll in fleet** tab.
 . Skip the **Select enrollment token** step. The enrollment token you need is
 already selected.
 +
@@ -20,20 +15,11 @@ It takes about a minute for {agent} to enroll in {fleet}, download the
 configuration specified in the policy you just created, and start collecting
 data.

---
-[role="screenshot"]
-image::../monitoring/images/integration-agent-confirm.png[Agent confirm data]
---
 // end::fleet-managed[]

 // tag::standalone[]
-. When the **Add Agent flyout** appears, navigate to the **Run standalone** tab
-+
---
-[role="screenshot"]
-image::../monitoring/images/integration-agent-add-standalone.png[Add agent flyout in {kib}]
---
-. Configure the agent. Follow all the instructions in **Install Elastic Agent on your host**
+. When the **Add Agent flyout** appears, navigate to the **Run standalone** tab.
+. Configure the agent. Follow the instructions in **Install Elastic Agent on your host**.
 . After unpacking the binary, replace the `elastic-agent.yml` file with that supplied in the Add Agent flyout on the "Run standalone" tab, replacing the values of `ES_USERNAME` and `ES_PASSWORD` appropriately.
 . Run `sudo ./elastic-agent install`
 // end::standalone[]

docs/static/troubleshoot/ts-logstash.asciidoc (vendored, 62 changed lines)

@@ -204,3 +204,65 @@ As the logging library used in Logstash is synchronous, heavy logging can affect
 *Solution*

 Reset the logging level to `info`.
+
+[[ts-pipeline-logging-json-duplicated-message-field]]
+==== Logging in json format can write duplicate `message` fields
+
+*Symptoms*
+
+When log format is `json` and certain log events (for example errors from JSON codec plugin)
+contains two instances of the `message` field.
+
+Without setting this flag, json log would contain objects like:
+
+[source,json]
+-----
+{
+  "level":"WARN",
+  "loggerName":"logstash.codecs.jsonlines",
+  "timeMillis":1712937761955,
+  "thread":"[main]<stdin",
+  "logEvent":{
+    "message":"JSON parse error, original data now in message field",
+    "message":"Unexpected close marker '}': expected ']' (for Array starting at [Source: (String)\"{\"name\": [}\"; line: 1, column: 10])\n at [Source: (String)\"{\"name\": [}\"; line: 1, column: 12]",
+    "exception":"LogStash::Json::ParserError",
+    "data":"{\"name\": [}"
+  }
+}
+-----
+
+Please note the duplication of `message` field, while being technically valid json, it is not always parsed correctly.
+
+*Solution*
+In `config/logstash.yml` enable the strict json flag:
+
+[source,yaml]
+-----
+log.format.json.fix_duplicate_message_fields: true
+-----
+
+or pass the command line switch
+
+[source]
+-----
+bin/logstash --log.format.json.fix_duplicate_message_fields true
+-----
+
+With `log.format.json.fix_duplicate_message_fields` enabled the duplication of `message` field is removed,
+adding to the field name a `_1` suffix:
+
+[source,json]
+-----
+{
+  "level":"WARN",
+  "loggerName":"logstash.codecs.jsonlines",
+  "timeMillis":1712937629789,
+  "thread":"[main]<stdin",
+  "logEvent":{
+    "message":"JSON parse error, original data now in message field",
+    "message_1":"Unexpected close marker '}': expected ']' (for Array starting at [Source: (String)\"{\"name\": [}\"; line: 1, column: 10])\n at [Source: (String)\"{\"name\": [}\"; line: 1, column: 12]",
+    "exception":"LogStash::Json::ParserError",
+    "data":"{\"name\": [}"
+  }
+}
+-----

@@ -140,6 +140,7 @@ jacocoTestReport {
 javaTests.finalizedBy(jacocoTestReport)

 tasks.register("rubyTests", Test) {
+  dependsOn compileTestJava
   inputs.files fileTree("${projectDir}/lib")
   inputs.files fileTree("${projectDir}/spec")
   systemProperty 'logstash.root.dir', projectDir.parent

@@ -47,7 +47,7 @@ module LogStash::Api::AppHelpers
       end

       content_type "application/json"
-      LogStash::Json.dump(data, {:pretty => pretty?})
+      LogStash::Json.dump(stringify_symbols(data), {:pretty => pretty?})
     else
       content_type "text/plain"
       data.to_s
@@ -81,6 +81,16 @@ module LogStash::Api::AppHelpers
     params.has_key?("pretty")
   end

+  # Recursively stringify symbols in the provided data structure
+  def stringify_symbols(data)
+    case data
+    when Hash then Hash[data.each_pair.map { |k,v| [stringify_symbols(k), stringify_symbols(v)] }]
+    when Array then data.map { |v| stringify_symbols(v) }
+    when Symbol then data.to_s.encode('UTF-8')
+    else data
+    end
+  end
+
   def generate_error_hash(error)
     {
       :path => request.path,
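
The new `stringify_symbols` helper is easiest to see on a concrete value. A minimal standalone sketch (plain Ruby, no Logstash required; the payload hash is invented for illustration):

[source,ruby]
-----
# Minimal copy of the stringify_symbols helper from the diff above.
def stringify_symbols(data)
  case data
  when Hash then Hash[data.each_pair.map { |k, v| [stringify_symbols(k), stringify_symbols(v)] }]
  when Array then data.map { |v| stringify_symbols(v) }
  when Symbol then data.to_s.encode('UTF-8')
  else data
  end
end

# Hypothetical API payload mixing symbol keys and symbol values:
payload = { :status => :green, :pipelines => [{ :id => :main, :workers => 4 }] }
p stringify_symbols(payload)
# => {"status"=>"green", "pipelines"=>[{"id"=>"main", "workers"=>4}]}
-----

Every Symbol becomes a UTF-8 String before `LogStash::Json.dump` runs, so the serializer never receives a Symbol whose byte representation carries a non-UTF-8 encoding.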

@@ -30,7 +30,7 @@ module LogStash module Config module Source
   end

   def pipeline_configs
-    pipelines = retrieve_yaml_pipelines
+    pipelines = deep_replace(retrieve_yaml_pipelines)
     pipelines_settings = pipelines.map do |pipeline_settings|
       clone = @original_settings.clone
       clone.merge_pipeline_settings(pipeline_settings)
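
For context, `deep_replace` is the substitution pass (defined in `LogStash::Util::SubstitutionVariables`) that resolves `${VAR}` references in the structures loaded from `pipelines.yml`; applying it here restores variable replacement in the multi-local loader, the fix referenced as #16201 in the 8.14.1 release notes above. A simplified, environment-only sketch of the behaviour (the real implementation also consults the keystore and supports `${VAR:default}` syntax):

[source,ruby]
-----
# Environment-only approximation of deep_replace; illustrative, not the real code.
def deep_replace_sketch(value)
  case value
  when Hash   then value.transform_values { |v| deep_replace_sketch(v) }
  when Array  then value.map { |v| deep_replace_sketch(v) }
  when String then value.gsub(/\$\{(\w+)\}/) { ENV.fetch(Regexp.last_match(1), '') }
  else value
  end
end

ENV['UDP_DEV_PORT'] = '5555'
pipeline = { 'pipeline.id'   => 'main',
             'config.string' => 'input { udp { port => ${UDP_DEV_PORT} } }' }
p deep_replace_sketch(pipeline)
# => {"pipeline.id"=>"main", "config.string"=>"input { udp { port => 5555 } }"}
-----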

@@ -72,6 +72,7 @@ module LogStash
     Setting::Boolean.new("help", false),
     Setting::Boolean.new("enable-local-plugin-development", false),
     Setting::String.new("log.format", "plain", true, ["json", "plain"]),
+    Setting::Boolean.new("log.format.json.fix_duplicate_message_fields", false),
     Setting::Boolean.new("api.enabled", true).with_deprecated_alias("http.enabled"),
     Setting::String.new("api.http.host", "127.0.0.1").with_deprecated_alias("http.host"),
     Setting::PortRange.new("api.http.port", 9600..9700).with_deprecated_alias("http.port"),
@@ -124,6 +125,7 @@ module LogStash
   SETTINGS.on_post_process do |settings|
     # Configure Logstash logging facility. This needs to be done as early as possible to
     # make sure the logger has the correct settings tnd the log level is correctly defined.
+    java.lang.System.setProperty("ls.log.format.json.fix_duplicate_message_fields", settings.get("log.format.json.fix_duplicate_message_fields").to_s)
     java.lang.System.setProperty("ls.logs", settings.get("path.logs"))
     java.lang.System.setProperty("ls.log.format", settings.get("log.format"))
     java.lang.System.setProperty("ls.log.level", settings.get("log.level"))

@@ -16,6 +16,7 @@
 # under the License.

 require "logstash/environment"
+require "logstash/util/unicode_normalizer"
 require "jrjackson"

 module LogStash
@@ -32,16 +33,35 @@ module LogStash
     end

     def jruby_dump(o, options = {})
+      encoding_normalized_data = normalize_encoding(o)
+
       # TODO [guyboertje] remove these comments in 5.0
       # test for enumerable here to work around an omission in JrJackson::Json.dump to
       # also look for Java::JavaUtil::ArrayList, see TODO submit issue
       # o.is_a?(Enumerable) ? JrJackson::Raw.generate(o) : JrJackson::Json.dump(o)
-      JrJackson::Base.generate(o, options)
+      JrJackson::Base.generate(encoding_normalized_data, options)
     rescue => e
       raise LogStash::Json::GeneratorError.new(e.message)
     end

     alias_method :load, "jruby_load".to_sym
     alias_method :dump, "jruby_dump".to_sym
+
+    private
+    def normalize_encoding(data)
+      case data
+      when String
+        LogStash::UnicodeNormalizer.normalize_string_encoding(data)
+      when Array
+        data.map { |item| normalize_encoding(item) }
+      when Hash
+        # origin key might change when normalizing, so requires transformation
+        data.to_hash # if coming from jruby objects such as UnmodifiableMap
+            .transform_keys { |key| normalize_encoding(key) }
+            .transform_values { |value| normalize_encoding(value) }
+      else
+        data # use as it is
+      end
+    end
   end
 end
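
A dependency-free sketch of the recursive normalization walk (minimal re-implementations of the two methods, so it runs without JRuby or JrJackson; the sample event hash is invented):

[source,ruby]
-----
# Illustrative copies of the normalization helpers introduced above.
def normalize_string_encoding(s)
  src = s.encoding == Encoding::BINARY ? Encoding::UTF_8 : s.encoding
  s.encode(Encoding::UTF_8, src, invalid: :replace, undef: :replace).scrub
end

def normalize_encoding(data)
  case data
  when String then normalize_string_encoding(data)
  when Array  then data.map { |item| normalize_encoding(item) }
  when Hash   then data.to_h { |k, v| [normalize_encoding(k), normalize_encoding(v)] }
  else data
  end
end

event = { "message" => "ok \xFF".dup.force_encoding(Encoding::BINARY),
          "tags"    => ["caf\xC3\xA9"] }
p normalize_encoding(event)
# => {"message"=>"ok �", "tags"=>["café"]}
-----

Because every String that reaches `JrJackson::Base.generate` is now valid UTF-8, serialization no longer fails on stray bytes, which is the retry-loop fix referenced as #16168 in the 8.14.1 release notes above.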

@@ -232,6 +232,11 @@ class LogStash::Runner < Clamp::StrictCommand
     :attribute_name => "log.format",
     :default => LogStash::SETTINGS.get_default("log.format")

+  option ["--log.format.json.fix_duplicate_message_fields"], "FORMAT_JSON_STRICT",
+    I18n.t("logstash.runner.flag.log_format_json_fix_duplicate_message_fields"),
+    :attribute_name => "log.format.json.fix_duplicate_message_fields",
+    :default => LogStash::SETTINGS.get_default("log.format.json.fix_duplicate_message_fields")
+
   option ["--path.settings"], "SETTINGS_DIR",
     I18n.t("logstash.runner.flag.path_settings"),
     :attribute_name => "path.settings",
|
logstash-core/lib/logstash/util/unicode_normalizer.rb (new file, 38 additions)
@@ -0,0 +1,38 @@
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

module LogStash

# A class to normalize invalid unicode data
class UnicodeNormalizer

include LogStash::Util::Loggable

# Tries to normalize the input string to UTF-8 when
# the input string's encoding is not UTF-8,
# and replaces invalid unicode bytes with replacement characters ('\uFFFD').
# string_data - The String data to be normalized.
# Returns the normalized string data.
def self.normalize_string_encoding(string_data)
# when given a BINARY-flagged string, assume it is UTF-8 so that
# subsequent cleanup retains valid UTF-8 sequences
source_encoding = string_data.encoding
source_encoding = Encoding::UTF_8 if source_encoding == Encoding::BINARY
string_data.encode(Encoding::UTF_8, source_encoding, invalid: :replace, undef: :replace).scrub
end
end
end
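The Ruby helper above leans on String#encode with invalid:/undef: :replace plus #scrub. For readers more at home on the JVM side of this codebase, the same replace-on-error transcoding can be sketched in Java with java.nio.charset; the class below is purely illustrative and not part of this changeset:

import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CodingErrorAction;
import java.nio.charset.StandardCharsets;

public final class Utf8Sanitizer {
    // Decode raw bytes as UTF-8, substituting U+FFFD for malformed or unmappable
    // sequences -- mirroring encode(..., invalid: :replace, undef: :replace).scrub.
    public static String sanitize(byte[] rawBytes) {
        CharsetDecoder decoder = StandardCharsets.UTF_8.newDecoder()
                .onMalformedInput(CodingErrorAction.REPLACE)
                .onUnmappableCharacter(CodingErrorAction.REPLACE);
        try {
            return decoder.decode(ByteBuffer.wrap(rawBytes)).toString();
        } catch (CharacterCodingException e) {
            throw new IllegalStateException(e); // unreachable: both error actions REPLACE
        }
    }
}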
@@ -1,3 +1,20 @@
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# encoding: utf-8

module LogStash::Util::UnicodeTrimmer
@@ -423,6 +423,8 @@ en:
log_format: |+
Specify if Logstash should write its own logs in JSON form (one
event per line) or in plain text (using Ruby's Object#inspect)
log_format_json_fix_duplicate_message_fields: |+
Enable to avoid duplication of message fields in JSON form.
debug: |+
Set the log level to debug.
DEPRECATED: use --log.level=debug instead.
@@ -146,16 +146,17 @@ describe LogStash::Config::Source::MultiLocal do

describe "#pipeline_configs" do

let(:config_string) {
"input {
udp {
port => 5555 # intentional comment contains \"${UDP_DEV_PORT}\" variable, shouldn't break functionalities
host => \"127.0.0.1\"
}
# another intentional comment contains \"${UDP_PROD_HOST}\" variable, shouldn't break functionalities
}
output {}"
}
# let(:config_string) {
# "input {
# udp {
# port => 5555 # intentional comment contains \"${UDP_DEV_PORT}\" variable, shouldn't break functionalities
# host => \"127.0.0.1\"
# }
# # another intentional comment contains \"${UDP_PROD_HOST}\" variable, shouldn't break functionalities
# }
# output {}"
# }
let(:config_string) { "input {} output {}" }
let(:retrieved_pipelines) do
[
{ "pipeline.id" => "main", "config.string" => config_string },
@@ -118,4 +118,78 @@ describe "LogStash::Json" do
o = LogStash::Json.load(" ")
expect(o).to be_nil
end

context "Unicode edge-cases" do
matcher :be_utf8 do
match(:notify_expectation_failures => true) do |actual|
aggregate_failures do
expect(actual).to have_attributes(:encoding => Encoding::UTF_8, :valid_encoding? => true)
expect(actual.bytes).to eq(@expected_bytes) unless @expected_bytes.nil?
end
end
chain :with_bytes do |expected_bytes|
@expected_bytes = expected_bytes
end
end

let(:result) { LogStash::Json::dump(input) }

context "with valid non-unicode encoding" do
let(:input) { "Th\xEFs \xCCs W\xCFnd\xD8w\x8A".b.force_encoding(Encoding::WINDOWS_1252).freeze }
it 'transcodes to equivalent UTF-8 code-points' do
aggregate_failures do
expect(result).to be_utf8.with_bytes("\u{22}Th\u{EF}s \u{CC}s W\u{CF}nd\u{D8}w\u{160}\u{22}".bytes)
end
end
end

context "with unicode that has invalid sequences" do
let(:input) { "Thïs is a not-quite-v\xCEalid uni\xF0\x9D\x84code string 💖ok".b.force_encoding(Encoding::UTF_8).freeze }
it 'replaces each invalid sequence with the xFFFD replacement character' do
expect(result).to be_utf8.with_bytes("\x22Thïs is a not-quite-v\u{FFFD}alid uni\u{FFFD}code string 💖ok\x22".bytes)
end
end

context 'with valid unicode' do
let(:input) { "valid \u{A7}\u{a9c5}\u{18a5}\u{1f984} unicode".encode('UTF-8').freeze }
it 'keeps the unicode intact' do
expect(result).to be_utf8.with_bytes(('"' + input + '"').bytes)
end
end

context 'with binary-flagged input' do

context 'that contains only lower-ascii' do
let(:input) { "hello, world. This is a test including newline(\x0A) literal-backslash(\x5C) double-quote(\x22)".b.force_encoding(Encoding::BINARY).freeze}
it 'does not munge the bytes' do
expect(result).to be_utf8.with_bytes("\x22hello, world. This is a test including newline(\x5Cn) literal-backslash(\x5C\x5C) double-quote(\x5C\x22)\x22".bytes)
end
end

context 'that contains bytes outside lower-ascii' do
let(:input) { "Thïs is a not-quite-v\xCEalid uni\xF0\x9D\x84code string 💖ok".b.force_encoding(Encoding::BINARY).freeze }
it 'replaces each invalid sequence with the xFFFD replacement character' do
expect(result).to be_utf8.with_bytes("\x22Thïs is a not-quite-v\u{FFFD}alid uni\u{FFFD}code string 💖ok\x22".bytes)
end
end

end

context 'with hash data structure' do
let(:input) {{"Th\xEFs key and".b.force_encoding(Encoding::WINDOWS_1252).freeze =>
{"Thïs key also".b.force_encoding(Encoding::UTF_8).freeze => "not-quite-v\xCEalid uni\xF0\x9D\x84code string 💖ok".b.force_encoding(Encoding::UTF_8).freeze}}}
it 'normalizes each key and value, replacing invalid sequences with the xFFFD replacement character' do
expect(result).to be_utf8.with_bytes("{\"Th\u{EF}s key and\":{\"Thïs key also\":\"not-quite-v\u{FFFD}alid uni\u{FFFD}code string 💖ok\"}}".bytes)
end
end

context 'with array data structure' do
let(:input) {["Th\xEFs entry and".b.force_encoding(Encoding::WINDOWS_1252).freeze,
"Thïs entry also".b.force_encoding(Encoding::UTF_8).freeze,
"not-quite-v\xCEalid uni\xF0\x9D\x84code strings 💖ok".b.force_encoding(Encoding::UTF_8).freeze]}
it 'normalizes and replaces each invalid array value with the xFFFD replacement character' do
expect(result).to be_utf8.with_bytes("[\"Th\u{EF}s entry and\",\"Thïs entry also\",\"not-quite-v\u{FFFD}alid uni\u{FFFD}code strings 💖ok\"]".bytes)
end
end
end
end
@@ -506,18 +506,13 @@ public final class Queue implements Closeable {
}

/**
 * <p>Checks if the Queue is full, with "full" defined as either of:</p>
 * <p>Assuming a maximum size of the queue larger than 0 is defined:</p>
 * <p>Checks if the Queue is full, with "full" defined as either of:
 * <ul>
 * <li>The sum of the size of all allocated pages is more than the allowed maximum Queue
 * size</li>
 * <li>The sum of the size of all allocated pages equal to the allowed maximum Queue size
 * and the current head page has no remaining capacity.</li>
 * </ul>
 * <p>or, assuming a max unread count larger than 0, "full" is also defined as:</p>
 * <ul>
 * <li>The current number of unread events exceeds or is equal to the configured maximum
 * number of allowed unread events.</li>
 * <li>{@code maxUnread} is non-zero and is met or exceeded by the total
 * number of unread events on all pages ({@code unreadCount})</li>
 * <li>{@code maxBytes} is non-zero and is exceeded by the sum of the sizes
 * of all allocated tail pages plus the portion of the current head page
 * containing events</li>
 * </ul>
 * @return True iff the queue is full
 */

@@ -535,8 +530,7 @@ public final class Queue implements Closeable {
return false;
}

final long persistedByteSize = getPersistedByteSize();
return ((persistedByteSize > this.maxBytes) || (persistedByteSize == this.maxBytes && !this.headPage.hasSpace(1)));
return getPersistedByteSize() > this.maxBytes;
}

private boolean isMaxUnreadReached() {
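Read together, the two hunks relax the size rule: a queue sized to an exact multiple of the page size now accepts the final fitting write, and reports full only once persisted bytes strictly exceed the cap. A sketch only, assuming the fields named in these hunks (maxBytes, getPersistedByteSize(), isMaxUnreadReached()); the composed isFull() is an assumption about how the class wires the two checks together:

// Sketch of the relaxed fullness predicate after this change.
private boolean isMaxBytesReached() {
    if (this.maxBytes <= 0) {
        return false; // no byte cap configured
    }
    return getPersistedByteSize() > this.maxBytes; // strictly greater, no head-page special case
}

private boolean isFull() {
    return isMaxBytesReached() || isMaxUnreadReached();
}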
@@ -374,24 +374,32 @@ public final class DeadLetterQueueReader implements Closeable {
}

private int compareByFileTimestamp(Path p1, Path p2) {
FileTime timestamp1;
// if one of the getLastModifiedTime calls raises an error, consider them equal
// and fall back to the other comparator
try {
timestamp1 = Files.getLastModifiedTime(p1);
} catch (IOException ex) {
logger.warn("Error reading file's timestamp for {}", p1, ex);
final Optional<FileTime> timestampResult1 = readLastModifiedTime(p1);
final Optional<FileTime> timestampResult2 = readLastModifiedTime(p2);

// if one of the readLastModifiedTime calls encountered a file not found error or a generic IO error,
// consider them equal and fall back to the other comparator
if (!timestampResult1.isPresent() || !timestampResult2.isPresent()) {
return 0;
}

FileTime timestamp2;
return timestampResult1.get().compareTo(timestampResult2.get());
}

/**
 * Builder method. Reads the timestamp if the file at path p is present.
 * When the file is missing or cannot be read, an empty Optional is returned.
 * */
private static Optional<FileTime> readLastModifiedTime(Path p) {
try {
timestamp2 = Files.getLastModifiedTime(p2);
return Optional.of(Files.getLastModifiedTime(p));
} catch (NoSuchFileException fileNotFoundEx) {
logger.debug("File {} doesn't exist", p);
return Optional.empty();
} catch (IOException ex) {
logger.warn("Error reading file's timestamp for {}", p2, ex);
return 0;
logger.warn("Error reading file's timestamp for {}", p, ex);
return Optional.empty();
}
return timestamp1.compareTo(timestamp2);
}

/**
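The extracted helper also composes naturally into a Comparator with a tiebreak, which is the role the "other comparator" comments above allude to. A self-contained sketch under assumed names (the SegmentOrdering class and the file-name tiebreak are illustrative; logging is elided):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.nio.file.attribute.FileTime;
import java.util.Comparator;
import java.util.Optional;

final class SegmentOrdering {
    // Mirrors the patched helper: empty Optional when the file vanished or IO failed.
    static Optional<FileTime> readLastModifiedTime(Path p) {
        try {
            return Optional.of(Files.getLastModifiedTime(p));
        } catch (NoSuchFileException e) {
            return Optional.empty();
        } catch (IOException e) {
            return Optional.empty();
        }
    }

    // Unreadable timestamps compare equal (0), so the name-based tiebreak decides.
    static final Comparator<Path> BY_TIMESTAMP_THEN_NAME = ((Comparator<Path>) (p1, p2) -> {
        Optional<FileTime> t1 = readLastModifiedTime(p1);
        Optional<FileTime> t2 = readLastModifiedTime(p2);
        return (t1.isPresent() && t2.isPresent()) ? t1.get().compareTo(t2.get()) : 0;
    }).thenComparing(p -> p.getFileName().toString());
}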
@@ -407,6 +415,9 @@ public final class DeadLetterQueueReader implements Closeable {
Files.delete(segment);
logger.debug("Deleted segment {}", segment);
return Optional.of(eventsInSegment);
} catch (NoSuchFileException fileNotFoundEx) {
logger.debug("Expected file segment {} was already removed by a writer", segment);
return Optional.empty();
} catch (IOException ex) {
logger.warn("Problem occurred in cleaning the segment {} after a repositioning", segment, ex);
return Optional.empty();
@@ -105,7 +105,7 @@ public final class ConfigCompiler {
}

private static Statement readStatementFromRubyHash(RubyHash hash, String key) {
IRubyObject inputValue = hash.fastARef(RubyUtil.RUBY.newSymbol(key));
IRubyObject inputValue = hash.fastARef(RubyUtil.RUBY.newString(key).intern());
return inputValue.toJava(Statement.class);
}
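This is the first of many hunks below applying the same substitution: Ruby#newSymbol(String) is replaced by newString(...).intern(), building a UTF-8 RubyString first so multi-byte identifiers survive interning. A minimal embedding sketch, assuming JRuby's public API (the demo class itself is illustrative, not part of the patch):

import org.jruby.Ruby;
import org.jruby.RubySymbol;

public final class InternDemo {
    public static void main(String[] args) {
        Ruby runtime = Ruby.newInstance();
        // A unicode plugin id like those exercised by the new integration specs.
        String id = "変じる";
        // The pattern this changeset standardizes on: UTF-8 RubyString first,
        // then intern, so the resulting symbol preserves the multi-byte name.
        RubySymbol sym = runtime.newString(id).intern();
        System.out.println(sym.asJavaString());
    }
}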
@@ -73,7 +73,7 @@ public final class PipelineConfig {
RubyArray.newArray(RUBY, uncastedConfigParts);

this.source = source;
this.pipelineId = pipelineId.toString();
this.pipelineId = pipelineId.name(RUBY.getCurrentContext()).asJavaString();
SourceWithMetadata[] castedConfigParts = (SourceWithMetadata[]) configParts.toJava(SourceWithMetadata[].class);
List<SourceWithMetadata> confParts = Arrays.asList(castedConfigParts);
confParts.sort(Comparator.comparing(SourceWithMetadata::getProtocol)
@@ -128,7 +128,7 @@ public abstract class AbstractOutputDelegatorExt extends RubyObject {
final ThreadContext context = RubyUtil.RUBY.getCurrentContext();
this.id = RubyString.newString(context.runtime, id);
synchronized (metric) {
namespacedMetric = metric.namespace(context, context.runtime.newSymbol(id));
namespacedMetric = metric.namespace(context, context.runtime.newString(id).intern());
metricEvents = namespacedMetric.namespace(context, MetricKeys.EVENTS_KEY);
namespacedMetric.gauge(context, MetricKeys.NAME_KEY, configName(context));
eventMetricOut = LongCounter.fromRubyBase(metricEvents, MetricKeys.OUT_KEY);
@@ -65,7 +65,7 @@ public class JavaFilterDelegatorExt extends AbstractFilterDelegatorExt {
new JavaFilterDelegatorExt(RubyUtil.RUBY, RubyUtil.JAVA_FILTER_DELEGATOR_CLASS);
instance.configName = RubyUtil.RUBY.newString(configName);
AbstractNamespacedMetricExt scopedMetric =
metric.namespace(RubyUtil.RUBY.getCurrentContext(), RubyUtil.RUBY.newSymbol(filter.getId()));
metric.namespace(RubyUtil.RUBY.getCurrentContext(), RubyUtil.RUBY.newString(filter.getId()).intern());
instance.initMetrics(id, scopedMetric);
instance.filter = filter;
instance.initializeFilterMatchListener(pluginArgs);
@@ -60,7 +60,7 @@ public class JavaInputDelegatorExt extends RubyObject {
final Map<String, Object> pluginArgs) {
final JavaInputDelegatorExt instance =
new JavaInputDelegatorExt(RubyUtil.RUBY, RubyUtil.JAVA_INPUT_DELEGATOR_CLASS);
AbstractNamespacedMetricExt scopedMetric = metric.namespace(RubyUtil.RUBY.getCurrentContext(), RubyUtil.RUBY.newSymbol(input.getId()));
AbstractNamespacedMetricExt scopedMetric = metric.namespace(RubyUtil.RUBY.getCurrentContext(), RubyUtil.RUBY.newString(input.getId()).intern());
scopedMetric.gauge(RubyUtil.RUBY.getCurrentContext(), MetricKeys.NAME_KEY, RubyUtil.RUBY.newString(input.getName()));
instance.setMetric(RubyUtil.RUBY.getCurrentContext(), scopedMetric);
instance.input = input;
@@ -651,20 +651,20 @@ public class AbstractPipelineExt extends RubyBasicObject {

private void initializePluginThroughputFlowMetric(final ThreadContext context, final UptimeMetric uptime, final String id) {
final Metric<Number> uptimeInPreciseSeconds = uptime.withUnitsPrecise(SECONDS);
final RubySymbol[] eventsNamespace = buildNamespace(PLUGINS_KEY, INPUTS_KEY, RubyUtil.RUBY.newSymbol(id), EVENTS_KEY);
final RubySymbol[] eventsNamespace = buildNamespace(PLUGINS_KEY, INPUTS_KEY, RubyUtil.RUBY.newString(id).intern(), EVENTS_KEY);
final LongCounter eventsOut = initOrGetCounterMetric(context, eventsNamespace, OUT_KEY);

final FlowMetric throughputFlow = createFlowMetric(PLUGIN_THROUGHPUT_KEY, eventsOut, uptimeInPreciseSeconds);
this.flowMetrics.add(throughputFlow);

final RubySymbol[] flowNamespace = buildNamespace(PLUGINS_KEY, INPUTS_KEY, RubyUtil.RUBY.newSymbol(id), FLOW_KEY);
final RubySymbol[] flowNamespace = buildNamespace(PLUGINS_KEY, INPUTS_KEY, RubyUtil.RUBY.newString(id).intern(), FLOW_KEY);
storeMetric(context, flowNamespace, throughputFlow);
}

private void initializePluginWorkerFlowMetrics(final ThreadContext context, final int workerCount, final UptimeMetric uptime, final RubySymbol key, final String id) {
final Metric<Number> uptimeInPreciseMillis = uptime.withUnitsPrecise(MILLISECONDS);

final RubySymbol[] eventsNamespace = buildNamespace(PLUGINS_KEY, key, RubyUtil.RUBY.newSymbol(id), EVENTS_KEY);
final RubySymbol[] eventsNamespace = buildNamespace(PLUGINS_KEY, key, RubyUtil.RUBY.newString(id).intern(), EVENTS_KEY);
final TimerMetric durationInMillis = initOrGetTimerMetric(context, eventsNamespace, DURATION_IN_MILLIS_KEY);
final LongCounter counterEvents = initOrGetCounterMetric(context, eventsNamespace, IN_KEY);
final FlowMetric workerCostPerEvent = createFlowMetric(WORKER_MILLIS_PER_EVENT_KEY, durationInMillis, counterEvents);

@@ -675,7 +675,7 @@ public class AbstractPipelineExt extends RubyBasicObject {
final FlowMetric workerUtilization = createFlowMetric(WORKER_UTILIZATION_KEY, percentScaledDurationInMillis, availableWorkerTimeInMillis);
this.flowMetrics.add(workerUtilization);

final RubySymbol[] flowNamespace = buildNamespace(PLUGINS_KEY, key, RubyUtil.RUBY.newSymbol(id), FLOW_KEY);
final RubySymbol[] flowNamespace = buildNamespace(PLUGINS_KEY, key, RubyUtil.RUBY.newString(id).intern(), FLOW_KEY);
storeMetric(context, flowNamespace, workerCostPerEvent);
storeMetric(context, flowNamespace, workerUtilization);
}

@@ -685,7 +685,7 @@ public class AbstractPipelineExt extends RubyBasicObject {
final Metric<T> metric) {
final IRubyObject collector = this.metric.collector(context);
final IRubyObject fullNamespace = pipelineNamespacedPath(subPipelineNamespacePath);
final IRubyObject metricKey = context.runtime.newSymbol(metric.getName());
final IRubyObject metricKey = context.runtime.newString(metric.getName()).intern();

final IRubyObject wasRegistered = collector.callMethod(context, "register?", new IRubyObject[]{fullNamespace, metricKey, JavaUtil.convertJavaToUsableRubyObject(context.runtime, metric)});
if (!wasRegistered.toJava(Boolean.class)) {

@@ -746,7 +746,7 @@ public class AbstractPipelineExt extends RubyBasicObject {
RubyUtil.RUBY.getCurrentContext(),
new IRubyObject[]{
inputQueueClient(), pipelineId().convertToString().intern(),
metric(), RubyUtil.RUBY.newSymbol(inputName)
metric(), RubyUtil.RUBY.newString(inputName).intern()
}
);
}
@@ -77,7 +77,7 @@ public final class JRubyWrappedWriteClientExt extends RubyObject implements Queu
final IRubyObject pluginId) {
this.writeClient = queueWriteClientExt;

final RubySymbol pipelineIdSym = getRuntime().newSymbol(pipelineId);
final RubySymbol pipelineIdSym = getRuntime().newString(pipelineId).intern();
final RubySymbol pluginIdSym = pluginId.asString().intern();

// Synchronize on the metric since setting up new fields on it is not threadsafe
@@ -69,7 +69,9 @@ public class CustomLogEventSerializer extends JsonSerializer<CustomLogEvent> {
}

for (final Map.Entry<Object, Object> entry : message.getParams().entrySet()) {
final String paramName = entry.getKey().toString();
// Given that message params is a map and the generator just started a new object, containing
// only one 'message' field, it could clash only on this field; fix it by post-fixing it with '_1'
final String paramName = renameParamNameIfClashingWithMessage(entry);
final Object paramValue = entry.getValue();

try {

@@ -94,6 +96,16 @@ public class CustomLogEventSerializer extends JsonSerializer<CustomLogEvent> {
}
}

private static String renameParamNameIfClashingWithMessage(Map.Entry<Object, Object> entry) {
final String paramName = entry.getKey().toString();
if ("message".equals(paramName)) {
if ("true".equalsIgnoreCase(System.getProperty("ls.log.format.json.fix_duplicate_message_fields"))) {
return "message_1";
}
}
return paramName;
}

private boolean isValueSafeToWrite(Object value) {
return value == null ||
value instanceof String ||
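The rename only fires when the new ls.log.format.json.fix_duplicate_message_fields system property is set to true, as the CustomLogEventTests hunk further down exercises end to end. A standalone sketch of that decision, mirroring the helper above outside its serializer context (the demo class is illustrative):

public final class RenameDemo {
    public static void main(String[] args) {
        System.setProperty("ls.log.format.json.fix_duplicate_message_fields", "true");
        System.out.println(rename("message")); // -> message_1
        System.out.println(rename("other"));   // -> other (untouched)
    }

    // A params key literally named "message" is post-fixed with "_1" so it cannot
    // clash with the log event's own top-level message field.
    static String rename(String paramName) {
        if ("message".equals(paramName)
                && "true".equalsIgnoreCase(System.getProperty("ls.log.format.json.fix_duplicate_message_fields"))) {
            return "message_1";
        }
        return paramName;
    }
}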
@@ -34,7 +34,7 @@ public class CounterMetricImpl implements CounterMetric {
public CounterMetricImpl(final ThreadContext threadContext,
final AbstractNamespacedMetricExt metrics,
final String metric) {
this.longCounter = LongCounter.fromRubyBase(metrics, threadContext.getRuntime().newSymbol(metric));
this.longCounter = LongCounter.fromRubyBase(metrics, threadContext.getRuntime().newString(metric).intern());
}

@Override
@@ -64,7 +64,7 @@ public class NamespacedMetricImpl implements NamespacedMetric {

@Override
public co.elastic.logstash.api.TimerMetric timer(final String metric) {
return TimerMetric.fromRubyBase(metrics, threadContext.getRuntime().newSymbol(metric));
return TimerMetric.fromRubyBase(metrics, threadContext.getRuntime().newString(metric).intern());
}

@Override

@@ -121,7 +121,7 @@ public class NamespacedMetricImpl implements NamespacedMetric {
}

private RubySymbol getSymbol(final String s) {
return this.threadContext.getRuntime().newSymbol(s);
return this.threadContext.getRuntime().newString(s).intern();
}

private IRubyObject convert(final Object o) {
@@ -23,6 +23,7 @@ package org.logstash.plugins;
import co.elastic.logstash.api.Metric;
import co.elastic.logstash.api.NamespacedMetric;
import org.jruby.RubyArray;
import org.jruby.RubyString;
import org.jruby.runtime.ThreadContext;
import org.jruby.runtime.builtin.IRubyObject;
import org.logstash.instrument.metrics.AbstractMetricExt;

@@ -45,7 +46,8 @@ public class RootMetricImpl implements Metric {
@Override
public NamespacedMetric namespace(final String... key) {
final IRubyObject[] rubyfiedKeys = Stream.of(key)
.map(this.threadContext.getRuntime()::newSymbol)
.map(this.threadContext.getRuntime()::newString)
.map(RubyString::intern)
.toArray(IRubyObject[]::new);

return new NamespacedMetricImpl(
@@ -253,7 +253,7 @@ public final class PluginFactoryExt extends RubyBasicObject
} else {
final IRubyObject pluginInstance = ContextualizerExt.initializePlugin(context, executionCntx, klass, rubyArgs);

final AbstractNamespacedMetricExt scopedMetric = typeScopedMetric.namespace(context, RubyUtil.RUBY.newSymbol(id));
final AbstractNamespacedMetricExt scopedMetric = typeScopedMetric.namespace(context, RubyUtil.RUBY.newString(id).intern());
scopedMetric.gauge(context, MetricKeys.NAME_KEY, pluginInstance.callMethod(context, "config_name"));
pluginInstance.callMethod(context, "metric=", scopedMetric);
return pluginInstance;
@@ -53,7 +53,7 @@ public final class PluginMetricsFactoryExt extends RubyBasicObject {
@JRubyMethod
public AbstractNamespacedMetricExt create(final ThreadContext context, final IRubyObject pluginType) {
return getRoot(context).namespace(
context, RubyUtil.RUBY.newSymbol(String.format("%ss", pluginType.asJavaString()))
context, RubyUtil.RUBY.newString(String.format("%ss", pluginType.asJavaString())).intern()
);
}
}
@@ -519,9 +519,12 @@ public class QueueTest {
public void reachMaxSizeTest() throws IOException, InterruptedException {
Queueable element = new StringElement("0123456789"); // 10 bytes

int pageSize = computeCapacityForMmapPageIO(element, 10);
int queueMaxSize = (pageSize * 10) - 1; // 100th will overflow max capacity while still on 10th page

// allow 10 elements per page but only 100 events in total
Settings settings = TestSettings.persistedQueueSettings(
computeCapacityForMmapPageIO(element, 10), computeCapacityForMmapPageIO(element, 100), dataPath
pageSize, queueMaxSize, dataPath
);
try (Queue q = new Queue(settings)) {
q.open();

@@ -542,6 +545,36 @@ public class QueueTest {
}
}

@Test(timeout = 50_000)
public void preciselyMaxSizeTest() throws IOException, InterruptedException {
Queueable element = new StringElement("0123456789"); // 10 bytes

int pageSize = computeCapacityForMmapPageIO(element, 10);
int queueMaxSize = (pageSize * 10); // 100th will precisely fit max capacity

// allow 10 elements per page but only 100 events in total
Settings settings = TestSettings.persistedQueueSettings(
pageSize, queueMaxSize, dataPath
);
try (Queue q = new Queue(settings)) {
q.open();

int elementCount = 100; // should be able to write 100 events before getting full
for (int i = 0; i < elementCount; i++) {
q.write(element);
}

assertThat(q.isFull(), is(false));

// we expect this next write call to block so let's wrap it in a Future
executor.submit(() -> q.write(element));
while (!q.isFull()) {
Thread.sleep(10);
}
assertThat(q.isFull(), is(true));
}
}

@Test(timeout = 50_000)
public void ackingMakesQueueNotFullAgainTest() throws IOException, InterruptedException, ExecutionException {
@@ -103,7 +103,7 @@ public class PipelineConfigTest extends RubyEnvTestCase {
public void setUp() throws IncompleteSourceWithMetadataException {

source = RubyUtil.RUBY.getClass("LogStash::Config::Source::Local");
pipelineIdSym = RubySymbol.newSymbol(RubyUtil.RUBY, PIPELINE_ID);
pipelineIdSym = RubyUtil.RUBY.newString(PIPELINE_ID).intern();

final SourceCollector sourceCollector = new SourceCollector();
sourceCollector.appendSource("file", "/tmp/1", 0, 0, "input { generator1 }\n", "{\"version\": \"1\"}");
@@ -216,7 +216,7 @@ public class OutputDelegatorTest extends PluginDelegatorTestCase {
Class klazz;

StrategyPair(String symbolName, Class c) {
this.symbol = RUBY.newSymbol(symbolName);
this.symbol = RUBY.newString(symbolName).intern();
this.klazz = c;
}
}
@@ -47,7 +47,7 @@ public abstract class PluginDelegatorTestCase extends RubyEnvTestCase {
final ThreadContext context = RUBY.getCurrentContext();
@SuppressWarnings("rawtypes")
final RubyArray namespaces = RubyArray.newArray(RUBY, 1);
namespaces.add(0, RubySymbol.newSymbol(RUBY, getBaseMetricsPath().split("/")[0]));
namespaces.add(0, RUBY.newString(getBaseMetricsPath().split("/")[0]).intern());
IRubyObject metricWithCollector =
runRubyScript("require \"logstash/instrument/collector\"\n" +
"metricWithCollector = LogStash::Instrument::Metric.new(LogStash::Instrument::Collector.new)");

@@ -70,7 +70,7 @@ public abstract class PluginDelegatorTestCase extends RubyEnvTestCase {

RubyHash rh = metricStore;
for (String p : path) {
rh = (RubyHash) rh.op_aref(RUBY.getCurrentContext(), RUBY.newSymbol(p));
rh = (RubyHash) rh.op_aref(RUBY.getCurrentContext(), RUBY.newString(p).intern());
}
return rh;
}

@@ -78,13 +78,13 @@ public abstract class PluginDelegatorTestCase extends RubyEnvTestCase {
protected abstract String getBaseMetricsPath();

protected String getMetricStringValue(RubyHash metricStore, String symbolName) {
ConcreteJavaProxy counter = (ConcreteJavaProxy) metricStore.op_aref(RUBY.getCurrentContext(), RUBY.newSymbol(symbolName));
ConcreteJavaProxy counter = (ConcreteJavaProxy) metricStore.op_aref(RUBY.getCurrentContext(), RUBY.newString(symbolName).intern());
RubyString value = (RubyString) counter.callMethod("value");
return value.asJavaString();
}

protected long getMetricLongValue(RubyHash metricStore, String symbolName) {
ConcreteJavaProxy counter = (ConcreteJavaProxy) metricStore.op_aref(RUBY.getCurrentContext(), RUBY.newSymbol(symbolName));
ConcreteJavaProxy counter = (ConcreteJavaProxy) metricStore.op_aref(RUBY.getCurrentContext(), RUBY.newString(symbolName).intern());
RubyFixnum count = (RubyFixnum) counter.callMethod("value");
return count.getLongValue();
}
@@ -58,7 +58,7 @@ public class MetricExtFactory {
private RubyModule metricFactoryInterceptor(final String type, final Function<String,?> javaMetricFactory) {
final ThreadContext context = RubyUtil.RUBY.getCurrentContext();

final IRubyObject interceptType = context.runtime.newSymbol(type);
final IRubyObject interceptType = context.runtime.newString(type).intern();
final IRubyObject metricFactory = JavaUtil.convertJavaToUsableRubyObject(context.runtime, MetricFactory.of(javaMetricFactory));
final IRubyObject interceptorModule = INTERCEPTOR_MODULE_CLASS.newInstance(context, interceptType, metricFactory, Block.NULL_BLOCK);
@@ -59,9 +59,11 @@ import org.logstash.RubyUtil;
import static junit.framework.TestCase.assertFalse;
import static junit.framework.TestCase.assertEquals;
import static junit.framework.TestCase.assertNotNull;
import static org.junit.Assert.assertTrue;

public class CustomLogEventTests {
private static final String CONFIG = "log4j2-test1.xml";
public static final String STRICT_JSON_PROPERTY_NAME = "ls.log.format.json.fix_duplicate_message_fields";

@ClassRule
public static LoggerContextRule CTX = new LoggerContextRule(CONFIG);

@@ -174,4 +176,34 @@ public class CustomLogEventTests {
assertEquals(1, logEventMapValue.get("first"));
assertEquals(2, logEventMapValue.get("second"));
}

@Test
@SuppressWarnings("unchecked")
public void testJSONLayoutWhenParamsContainsAnotherMessageField() throws JsonProcessingException {
String prevSetting = System.getProperty(STRICT_JSON_PROPERTY_NAME);
System.setProperty(STRICT_JSON_PROPERTY_NAME, Boolean.TRUE.toString());

ListAppender appender = CTX.getListAppender("JSONEventLogger").clear();
Logger logger = LogManager.getLogger("JSONEventLogger");

Map<String, String> paramsWithAnotherMessageField = Collections.singletonMap("message", "something to say");
logger.error("here is a map: {}", paramsWithAnotherMessageField);

List<String> messages = appender.getMessages();
assertEquals(1, messages.size());

Map<String, Object> loggedMessage = ObjectMappers.JSON_MAPPER.readValue(messages.get(0), Map.class);
assertEquals(5, loggedMessage.size());

Map<String, Object> actualLogEvent = (Map<String, Object>) loggedMessage.get("logEvent");
assertEquals("here is a map: {}", actualLogEvent.get("message"));
assertEquals("something to say", actualLogEvent.get("message_1"));

// tear down
if (prevSetting == null) {
System.clearProperty(STRICT_JSON_PROPERTY_NAME);
} else {
System.setProperty(STRICT_JSON_PROPERTY_NAME, prevSetting);
}
}
}
@@ -64,19 +64,19 @@ public abstract class MetricTestCase extends RubyEnvTestCase {

RubyHash rh = metricStore;
for (String p : path) {
rh = (RubyHash) rh.op_aref(RUBY.getCurrentContext(), RUBY.newSymbol(p));
rh = (RubyHash) rh.op_aref(RUBY.getCurrentContext(), RUBY.newString(p).intern());
}
return rh;
}

protected String getMetricStringValue(RubyHash metricStore, String symbolName) {
ConcreteJavaProxy counter = (ConcreteJavaProxy) metricStore.op_aref(RUBY.getCurrentContext(), RUBY.newSymbol(symbolName));
ConcreteJavaProxy counter = (ConcreteJavaProxy) metricStore.op_aref(RUBY.getCurrentContext(), RUBY.newString(symbolName).intern());
RubyString value = (RubyString) counter.callMethod("value");
return value.asJavaString();
}

protected long getMetricLongValue(RubyHash metricStore, String symbolName) {
ConcreteJavaProxy counter = (ConcreteJavaProxy) metricStore.op_aref(RUBY.getCurrentContext(), RUBY.newSymbol(symbolName));
ConcreteJavaProxy counter = (ConcreteJavaProxy) metricStore.op_aref(RUBY.getCurrentContext(), RUBY.newString(symbolName).intern());
RubyFixnum count = (RubyFixnum) counter.callMethod("value");
return count.getLongValue();
}
@@ -27,22 +27,37 @@ import org.jruby.RubyHash;
import org.junit.Ignore;
import org.junit.Test;

import java.util.Arrays;

import static org.assertj.core.api.Assertions.assertThat;

public class NamespacedMetricImplTest extends MetricTestCase {
@Test
public void testGauge() {
final NamespacedMetric metrics = this.getInstance().namespace("test");
doTestGauge("test");
}
@Test
public void testGaugeNested() {
doTestGauge("test", "deep");
}

@Test
public void testGaugeUnicode() {
doTestGauge("test", "Ümlãut-åccênt", "okay");
}

public void doTestGauge(final String... metricNamespace) {
final NamespacedMetric metrics = this.getInstance().namespace(metricNamespace);

metrics.gauge("abc", "def");
{
final RubyHash metricStore = getMetricStore(new String[]{"test"});
final RubyHash metricStore = getMetricStore(Arrays.copyOf(metricNamespace, metricNamespace.length));
assertThat(this.getMetricStringValue(metricStore, "abc")).isEqualTo("def");
}

metrics.gauge("abc", "123");
{
final RubyHash metricStore = getMetricStore(new String[]{"test"});
final RubyHash metricStore = getMetricStore(Arrays.copyOf(metricNamespace, metricNamespace.length));
assertThat(this.getMetricStringValue(metricStore, "abc")).isEqualTo("123");
}
}

@@ -125,6 +140,17 @@ public class NamespacedMetricImplTest extends MetricTestCase {
assertThat(namespaced2.namespaceName()).containsExactly("test", "abcdef", "12345", "qwerty");
}

@Test
public void testNamespaceUnicodeFragment() {
final NamespacedMetric metrics = this.getInstance().namespace("test", "Ünîcødé", "nÉs†iñG");

final NamespacedMetric namespaced = metrics.namespace("abcdef");
assertThat(namespaced.namespaceName()).containsExactly("test", "Ünîcødé", "nÉs†iñG", "abcdef");

final NamespacedMetric namespaced2 = namespaced.namespace("12345", "qwerty");
assertThat(namespaced2.namespaceName()).containsExactly("test", "Ünîcødé", "nÉs†iñG", "abcdef", "12345", "qwerty");
}

@Test
public void testRoot() {
final NamespacedMetric metrics = this.getInstance().namespace("test");
@@ -113,21 +113,8 @@ class LogstashService < Service
# Can start LS in stdin and can send messages to stdin
# Useful to test metrics and such
def start_with_stdin(pipeline_config = STDIN_CONFIG)
puts "Starting Logstash #{@logstash_bin} -e #{pipeline_config}"
Bundler.with_unbundled_env do
out = Tempfile.new("duplex")
out.sync = true
@process = build_child_process("-e", pipeline_config)
# pipe STDOUT and STDERR to a file
@process.io.stdout = @process.io.stderr = out
@process.duplex = true
@env_variables.map { |k, v| @process.environment[k] = v} unless @env_variables.nil?
java_home = java.lang.System.getProperty('java.home')
@process.environment['LS_JAVA_HOME'] = java_home
@process.start
wait_for_logstash
puts "Logstash started with PID #{@process.pid}, LS_JAVA_HOME: #{java_home}" if alive?
end
spawn_logstash("-e", pipeline_config)
wait_for_logstash
end

def write_to_stdin(input)

@@ -138,6 +125,7 @@ class LogstashService < Service

# Spawn LS as a child process
def spawn_logstash(*args)
$stderr.puts "Starting Logstash #{Shellwords.escape(@logstash_bin)} #{Shellwords.join(args)}"
Bundler.with_unbundled_env do
out = Tempfile.new("duplex")
out.sync = true
@@ -23,24 +23,62 @@ require "logstash/devutils/rspec/spec_helper"
require "stud/try"

describe "Test Monitoring API" do
before(:each) do |example|
$stderr.puts("STARTING: #{example.full_description} (#{example.location})")
end

before(:all) {
@fixture = Fixture.new(__FILE__)
ruby_encoding_info = %w(external internal locale filesystem).map do |type|
Encoding.find(type)&.name&.then { |name| "#{type}:#{name}" }
end.compact.join(", ")

$stderr.puts <<~ENCODINGINFO.tr("\n", ' ')
INFO(spec runner process)
Ruby.Encoding=(#{ruby_encoding_info})
Java.Locale=`#{java.util.Locale::getDefault().toLanguageTag()}`
Java.Charset=`#{java.nio.charset.Charset::defaultCharset().displayName()}`
ENCODINGINFO
}

let(:settings_overrides) do
{}
end

let(:logstash_service) { @fixture.get_service("logstash") }

before(:each) do
# some settings values cannot be reliably passed on the command line
# because we are not guaranteed that the shell's encoding supports UTF-8.
# Merge our settings into the active settings file, to accommodate feature flags
unless settings_overrides.empty?
settings_file = logstash_service.application_settings_file
FileUtils.cp(settings_file, "#{settings_file}.original")

base_settings = YAML.load(File.read(settings_file)) || {}
effective_settings = base_settings.merge(settings_overrides) do |key, old_val, new_val|
warn "Overriding setting `#{key}` with `#{new_val}` (was `#{old_val}`)"
new_val
end

IO.write(settings_file, effective_settings.to_yaml)
end
end

after(:all) {
@fixture.teardown
}

after(:each) {
@fixture.get_service("logstash").teardown
}
after(:each) do
settings_file = logstash_service.application_settings_file
logstash_service.teardown
FileUtils.mv("#{settings_file}.original", settings_file) if File.exist?("#{settings_file}.original")
end

let(:number_of_events) { 5 }
let(:max_retry) { 120 }
let(:plugins_config) { "input { stdin {} } filter { mutate { add_tag => 'integration test adding tag' } } output { stdout {} }" }

it "can retrieve event stats" do
logstash_service = @fixture.get_service("logstash")
logstash_service.start_with_stdin
logstash_service.wait_for_logstash
number_of_events.times { logstash_service.write_to_stdin("Hello world") }
@@ -167,38 +205,179 @@ describe "Test Monitoring API" do
end
end

it "can retrieve queue stats" do
logstash_service = @fixture.get_service("logstash")
logstash_service.start_with_stdin
logstash_service.wait_for_logstash
shared_examples "pipeline metrics" do
# let(:pipeline_id) { defined?(super()) or fail NotImplementedError }
let(:settings_overrides) do
super().merge({'pipeline.id' => pipeline_id})
end

Stud.try(max_retry.times, [StandardError, RSpec::Expectations::ExpectationNotMetError]) do
# node_stats can fail if the stats subsystem isn't ready
result = logstash_service.monitoring_api.node_stats rescue nil
expect(result).not_to be_nil
# we use fetch here since we want failed fetches to raise an exception
# and trigger the retry block
queue_stats = result.fetch("pipelines").fetch("main").fetch("queue")
expect(queue_stats).not_to be_nil
if logstash_service.settings.feature_flag == "persistent_queues"
expect(queue_stats["type"]).to eq "persisted"
queue_data_stats = queue_stats.fetch("data")
expect(queue_data_stats["free_space_in_bytes"]).not_to be_nil
expect(queue_data_stats["storage_type"]).not_to be_nil
expect(queue_data_stats["path"]).not_to be_nil
expect(queue_stats["events"]).not_to be_nil
queue_capacity_stats = queue_stats.fetch("capacity")
expect(queue_capacity_stats["page_capacity_in_bytes"]).not_to be_nil
expect(queue_capacity_stats["max_queue_size_in_bytes"]).not_to be_nil
expect(queue_capacity_stats["max_unread_events"]).not_to be_nil
else
expect(queue_stats["type"]).to eq("memory")
it "can retrieve queue stats" do
logstash_service.start_with_stdin
logstash_service.wait_for_logstash

Stud.try(max_retry.times, [StandardError, RSpec::Expectations::ExpectationNotMetError]) do
# node_stats can fail if the stats subsystem isn't ready
result = logstash_service.monitoring_api.node_stats rescue nil
expect(result).not_to be_nil
# we use fetch here since we want failed fetches to raise an exception
# and trigger the retry block
queue_stats = result.fetch("pipelines").fetch(pipeline_id).fetch("queue")
expect(queue_stats).not_to be_nil
if logstash_service.settings.feature_flag == "persistent_queues"
expect(queue_stats["type"]).to eq "persisted"
queue_data_stats = queue_stats.fetch("data")
expect(queue_data_stats["free_space_in_bytes"]).not_to be_nil
expect(queue_data_stats["storage_type"]).not_to be_nil
expect(queue_data_stats["path"]).not_to be_nil
expect(queue_stats["events"]).not_to be_nil
queue_capacity_stats = queue_stats.fetch("capacity")
expect(queue_capacity_stats["page_capacity_in_bytes"]).not_to be_nil
expect(queue_capacity_stats["max_queue_size_in_bytes"]).not_to be_nil
expect(queue_capacity_stats["max_unread_events"]).not_to be_nil
else
expect(queue_stats["type"]).to eq("memory")
end
end
end

it "retrieves the pipeline flow statuses" do
logstash_service = @fixture.get_service("logstash")
logstash_service.start_with_stdin
logstash_service.wait_for_logstash
number_of_events.times {
logstash_service.write_to_stdin("Testing flow metrics")
sleep(1)
}

Stud.try(max_retry.times, [StandardError, RSpec::Expectations::ExpectationNotMetError]) do
# node_stats can fail if the stats subsystem isn't ready
result = logstash_service.monitoring_api.node_stats rescue nil
expect(result).not_to be_nil
# we use fetch here since we want failed fetches to raise an exception
# and trigger the retry block
expect(result).to include('pipelines' => hash_including(pipeline_id => hash_including('flow')))
flow_status = result.dig("pipelines", pipeline_id, "flow")
expect(flow_status).to_not be_nil
expect(flow_status).to include(
# due to three-decimal-place rounding, it is easy for our worker_concurrency and queue_backpressure
# to be zero, so we are just looking for these to be _populated_
'worker_concurrency' => hash_including('current' => a_value >= 0, 'lifetime' => a_value >= 0),
'worker_utilization' => hash_including('current' => a_value >= 0, 'lifetime' => a_value >= 0),
'queue_backpressure' => hash_including('current' => a_value >= 0, 'lifetime' => a_value >= 0),
# depending on flow capture interval, our current rate can easily be zero, but our lifetime rates
# should be non-zero so long as pipeline uptime is less than ~10 minutes.
'input_throughput' => hash_including('current' => a_value >= 0, 'lifetime' => a_value > 0),
'filter_throughput' => hash_including('current' => a_value >= 0, 'lifetime' => a_value > 0),
'output_throughput' => hash_including('current' => a_value >= 0, 'lifetime' => a_value > 0)
)
if logstash_service.settings.feature_flag == "persistent_queues"
expect(flow_status).to include(
'queue_persisted_growth_bytes' => hash_including('current' => a_kind_of(Numeric), 'lifetime' => a_kind_of(Numeric)),
'queue_persisted_growth_events' => hash_including('current' => a_kind_of(Numeric), 'lifetime' => a_kind_of(Numeric))
)
else
expect(flow_status).to_not include('queue_persisted_growth_bytes')
expect(flow_status).to_not include('queue_persisted_growth_events')
end
end
end

shared_examples "plugin-level flow metrics" do
let(:settings_overrides) do
super().merge({'config.string' => config_string})
end

let(:config_string) do
<<~EOPIPELINE
input { stdin { id => '#{plugin_id_input}' } }
filter { mutate { id => '#{plugin_id_filter}' add_tag => 'integration test adding tag' } }
output { stdout { id => '#{plugin_id_output}' } }
EOPIPELINE
end

it "retrieves plugin level flow metrics" do
logstash_service.spawn_logstash
logstash_service.wait_for_logstash
number_of_events.times {
logstash_service.write_to_stdin("Testing plugin-level flow metrics")
sleep(1)
}

Stud.try(max_retry.times, [StandardError, RSpec::Expectations::ExpectationNotMetError]) do
# node_stats can fail if the stats subsystem isn't ready
result = logstash_service.monitoring_api.node_stats rescue nil
# if the result is nil, we probably aren't ready yet
# our assertion failure will cause Stud to retry
expect(result).not_to be_nil

expect(result).to include('pipelines' => hash_including(pipeline_id => hash_including('plugins' => hash_including('inputs', 'filters', 'outputs'))))

input_plugins = result.dig("pipelines", pipeline_id, "plugins", "inputs")
filter_plugins = result.dig("pipelines", pipeline_id, "plugins", "filters")
output_plugins = result.dig("pipelines", pipeline_id, "plugins", "outputs")
expect(input_plugins[0]).to_not be_nil # not ready...

expect(input_plugins).to include(a_hash_including(
'id' => plugin_id_input,
'flow' => a_hash_including(
'throughput' => hash_including('current' => a_value >= 0, 'lifetime' => a_value > 0)
)
))

expect(filter_plugins).to include(a_hash_including(
'id' => plugin_id_filter,
'flow' => a_hash_including(
'worker_utilization' => hash_including('current' => a_value >= 0, 'lifetime' => a_value >= 0),
'worker_millis_per_event' => hash_including('current' => a_value >= 0, 'lifetime' => a_value >= 0),
)
))

expect(output_plugins).to include(a_hash_including(
'id' => plugin_id_output,
'flow' => a_hash_including(
'worker_utilization' => hash_including('current' => a_value >= 0, 'lifetime' => a_value >= 0),
'worker_millis_per_event' => hash_including('current' => a_value >= 0, 'lifetime' => a_value >= 0),
)
))
end
end
end

context "with lower-ASCII plugin id's" do
let(:plugin_id_input) { "standard-input" }
let(:plugin_id_filter) { "Mutations" }
let(:plugin_id_output) { "StandardOutput" }
include_examples "plugin-level flow metrics"
end

context "with unicode plugin id's" do
let(:plugin_id_input) { "입력" }
let(:plugin_id_filter) { "変じる" }
let(:plugin_id_output) { "le-résultat" }
include_examples "plugin-level flow metrics"
end

end

context "with lower-ASCII pipeline id" do
let(:pipeline_id) { "main" }
include_examples "pipeline metrics"
end

context "with unicode pipeline id" do
before(:each) do
if @fixture.settings.feature_flag == "persistent_queues"
skip('behaviour for unicode pipeline ids is unspecified when PQ is enabled')
# NOTE: pipeline ids are used verbatim as a part of the queue path, so the subset
# of unicode characters that are supported depends on the OS and filesystem.
# The pipeline will fail to start, rendering these monitoring specs useless.
end
end
let(:pipeline_id) { "변환-verändern-変ずる" }
include_examples "pipeline metrics"
end

it "can configure logging" do
logstash_service = @fixture.get_service("logstash")
logstash_service.start_with_stdin
logstash_service.wait_for_logstash
@@ -246,86 +425,6 @@ describe "Test Monitoring API" do
logging_get_assert logstash_service, "INFO", "TRACE"
end

it "should retrieve the pipeline flow statuses" do
logstash_service = @fixture.get_service("logstash")
logstash_service.start_with_stdin
logstash_service.wait_for_logstash
number_of_events.times {
logstash_service.write_to_stdin("Testing flow metrics")
sleep(1)
}

Stud.try(max_retry.times, [StandardError, RSpec::Expectations::ExpectationNotMetError]) do
# node_stats can fail if the stats subsystem isn't ready
result = logstash_service.monitoring_api.node_stats rescue nil
expect(result).not_to be_nil
# we use fetch here since we want failed fetches to raise an exception
# and trigger the retry block
expect(result).to include('pipelines' => hash_including('main' => hash_including('flow')))
flow_status = result.dig("pipelines", "main", "flow")
expect(flow_status).to_not be_nil
expect(flow_status).to include(
# due to three-decimal-place rounding, it is easy for our worker_concurrency and queue_backpressure
# to be zero, so we are just looking for these to be _populated_
'worker_concurrency' => hash_including('current' => a_value >= 0, 'lifetime' => a_value >= 0),
'worker_utilization' => hash_including('current' => a_value >= 0, 'lifetime' => a_value >= 0),
'queue_backpressure' => hash_including('current' => a_value >= 0, 'lifetime' => a_value >= 0),
# depending on flow capture interval, our current rate can easily be zero, but our lifetime rates
# should be non-zero so long as pipeline uptime is less than ~10 minutes.
'input_throughput' => hash_including('current' => a_value >= 0, 'lifetime' => a_value > 0),
'filter_throughput' => hash_including('current' => a_value >= 0, 'lifetime' => a_value > 0),
'output_throughput' => hash_including('current' => a_value >= 0, 'lifetime' => a_value > 0)
)
if logstash_service.settings.feature_flag == "persistent_queues"
expect(flow_status).to include(
'queue_persisted_growth_bytes' => hash_including('current' => a_kind_of(Numeric), 'lifetime' => a_kind_of(Numeric)),
'queue_persisted_growth_events' => hash_including('current' => a_kind_of(Numeric), 'lifetime' => a_kind_of(Numeric))
)
else
expect(flow_status).to_not include('queue_persisted_growth_bytes')
expect(flow_status).to_not include('queue_persisted_growth_events')
end
end
end

it "should retrieve plugin level flow metrics" do
logstash_service = @fixture.get_service("logstash")
logstash_service.start_with_stdin(plugins_config)
logstash_service.wait_for_logstash
number_of_events.times {
logstash_service.write_to_stdin("Testing plugin-level flow metrics")
sleep(1)
}

Stud.try(max_retry.times, [StandardError, RSpec::Expectations::ExpectationNotMetError]) do
# node_stats can fail if the stats subsystem isn't ready
result = logstash_service.monitoring_api.node_stats rescue nil
# if the result is nil, we probably aren't ready yet
# our assertion failure will cause Stud to retry
expect(result).not_to be_nil

expect(result).to include('pipelines' => hash_including('main' => hash_including('plugins' => hash_including('inputs', 'filters', 'outputs'))))

input_plugins = result.dig("pipelines", "main", "plugins", "inputs")
filter_plugins = result.dig("pipelines", "main", "plugins", "filters")
output_plugins = result.dig("pipelines", "main", "plugins", "outputs")
expect(input_plugins[0]).to_not be_nil

input_plugin_flow_status = input_plugins[0].dig("flow")
filter_plugin_flow_status = filter_plugins[0].dig("flow")
output_plugin_flow_status = output_plugins[0].dig("flow")

expect(input_plugin_flow_status).to include('throughput' => hash_including('current' => a_value >= 0, 'lifetime' => a_value > 0))
expect(filter_plugin_flow_status).to include(
'worker_utilization' => hash_including('current' => a_value >= 0, 'lifetime' => a_value >= 0),
'worker_millis_per_event' => hash_including('current' => a_value >= 0, 'lifetime' => a_value >= 0),
)
expect(output_plugin_flow_status).to include(
'worker_utilization' => hash_including('current' => a_value >= 0, 'lifetime' => a_value >= 0),
'worker_millis_per_event' => hash_including('current' => a_value >= 0, 'lifetime' => a_value >= 0),
)
end
end

private
@@ -25,7 +25,7 @@ buildscript {
dependencies {
classpath "org.yaml:snakeyaml:${snakeYamlVersion}"
classpath "de.undercouch:gradle-download-task:4.0.4"
classpath "org.jruby:jruby-core:9.4.6.0"
classpath "org.jruby:jruby-core:9.4.7.0"
}
}
@@ -207,8 +207,6 @@ def versionsPath = project.hasProperty("LOGSTASH_CORE_PATH") ? LOGSTASH_CORE_PAT
versionMap = (Map) (new Yaml()).load(new File("${versionsPath}").text)

String jRubyURL
String previousJRubyVersion = "9.3.9.0"
String previousJRubyURL = "https://repo1.maven.org/maven2/org/jruby/jruby-dist/${previousJRubyVersion}/jruby-dist-${previousJRubyVersion}-bin.tar.gz"
String jRubyVersion
String jRubySha1
Boolean doChecksum

@@ -224,7 +222,6 @@ if (versionMap["jruby-runtime-override"]) {
doChecksum = true
}
def jrubyTarPath = "${projectDir}/vendor/_/jruby-dist-${jRubyVersion}-bin.tar.gz"
def previousJrubyTarPath = "${projectDir}/vendor/_/jruby-dist-${previousJRubyVersion}-bin.tar.gz"

def customJRubyDir = project.hasProperty("custom.jruby.path") ? project.property("custom.jruby.path") : ""
def customJRubyVersion = customJRubyDir == "" ? "" : Files.readAllLines(Paths.get(customJRubyDir, "VERSION")).get(0).trim()
@@ -74,6 +74,7 @@ dependency,dependencyUrl,licenseOverride,copyright,sourceURL
"ffi:",https://github.com/ffi/ffi,BSD-3-CLAUSE
"ffi-binary-libfixposix:",https://github.com/byteit101/subspawn,Ruby
"filesize:",https://github.com/dominikh,MIT
"fileutils:",https://github.com/ruby/fileutils,BSD-2-Clause
"fugit:",https://github.com/floraison/fugit,MIT
"gelfd2:",https://github.com/ptqa/gelfd2,Apache-2.0
"gems:",https://github.com/rubygems/gems,MIT
@@ -0,0 +1,22 @@
Copyright (C) 1993-2013 Yukihiro Matsumoto. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
versions.yml

@@ -1,7 +1,7 @@
 ---
 # alpha and beta qualifiers are now added via VERSION_QUALIFIER environment var
-logstash: 8.14.0
-logstash-core: 8.14.0
+logstash: 8.14.3
+logstash-core: 8.14.3
 logstash-core-plugin-api: 2.1.16

 bundled_jdk:

@@ -13,8 +13,8 @@ bundled_jdk:
 # jruby must reference a *released* version of jruby which can be downloaded from the official download url
 # *and* for which jars artifacts are published for compile-time
 jruby:
-  version: 9.4.6.0
-  sha1: 871d520c9f2494ca56138200c6b2a95c54d0a639
+  version: 9.4.7.0
+  sha1: ea3b99edcb48ed436173977551a113759541c18c
 # jruby-runtime-override, if specified, will override the jruby version installed in vendor/jruby
 #jruby-runtime-override:
 #  url: https://oss.sonatype.org/content/repositories/snapshots/org/jruby/jruby-dist/9.3.0.0-SNAPSHOT/jruby-dist-9.3.0.0-20210723.214927-259-bin.tar.gz

@@ -24,6 +24,6 @@ jruby:
 # Note: this file is copied to the root of logstash-core because its gemspec needs it when
 # bundler evaluates the gemspec via bin/logstash
 # Ensure Jackson version here is kept in sync with version used by jrjackson gem
-jrjackson: 0.4.18
+jrjackson: 0.4.20
 jackson: 2.15.3
 jackson-databind: 2.15.3
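
The build verifies the downloaded JRuby tarball against the sha1 pinned here (the `doChecksum` path in the Gradle snippet above). A hedged sketch of the equivalent check in Ruby (file paths are illustrative, mirroring the build's vendor layout):

# Minimal sketch: verify a downloaded jruby-dist tarball against the
# sha1 pinned in versions.yml. The tarball path is illustrative.
require "yaml"
require "digest"

jruby   = YAML.load_file("versions.yml")["jruby"]
tarball = "vendor/_/jruby-dist-#{jruby["version"]}-bin.tar.gz"
actual  = Digest::SHA1.file(tarball).hexdigest

abort "sha1 mismatch for #{tarball}" unless actual == jruby["sha1"]
puts "#{tarball}: sha1 OK"
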
@@ -136,17 +136,23 @@ module LogStash module Filters module Geoip class DatabaseManager
   end

   def update!(database_type, updated_db_info)
-    new_database_path = updated_db_info.path
-    notify_plugins(database_type, :update, new_database_path) do |db_type, ids|
-      logger.info("geoip filter plugin will use database #{new_database_path}",
-                  :database_type => db_type, :pipeline_ids => ids) unless ids.empty?
+    @trigger_lock.synchronize do
+      new_database_path = updated_db_info.path
+      @states[database_type].database_path = new_database_path
+      notify_plugins(database_type, :update, new_database_path) do |db_type, ids|
+        logger.info("geoip filter plugin will use database #{new_database_path}",
+                    :database_type => db_type, :pipeline_ids => ids) unless ids.empty?
+      end
     end
   end

   def expire!(database_type)
-    notify_plugins(database_type, :expire) do |db_type, ids|
-      logger.warn("geoip filter plugin will stop filtering and will tag all events with the '_geoip_expired_database' tag.",
-                  :database_type => db_type, :pipeline_ids => ids)
+    @trigger_lock.synchronize do
+      @states[database_type].database_path = nil
+      notify_plugins(database_type, :expire) do |db_type, ids|
+        logger.warn("geoip filter plugin will stop filtering and will tag all events with the '_geoip_expired_database' tag.",
+                    :database_type => db_type, :pipeline_ids => ids)
+      end
     end
   end
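
The point of the change above is that the per-type state mutation and the plugin notifications now happen under the same `@trigger_lock`, so a plugin subscribing while an update or expiry is in flight can never observe a half-applied path. A hypothetical reduction of that pattern (`TinyManager`, its `State` struct, and the method names are simplified stand-ins, not the real DatabaseManager API):

# Hypothetical reduction of the locking pattern above; State and the
# method names are simplified stand-ins for illustration only.
require "monitor"

class TinyManager
  State = Struct.new(:database_path, :plugins)

  def initialize
    @trigger_lock = Monitor.new
    @states = Hash.new { |h, k| h[k] = State.new(nil, []) }
  end

  # Writer: the path change and the notifications happen under one lock ...
  def update!(type, new_path)
    @trigger_lock.synchronize do
      @states[type].database_path = new_path
      @states[type].plugins.each { |p| p.call(:update, new_path) }
    end
  end

  # ... so a late subscriber always reads a fully applied path.
  def subscribe(type, plugin)
    @trigger_lock.synchronize do
      @states[type].plugins << plugin
      @states[type].database_path
    end
  end
end

Under this scheme the "subsequent subscriptions" specs below hold: a subscriber arriving after an update reads the new path, and one arriving after an expiry reads nil.
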
@@ -8,11 +8,14 @@ require "filters/geoip/database_manager"
 describe LogStash::Filters::Geoip do
   describe 'DatabaseManager', :aggregate_failures do
     let(:pipeline_id) { SecureRandom.hex(16) }
-    let(:mock_geoip_plugin) do
-      double("LogStash::Filters::Geoip").tap do |c|
-        allow(c).to receive(:execution_context).and_return(double("EC", pipeline_id: pipeline_id))
-        allow(c).to receive(:update_filter).with(anything)
-      end
+    let(:mock_geoip_plugin) { mock_geoip_plugin_factory.call }
+    let(:mock_geoip_plugin_factory) do
+      ->() {
+        double("LogStash::Filters::Geoip").tap do |c|
+          allow(c).to receive(:execution_context).and_return(double("EC", pipeline_id: pipeline_id))
+          allow(c).to receive(:update_filter).with(anything)
+        end
+      }
     end

     let(:eula_database_infos) { Hash.new { LogStash::GeoipDatabaseManagement::DbInfo::PENDING } }
@@ -119,10 +122,19 @@ describe LogStash::Filters::Geoip do

     shared_examples "subscribed to expire notifications" do
       context "when the manager expires the db" do
-        it "notifies the plugin" do
+        before(:each) do
           db_manager.eula_subscription("City").notify(LogStash::GeoipDatabaseManagement::DbInfo::EXPIRED)
+        end
+        it "notifies the plugin" do
           expect(mock_geoip_plugin).to have_received(:update_filter).with(:expire)
         end
+        context "subsequent subscriptions" do
+          it "are given the nil path" do
+            plugin2 = mock_geoip_plugin_factory.call
+            path2 = db_manager.subscribe_database_path("City", nil, plugin2)
+            expect(path2).to be_nil
+          end
+        end
       end
       context "when the manager expires a different DB" do
         it 'does not notify the plugin' do
@@ -135,10 +147,19 @@ describe LogStash::Filters::Geoip do
     shared_examples "subscribed to update notifications" do
       context "when the manager updates the db" do
         let(:updated_db_path) { "/this/that/another.mmdb" }
-        it "notifies the plugin" do
+        before(:each) do
           db_manager.eula_subscription("City").notify(LogStash::GeoipDatabaseManagement::DbInfo.new(path: updated_db_path))
+        end
+        it "notifies the plugin" do
           expect(mock_geoip_plugin).to have_received(:update_filter).with(:update, updated_db_path)
         end
+        context "subsequent subscriptions" do
+          it "are given the updated path" do
+            plugin2 = mock_geoip_plugin_factory.call
+            path2 = db_manager.subscribe_database_path("City", nil, plugin2)
+            expect(path2).to eq updated_db_path
+          end
+        end
       end
       context "when the manager updates a different DB" do
         let(:updated_db_path) { "/this/that/another.mmdb" }