Migrate x-pack-logstash source to logstash

This commit is contained in:
Jenkins CI 2018-04-20 19:11:15 +00:00 committed by Andrew Cholakian
parent 155801520c
commit 93cad10da1
296 changed files with 9337 additions and 46 deletions

View file

@ -8,7 +8,8 @@ ADD buildSrc /opt/logstash/buildSrc
RUN /opt/logstash/gradlew wrapper RUN /opt/logstash/gradlew wrapper
ADD versions.yml /opt/logstash/versions.yml ADD versions.yml /opt/logstash/versions.yml
ADD LICENSE /opt/logstash/LICENSE ADD LICENSE.txt /opt/logstash/LICENSE.txt
ADD licenses /opt/logstash/licenses
ADD CONTRIBUTORS /opt/logstash/CONTRIBUTORS ADD CONTRIBUTORS /opt/logstash/CONTRIBUTORS
ADD Gemfile.template /opt/logstash/Gemfile.template ADD Gemfile.template /opt/logstash/Gemfile.template
ADD Rakefile /opt/logstash/Rakefile ADD Rakefile /opt/logstash/Rakefile
@ -24,6 +25,7 @@ ADD logstash-core /opt/logstash/logstash-core
ADD logstash-core-plugin-api /opt/logstash/logstash-core-plugin-api ADD logstash-core-plugin-api /opt/logstash/logstash-core-plugin-api
ADD bin /opt/logstash/bin ADD bin /opt/logstash/bin
ADD modules /opt/logstash/modules ADD modules /opt/logstash/modules
ADD x-pack /opt/logstash/x-pack
ADD ci /opt/logstash/ci ADD ci /opt/logstash/ci
ADD settings.gradle /opt/logstash/settings.gradle ADD settings.gradle /opt/logstash/settings.gradle

13
LICENSE
View file

@ -1,13 +0,0 @@
Copyright (c) 2012-2017 Elasticsearch <http://www.elastic.co>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

13
LICENSE.txt Normal file
View file

@ -0,0 +1,13 @@
Source code in this repository is variously licensed under the Apache License
Version 2.0, an Apache compatible license, or the Elastic License. Outside of
the "x-pack" folder, source code in a given file is licensed under the Apache
License Version 2.0, unless otherwise noted at the beginning of the file or a
LICENSE file present in the directory subtree declares a separate license.
Within the "x-pack" folder, source code in a given file is licensed under the
Elastic License, unless otherwise noted at the beginning of the file or a
LICENSE file present in the directory subtree declares a separate license.
The build produces two sets of binaries - one set that falls under the Elastic
License and another set that falls under Apache License Version 2.0. The
binaries that contain `-oss` in the artifact name are licensed under the Apache
License Version 2.0.

View file

@ -18,14 +18,24 @@ supported platforms, from [downloads page](https://www.elastic.co/downloads/logs
### Snapshot Builds ### Snapshot Builds
For the daring, snapshot builds are available. These builds are created nightly and have undergone no formal QA, so they should **never** be run in production. For the daring, snapshot builds are available.
These builds are created nightly and have undergone no formal QA, so they should **never** be run in production.
| artifact | | Complete, with X-Pack | Apache 2.0 licensed |
| --- | | --------------------- | ---------------------- |
| [tar](https://snapshots.elastic.co/downloads/logstash/logstash-7.0.0-alpha1-SNAPSHOT.tar.gz) | | [tar-complete][] | [tar-oss][] |
| [zip](https://snapshots.elastic.co/downloads/logstash/logstash-7.0.0-alpha1-SNAPSHOT.zip) | | [zip-complete][] | [zip-oss][] |
| [deb](https://snapshots.elastic.co/downloads/logstash/logstash-7.0.0-alpha1-SNAPSHOT.deb) | | [deb-complete][] | [deb-oss][] |
| [rpm](https://snapshots.elastic.co/downloads/logstash/logstash-7.0.0-alpha1-SNAPSHOT.rpm) | | [rpm-complete][] | [rpm-oss][] |
[tar-complete]: https://snapshots.elastic.co/downloads/logstash/logstash-7.0.0-alpha1-SNAPSHOT.tar.gz
[zip-complete]: https://snapshots.elastic.co/downloads/logstash/logstash-7.0.0-alpha1-SNAPSHOT.zip
[deb-complete]: https://snapshots.elastic.co/downloads/logstash/logstash-7.0.0-alpha1-SNAPSHOT.deb
[rpm-complete]: https://snapshots.elastic.co/downloads/logstash/logstash-7.0.0-alpha1-SNAPSHOT.rpm
[tar-oss]: https://snapshots.elastic.co/downloads/logstash/logstash-oss-7.0.0-alpha1-SNAPSHOT.tar.gz
[zip-oss]: https://snapshots.elastic.co/downloads/logstash/logstash-oss-7.0.0-alpha1-SNAPSHOT.zip
[deb-oss]: https://snapshots.elastic.co/downloads/logstash/logstash-oss-7.0.0-alpha1-SNAPSHOT.deb
[rpm-oss]: https://snapshots.elastic.co/downloads/logstash/logstash-oss-7.0.0-alpha1-SNAPSHOT.rpm
## Need Help? ## Need Help?
@ -80,6 +90,12 @@ The printed version should be the same as in the `.ruby-version` file.
### Building Logstash ### Building Logstash
The Logstash project includes the source code for all of Logstash, including the Elastic-Licensed X-Pack features and functions; to run Logstash from source using only the OSS-licensed code, export the `OSS` environment variable with a value of `true`:
``` sh
export OSS=true
```
* To run Logstash from the repo you must first bootstrap the environment: * To run Logstash from the repo you must first bootstrap the environment:
```sh ```sh
@ -189,6 +205,8 @@ Note that if a plugin is installed using the plugin manager `bin/logstash-plugin
## Building Artifacts ## Building Artifacts
Built artifacts will be placed in the `LS_HOME/build` directory, and will create the directory if it is not already present.
You can build a Logstash snapshot package as tarball or zip file You can build a Logstash snapshot package as tarball or zip file
```sh ```sh
@ -196,7 +214,12 @@ You can build a Logstash snapshot package as tarball or zip file
./gradlew assembleZipDistribution ./gradlew assembleZipDistribution
``` ```
This will create the artifact `LS_HOME/build` directory OSS-only artifacts can similarly be built with their own gradle tasks:
```sh
./gradlew assembleOssTarDistribution
./gradlew assembleOssZipDistribution
```
You can also build .rpm and .deb, but the [fpm](https://github.com/jordansissel/fpm) tool is required. You can also build .rpm and .deb, but the [fpm](https://github.com/jordansissel/fpm) tool is required.
@ -205,6 +228,13 @@ rake artifact:rpm
rake artifact:deb rake artifact:deb
``` ```
and:
```sh
rake artifact:rpm_oss
rake artifact:deb_oss
```
## Project Principles ## Project Principles
* Community: If a newbie has a bad time, it's a bug. * Community: If a newbie has a bad time, it's a bug.

View file

@ -281,7 +281,8 @@ bootstrap.dependsOn installTestGems
runIntegrationTests.shouldRunAfter tasks.getByPath(":logstash-core:test") runIntegrationTests.shouldRunAfter tasks.getByPath(":logstash-core:test")
check.dependsOn runIntegrationTests check.dependsOn runIntegrationTests
String elasticsearchSnapshotURL = "https://snapshots.elastic.co/downloads/elasticsearch/elasticsearch-${version}-SNAPSHOT.tar.gz"
String elasticsearchSnapshotURL = System.getenv("ELASTICSEARCH_SNAPSHOT_URL") ?: "https://snapshots.elastic.co/downloads/elasticsearch/elasticsearch-${version}-SNAPSHOT.tar.gz"
String elasticsearchDownloadLocation = "${projectDir}/build/elasticsearch-${version}-SNAPSHOT.tar.gz" String elasticsearchDownloadLocation = "${projectDir}/build/elasticsearch-${version}-SNAPSHOT.tar.gz"
task downloadEs(type: Download) { task downloadEs(type: Download) {

View file

@ -6,6 +6,7 @@ set -e
# installing gems. See https://github.com/elastic/logstash/issues/5179 # installing gems. See https://github.com/elastic/logstash/issues/5179
export JRUBY_OPTS="-J-Xmx1g" export JRUBY_OPTS="-J-Xmx1g"
export GRADLE_OPTS="-Xmx2g -Dorg.gradle.daemon=false -Dorg.gradle.logging.level=info" export GRADLE_OPTS="-Xmx2g -Dorg.gradle.daemon=false -Dorg.gradle.logging.level=info"
export OSS=true
SELECTED_TEST_SUITE=$1 SELECTED_TEST_SUITE=$1

View file

@ -14,6 +14,10 @@ else
IMAGE_NAME=$branch_specifier"-"$(date +%s%N) IMAGE_NAME=$branch_specifier"-"$(date +%s%N)
fi fi
if [ "$OSS" == "true" ]; then
DOCKER_ENV_OPTS="${DOCKER_ENV_OPTS} --env OSS=true"
fi
echo "Running Docker CI build for '$IMAGE_NAME' " echo "Running Docker CI build for '$IMAGE_NAME' "
# Remove old docker cid just in case # Remove old docker cid just in case
@ -23,14 +27,19 @@ docker build -t $IMAGE_NAME .
exit_code=$?; [[ $exit_code != 0 ]] && exit $exit_code exit_code=$?; [[ $exit_code != 0 ]] && exit $exit_code
cleanup() { cleanup() {
cat docker_cid | xargs docker rm --force -v if [ -e docker_cid ]; then
cat docker_cid | xargs docker rm --force -v
fi
} }
trap cleanup EXIT trap cleanup EXIT
# Run the command, skip the first argument, which is the image name # Run the command, skip the first argument, which is the image name
echo "Running tests in built docker image" docker run $DOCKER_ENV_OPTS --cidfile=docker_cid --sig-proxy=true --rm $IMAGE_NAME ${@:2}
docker run --sig-proxy=true --cidfile=docker_cid --rm $IMAGE_NAME ${@:2}
# Remove the container cid since we ran cleanly, no need to force rm it if we got to this point
rm docker_cid
exit_code=$? exit_code=$?
[[ $REMOVE_IMAGE == "true" ]] && docker rmi $IMAGE_NAME [[ $REMOVE_IMAGE == "true" ]] && docker rmi $IMAGE_NAME
echo "exiting with code: '$exit_code'" echo "exiting with code: '$exit_code'"
exit $exit_code #preserve the exit code from the test run exit $exit_code #preserve the exit code from the test run

View file

@ -9,6 +9,7 @@ export GRADLE_OPTS="-Xmx2g -Dorg.gradle.daemon=false -Dorg.gradle.logging.level=
export SPEC_OPTS="--order rand --format documentation" export SPEC_OPTS="--order rand --format documentation"
export CI=true export CI=true
export OSS=true
if [[ $1 = "setup" ]]; then if [[ $1 = "setup" ]]; then
echo "Setup only, no tests will be run" echo "Setup only, no tests will be run"

View file

@ -9,6 +9,7 @@ export GRADLE_OPTS="-Xmx2g -Dorg.gradle.daemon=false -Dorg.gradle.logging.level=
export SPEC_OPTS="--order rand --format documentation" export SPEC_OPTS="--order rand --format documentation"
export CI=true export CI=true
export OSS=true
SELECTED_TEST_SUITE=$1 SELECTED_TEST_SUITE=$1

View file

@ -211,4 +211,36 @@
# #
# Where to find custom plugins # Where to find custom plugins
# path.plugins: [] # path.plugins: []
#
# ------------ X-Pack Settings (not applicable for OSS build) --------------
#
# X-Pack Monitoring
# https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html
#xpack.monitoring.enabled: false
#xpack.monitoring.elasticsearch.username: logstash_system
#xpack.monitoring.elasticsearch.password: password
#xpack.monitoring.elasticsearch.url: ["https://es1:9200", "https://es2:9200"]
#xpack.monitoring.elasticsearch.ssl.ca: [ "/path/to/ca.crt" ]
#xpack.monitoring.elasticsearch.ssl.truststore.path: path/to/file
#xpack.monitoring.elasticsearch.ssl.truststore.password: password
#xpack.monitoring.elasticsearch.ssl.keystore.path: /path/to/file
#xpack.monitoring.elasticsearch.ssl.keystore.password: password
#xpack.monitoring.elasticsearch.ssl.verification_mode: certificate
#xpack.monitoring.elasticsearch.sniffing: false
#xpack.monitoring.collection.interval: 10s
#xpack.monitoring.collection.pipeline.details.enabled: true
#
# X-Pack Management
# https://www.elastic.co/guide/en/logstash/current/logstash-centralized-pipeline-management.html
#xpack.management.enabled: false
#xpack.management.pipeline.id: ["main", "apache_logs"]
#xpack.management.elasticsearch.username: logstash_admin_user
#xpack.management.elasticsearch.password: password
#xpack.management.elasticsearch.url: ["https://es1:9200", "https://es2:9200"]
#xpack.management.elasticsearch.ssl.ca: [ "/path/to/ca.crt" ]
#xpack.management.elasticsearch.ssl.truststore.path: /path/to/file
#xpack.management.elasticsearch.ssl.truststore.password: password
#xpack.management.elasticsearch.ssl.keystore.path: /path/to/file
#xpack.management.elasticsearch.ssl.keystore.password: password
#xpack.management.elasticsearch.sniffing: false
#xpack.management.logstash.poll_interval: 5s

View file

@ -3,7 +3,7 @@
:include-xpack: true :include-xpack: true
:lang: en :lang: en
:xls-repo-dir: {docdir}/../../logstash-extra/x-pack-logstash/docs/{lang} :xls-repo-dir: {docdir}/../x-pack/docs/{lang}
:log-repo-dir: {docdir} :log-repo-dir: {docdir}
:plugins-repo-dir: {docdir}/../../logstash-docs/docs :plugins-repo-dir: {docdir}/../../logstash-docs/docs

View file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -0,0 +1,223 @@
ELASTIC LICENSE AGREEMENT
PLEASE READ CAREFULLY THIS ELASTIC LICENSE AGREEMENT (THIS "AGREEMENT"), WHICH
CONSTITUTES A LEGALLY BINDING AGREEMENT AND GOVERNS ALL OF YOUR USE OF ALL OF
THE ELASTIC SOFTWARE WITH WHICH THIS AGREEMENT IS INCLUDED ("ELASTIC SOFTWARE")
THAT IS PROVIDED IN OBJECT CODE FORMAT, AND, IN ACCORDANCE WITH SECTION 2 BELOW,
CERTAIN OF THE ELASTIC SOFTWARE THAT IS PROVIDED IN SOURCE CODE FORMAT. BY
INSTALLING OR USING ANY OF THE ELASTIC SOFTWARE GOVERNED BY THIS AGREEMENT, YOU
ARE ASSENTING TO THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE
WITH SUCH TERMS AND CONDITIONS, YOU MAY NOT INSTALL OR USE THE ELASTIC SOFTWARE
GOVERNED BY THIS AGREEMENT. IF YOU ARE INSTALLING OR USING THE SOFTWARE ON
BEHALF OF A LEGAL ENTITY, YOU REPRESENT AND WARRANT THAT YOU HAVE THE ACTUAL
AUTHORITY TO AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT ON BEHALF OF
SUCH ENTITY.
Posted Date: April 20, 2018
This Agreement is entered into by and between Elasticsearch BV ("Elastic") and
You, or the legal entity on behalf of whom You are acting (as applicable,
"You").
1. OBJECT CODE END USER LICENSES, RESTRICTIONS AND THIRD PARTY OPEN SOURCE
SOFTWARE
1.1 Object Code End User License. Subject to the terms and conditions of
Section 1.2 of this Agreement, Elastic hereby grants to You, AT NO CHARGE and
for so long as you are not in breach of any provision of this Agreement, a
License to the Basic Features and Functions of the Elastic Software.
1.2 Reservation of Rights; Restrictions. As between Elastic and You, Elastic
and its licensors own all right, title and interest in and to the Elastic
Software, and except as expressly set forth in Sections 1.1, and 2.1 of this
Agreement, no other license to the Elastic Software is granted to You under
this Agreement, by implication, estoppel or otherwise. You agree not to: (i)
reverse engineer or decompile, decrypt, disassemble or otherwise reduce any
Elastic Software provided to You in Object Code, or any portion thereof, to
Source Code, except and only to the extent any such restriction is prohibited
by applicable law, (ii) except as expressly permitted in this Agreement,
prepare derivative works from, modify, copy or use the Elastic Software Object
Code or the Commercial Software Source Code in any manner; (iii) except as
expressly permitted in Section 1.1 above, transfer, sell, rent, lease,
distribute, sublicense, loan or otherwise transfer, Elastic Software Object
Code, in whole or in part, to any third party; (iv) use Elastic Software
Object Code for providing time-sharing services, any software-as-a-service,
service bureau services or as part of an application services provider or
other service offering (collectively, "SaaS Offering") where obtaining access
to the Elastic Software or the features and functions of the Elastic Software
is a primary reason or substantial motivation for users of the SaaS Offering
to access and/or use the SaaS Offering ("Prohibited SaaS Offering"); (v)
circumvent the limitations on use of Elastic Software provided to You in
Object Code format that are imposed or preserved by any License Key, or (vi)
alter or remove any Marks and Notices in the Elastic Software. If You have any
question as to whether a specific SaaS Offering constitutes a Prohibited SaaS
Offering, or are interested in obtaining Elastic's permission to engage in
commercial or non-commercial distribution of the Elastic Software, please
contact elastic_license@elastic.co.
1.3 Third Party Open Source Software. The Commercial Software may contain or
be provided with third party open source libraries, components, utilities and
other open source software (collectively, "Open Source Software"), which Open
Source Software may have applicable license terms as identified on a website
designated by Elastic. Notwithstanding anything to the contrary herein, use of
the Open Source Software shall be subject to the license terms and conditions
applicable to such Open Source Software, to the extent required by the
applicable licensor (which terms shall not restrict the license rights granted
to You hereunder, but may contain additional rights). To the extent any
condition of this Agreement conflicts with any license to the Open Source
Software, the Open Source Software license will govern with respect to such
Open Source Software only. Elastic may also separately provide you with
certain open source software that is licensed by Elastic. Your use of such
Elastic open source software will not be governed by this Agreement, but by
the applicable open source license terms.
2. COMMERCIAL SOFTWARE SOURCE CODE
2.1 Limited License. Subject to the terms and conditions of Section 2.2 of
this Agreement, Elastic hereby grants to You, AT NO CHARGE and for so long as
you are not in breach of any provision of this Agreement, a limited,
non-exclusive, non-transferable, fully paid up royalty free right and license
to the Commercial Software in Source Code format, without the right to grant
or authorize sublicenses, to prepare Derivative Works of the Commercial
Software, provided You (i) do not hack the licensing mechanism, or otherwise
circumvent the intended limitations on the use of Elastic Software to enable
features other than Basic Features and Functions or those features You are
entitled to as part of a Subscription, and (ii) use the resulting object code
only for reasonable testing purposes.
2.2 Restrictions. Nothing in Section 2.1 grants You the right to (i) use the
Commercial Software Source Code other than in accordance with Section 2.1
above, (ii) use a Derivative Work of the Commercial Software outside of a
Non-production Environment, in any production capacity, on a temporary or
permanent basis, or (iii) transfer, sell, rent, lease, distribute, sublicense,
loan or otherwise make available the Commercial Software Source Code, in whole
or in part, to any third party. Notwithstanding the foregoing, You may
maintain a copy of the repository in which the Source Code of the Commercial
Software resides and that copy may be publicly accessible, provided that you
include this Agreement with Your copy of the repository.
3. TERMINATION
3.1 Termination. This Agreement will automatically terminate, whether or not
You receive notice of such Termination from Elastic, if You breach any of its
provisions.
3.2 Post Termination. Upon any termination of this Agreement, for any reason,
You shall promptly cease the use of the Elastic Software in Object Code format
and cease use of the Commercial Software in Source Code format. For the
avoidance of doubt, termination of this Agreement will not affect Your right
to use Elastic Software, in either Object Code or Source Code formats, made
available under the Apache License Version 2.0.
3.3 Survival. Sections 1.2, 2.2. 3.3, 4 and 5 shall survive any termination or
expiration of this Agreement.
4. DISCLAIMER OF WARRANTIES AND LIMITATION OF LIABILITY
4.1 Disclaimer of Warranties. TO THE MAXIMUM EXTENT PERMITTED UNDER APPLICABLE
LAW, THE ELASTIC SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
AND ELASTIC AND ITS LICENSORS MAKE NO WARRANTIES WHETHER EXPRESSED, IMPLIED OR
STATUTORY REGARDING OR RELATING TO THE ELASTIC SOFTWARE. TO THE MAXIMUM EXTENT
PERMITTED UNDER APPLICABLE LAW, ELASTIC AND ITS LICENSORS SPECIFICALLY
DISCLAIM ALL IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NON-INFRINGEMENT WITH RESPECT TO THE ELASTIC SOFTWARE, AND WITH
RESPECT TO THE USE OF THE FOREGOING. FURTHER, ELASTIC DOES NOT WARRANT RESULTS
OF USE OR THAT THE ELASTIC SOFTWARE WILL BE ERROR FREE OR THAT THE USE OF THE
ELASTIC SOFTWARE WILL BE UNINTERRUPTED.
4.2 Limitation of Liability. IN NO EVENT SHALL ELASTIC OR ITS LICENSORS BE
LIABLE TO YOU OR ANY THIRD PARTY FOR ANY DIRECT OR INDIRECT DAMAGES,
INCLUDING, WITHOUT LIMITATION, FOR ANY LOSS OF PROFITS, LOSS OF USE, BUSINESS
INTERRUPTION, LOSS OF DATA, COST OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY
SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, IN CONNECTION WITH
OR ARISING OUT OF THE USE OR INABILITY TO USE THE ELASTIC SOFTWARE, OR THE
PERFORMANCE OF OR FAILURE TO PERFORM THIS AGREEMENT, WHETHER ALLEGED AS A
BREACH OF CONTRACT OR TORTIOUS CONDUCT, INCLUDING NEGLIGENCE, EVEN IF ELASTIC
HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
5. MISCELLANEOUS
This Agreement completely and exclusively states the entire agreement of the
parties regarding the subject matter herein, and it supersedes, and its terms
govern, all prior proposals, agreements, or other communications between the
parties, oral or written, regarding such subject matter. This Agreement may be
modified by Elastic from time to time, and any such modifications will be
effective upon the "Posted Date" set forth at the top of the modified
Agreement. If any provision hereof is held unenforceable, this Agreement will
continue without said provision and be interpreted to reflect the original
intent of the parties. This Agreement and any non-contractual obligation
arising out of or in connection with it, is governed exclusively by Dutch law.
This Agreement shall not be governed by the 1980 UN Convention on Contracts
for the International Sale of Goods. All disputes arising out of or in
connection with this Agreement, including its existence and validity, shall be
resolved by the courts with jurisdiction in Amsterdam, The Netherlands, except
where mandatory law provides for the courts at another location in The
Netherlands to have jurisdiction. The parties hereby irrevocably waive any and
all claims and defenses either might otherwise have in any such action or
proceeding in any of such courts based upon any alleged lack of personal
jurisdiction, improper venue, forum non conveniens or any similar claim or
defense. A breach or threatened breach, by You of Section 2 may cause
irreparable harm for which damages at law may not provide adequate relief, and
therefore Elastic shall be entitled to seek injunctive relief without being
required to post a bond. You may not assign this Agreement (including by
operation of law in connection with a merger or acquisition), in whole or in
part to any third party without the prior written consent of Elastic, which
may be withheld or granted by Elastic in its sole and absolute discretion.
Any assignment in violation of the preceding sentence is void. Notices to
Elastic may also be sent to legal@elastic.co.
6. DEFINITIONS
The following terms have the meanings ascribed:
6.1 "Affiliate" means, with respect to a party, any entity that controls, is
controlled by, or which is under common control with, such party, where
"control" means ownership of at least fifty percent (50%) of the outstanding
voting shares of the entity, or the contractual right to establish policy for,
and manage the operations of, the entity.
6.2 "Basic Features and Functions" means those features and functions of the
Elastic Software that are eligible for use under a Basic license, as set forth
at https://www.elastic.co/subscriptions, as may be modified by Elastic from
time to time.
6.3 "Commercial Software" means the Elastic Software Source Code in any file
containing a header stating the contents are subject to the Elastic License or
which is contained in the repository folder labeled "x-pack", unless a LICENSE
file present in the directory subtree declares a different license.
6.4 "Derivative Work of the Commercial Software" means, for purposes of this
Agreement, any modification(s) or enhancement(s) to the Commercial Software,
which represent, as a whole, an original work of authorship.
6.5 "License" means a limited, non-exclusive, non-transferable, fully paid up,
royalty free, right and license, without the right to grant or authorize
sublicenses, solely for Your internal business operations to (i) install and
use the applicable Features and Functions of the Elastic Software in Object
Code, and (ii) permit Contractors and Your Affiliates to use the Elastic
software as set forth in (i) above, provided that such use by Contractors must
be solely for Your benefit and/or the benefit of Your Affiliates, and You
shall be responsible for all acts and omissions of such Contractors and
Affiliates in connection with their use of the Elastic software that are
contrary to the terms and conditions of this Agreement.
6.6 "License Key" means a sequence of bytes, including but not limited to a
JSON blob, that is used to enable certain features and functions of the
Elastic Software.
6.7 "Marks and Notices" means all Elastic trademarks, trade names, logos and
notices present on the Documentation as originally provided by Elastic.
6.8 "Non-production Environment" means an environment for development, testing
or quality assurance, where software is not used for production purposes.
6.9 "Object Code" means any form resulting from mechanical transformation or
translation of Source Code form, including but not limited to compiled object
code, generated documentation, and conversions to other media types.
6.10 "Source Code" means the preferred form of computer software for making
modifications, including but not limited to software source code,
documentation source, and configuration files.
6.11 "Subscription" means the right to receive Support Services and a License
to the Commercial Software.

View file

@ -61,6 +61,13 @@ module LogStash module Config
module_hash = modules_array.find {|m| m["name"] == module_name} module_hash = modules_array.find {|m| m["name"] == module_name}
current_module = plugin_modules.find { |allmodules| allmodules.module_name == module_name } current_module = plugin_modules.find { |allmodules| allmodules.module_name == module_name }
enabled = current_module.is_enabled?(module_settings)
unless enabled
logger.warn("The #{module_name} module is not enabled. Please check the logs for additional information.")
next
end
alt_name = "module-#{module_name}" alt_name = "module-#{module_name}"
pipeline_id = alt_name pipeline_id = alt_name
module_settings.set("pipeline.id", pipeline_id) module_settings.set("pipeline.id", pipeline_id)
@ -71,6 +78,7 @@ module LogStash module Config
modul_setup = settings.get("modules_setup") modul_setup = settings.get("modules_setup")
# Only import data if it's not a config test and --setup is true # Only import data if it's not a config test and --setup is true
if !config_test && modul_setup if !config_test && modul_setup
logger.info("Setting up the #{module_name} module")
esclient = LogStash::ElasticsearchClient.build(module_hash) esclient = LogStash::ElasticsearchClient.build(module_hash)
kbnclient = LogStash::Modules::KibanaClient.new(module_hash) kbnclient = LogStash::Modules::KibanaClient.new(module_hash)
esconnected = esclient.can_connect? esconnected = esclient.can_connect?
@ -86,7 +94,10 @@ module LogStash module Config
connect_fail_args[:elasticsearch_hosts] = esclient.host_settings connect_fail_args[:elasticsearch_hosts] = esclient.host_settings
connect_fail_args[:kibana_hosts] = kbnclient.host_settings connect_fail_args[:kibana_hosts] = kbnclient.host_settings
end end
else
logger.info("Starting the #{module_name} module")
end end
config_string = current_module.config_string config_string = current_module.config_string
pipelines << {"pipeline_id" => pipeline_id, "alt_name" => alt_name, "config_string" => config_string, "settings" => module_settings} pipelines << {"pipeline_id" => pipeline_id, "alt_name" => alt_name, "config_string" => config_string, "settings" => module_settings}
rescue => e rescue => e
@ -101,5 +112,6 @@ module LogStash module Config
end end
pipelines pipelines
end end
end end
end end end end

View file

@ -18,7 +18,7 @@ module LogStash module Modules class Scaffold
@module_name = name @module_name = name
@directory = directory # this is the 'configuration folder in the GEM root.' @directory = directory # this is the 'configuration folder in the GEM root.'
@kibana_version_parts = "6.0.0".split('.') # this is backup in case kibana client fails to connect @kibana_version_parts = "6.0.0".split('.') # this is backup in case kibana client fails to connect
logger.info("Initializing module", :module_name => name, :directory => directory) logger.debug("Found module", :module_name => name, :directory => directory)
end end
def add_kibana_version(version_parts) def add_kibana_version(version_parts)
@ -48,5 +48,11 @@ module LogStash module Modules class Scaffold
return nil if @logstash_configuration.nil? return nil if @logstash_configuration.nil?
@logstash_configuration.config_string @logstash_configuration.config_string
end end
# subclass may override
def is_enabled?(settings)
true
end
end end end # class LogStash::Modules::Scaffold end end end # class LogStash::Modules::Scaffold

View file

@ -5,7 +5,6 @@ namespace "artifact" do
def package_files def package_files
[ [
"LICENSE",
"NOTICE.TXT", "NOTICE.TXT",
"CONTRIBUTORS", "CONTRIBUTORS",
"bin/**/*", "bin/**/*",
@ -80,8 +79,8 @@ namespace "artifact" do
def files(excluder=nil) def files(excluder=nil)
excluder ||= self.method(:exclude?) excluder ||= self.method(:exclude?)
return @files if @files
@files = package_files.collect do |glob| package_files.collect do |glob|
Rake::FileList[glob].reject(&excluder) Rake::FileList[glob].reject(&excluder)
end.flatten.uniq end.flatten.uniq
end end
@ -92,25 +91,25 @@ namespace "artifact" do
desc "Build a tar.gz of default logstash plugins with all dependencies" desc "Build a tar.gz of default logstash plugins with all dependencies"
task "tar" => ["prepare", "generate_build_metadata", "license:generate-notice-file"] do task "tar" => ["prepare", "generate_build_metadata", "license:generate-notice-file"] do
puts("[artifact:tar] Building tar.gz of default plugins") puts("[artifact:tar] Building tar.gz of default plugins")
build_tar build_tar('ELASTIC-LICENSE')
end end
desc "Build an OSS tar.gz of default logstash plugins with all dependencies" desc "Build an OSS tar.gz of default logstash plugins with all dependencies"
task "tar_oss" => ["prepare", "generate_build_metadata", "license:generate-notice-file"] do task "tar_oss" => ["prepare", "generate_build_metadata", "license:generate-notice-file"] do
puts("[artifact:tar] Building tar.gz of default plugins") puts("[artifact:tar] Building tar.gz of default plugins")
build_tar("-oss", oss_excluder) build_tar('APACHE-LICENSE-2.0', "-oss", oss_excluder)
end end
desc "Build a zip of default logstash plugins with all dependencies" desc "Build a zip of default logstash plugins with all dependencies"
task "zip" => ["prepare", "generate_build_metadata", "license:generate-notice-file"] do task "zip" => ["prepare", "generate_build_metadata", "license:generate-notice-file"] do
puts("[artifact:zip] Building zip of default plugins") puts("[artifact:zip] Building zip of default plugins")
build_zip build_zip('ELASTIC-LICENSE')
end end
desc "Build a zip of default logstash plugins with all dependencies" desc "Build a zip of default logstash plugins with all dependencies"
task "zip_oss" => ["prepare", "generate_build_metadata", "license:generate-notice-file"] do task "zip_oss" => ["prepare", "generate_build_metadata", "license:generate-notice-file"] do
puts("[artifact:zip] Building zip of default plugins") puts("[artifact:zip] Building zip of default plugins")
build_zip("-oss", oss_excluder) build_zip('APACHE-LICENSE-2.0',"-oss", oss_excluder)
end end
@ -123,7 +122,7 @@ namespace "artifact" do
desc "Build an RPM of logstash with all dependencies" desc "Build an RPM of logstash with all dependencies"
task "rpm_oss" => ["prepare", "generate_build_metadata", "license:generate-notice-file"] do task "rpm_oss" => ["prepare", "generate_build_metadata", "license:generate-notice-file"] do
puts("[artifact:rpm] building rpm package") puts("[artifact:rpm] building rpm package")
package("centos", "5", "-oss", oss_excluder) package("centos", "5", :oss)
end end
@ -136,7 +135,7 @@ namespace "artifact" do
desc "Build a DEB of logstash with all dependencies" desc "Build a DEB of logstash with all dependencies"
task "deb_oss" => ["prepare", "generate_build_metadata", "license:generate-notice-file"] do task "deb_oss" => ["prepare", "generate_build_metadata", "license:generate-notice-file"] do
puts("[artifact:deb] building deb package") puts("[artifact:deb] building deb package")
package("ubuntu", "12.04", "-oss", oss_excluder) package("ubuntu", "12.04", :oss)
end end
desc "Generate logstash core gems" desc "Generate logstash core gems"
@ -152,13 +151,13 @@ namespace "artifact" do
desc "Build a zip of all logstash plugins from logstash-plugins github repo" desc "Build a zip of all logstash plugins from logstash-plugins github repo"
task "zip-all-plugins" => ["prepare-all", "generate_build_metadata"] do task "zip-all-plugins" => ["prepare-all", "generate_build_metadata"] do
puts("[artifact:zip] Building zip of all plugins") puts("[artifact:zip] Building zip of all plugins")
build_zip "-all-plugins" build_zip('ELASTIC-LICENSE', "-all-plugins")
end end
desc "Build a tar.gz of all logstash plugins from logstash-plugins github repo" desc "Build a tar.gz of all logstash plugins from logstash-plugins github repo"
task "tar-all-plugins" => ["prepare-all", "generate_build_metadata"] do task "tar-all-plugins" => ["prepare-all", "generate_build_metadata"] do
puts("[artifact:tar] Building tar.gz of all plugins") puts("[artifact:tar] Building tar.gz of all plugins")
build_tar "-all-plugins" build_tar('ELASTIC-LICENSE', "-all-plugins")
end end
# Auxiliary tasks # Auxiliary tasks
@ -248,7 +247,7 @@ namespace "artifact" do
end end
end end
def build_tar(tar_suffix = nil, excluder=nil) def build_tar(license, tar_suffix = nil, excluder=nil)
require "zlib" require "zlib"
require "archive/tar/minitar" require "archive/tar/minitar"
ensure_logstash_version_constant_defined ensure_logstash_version_constant_defined
@ -260,6 +259,10 @@ namespace "artifact" do
write_to_tar(tar, path, "logstash-#{LOGSTASH_VERSION}#{PACKAGE_SUFFIX}/#{path}") write_to_tar(tar, path, "logstash-#{LOGSTASH_VERSION}#{PACKAGE_SUFFIX}/#{path}")
end end
source_license_path = "licenses/#{license}.txt"
fail("Missing source license: #{source_license_path}") unless File.exists?(source_license_path)
write_to_tar(tar, source_license_path, "logstash-#{LOGSTASH_VERSION}#{PACKAGE_SUFFIX}/LICENSE.txt")
# add build.rb to tar # add build.rb to tar
metadata_file_path_in_tar = File.join("logstash-core", "lib", "logstash", "build.rb") metadata_file_path_in_tar = File.join("logstash-core", "lib", "logstash", "build.rb")
path_in_tar = File.join("logstash-#{LOGSTASH_VERSION}#{PACKAGE_SUFFIX}", metadata_file_path_in_tar) path_in_tar = File.join("logstash-#{LOGSTASH_VERSION}#{PACKAGE_SUFFIX}", metadata_file_path_in_tar)
@ -293,7 +296,7 @@ namespace "artifact" do
end end
end end
def build_zip(zip_suffix = "", excluder=nil) def build_zip(license, zip_suffix = "", excluder=nil)
require 'zip' require 'zip'
ensure_logstash_version_constant_defined ensure_logstash_version_constant_defined
zippath = "build/logstash#{zip_suffix}-#{LOGSTASH_VERSION}#{PACKAGE_SUFFIX}.zip" zippath = "build/logstash#{zip_suffix}-#{LOGSTASH_VERSION}#{PACKAGE_SUFFIX}.zip"
@ -305,6 +308,10 @@ namespace "artifact" do
zipfile.add(path_in_zip, path) zipfile.add(path_in_zip, path)
end end
source_license_path = "licenses/#{license}.txt"
fail("Missing source license: #{source_license_path}") unless File.exists?(source_license_path)
zipfile.add("logstash-#{LOGSTASH_VERSION}#{PACKAGE_SUFFIX}/LICENSE.txt", source_license_path)
# add build.rb to zip # add build.rb to zip
metadata_file_path_in_zip = File.join("logstash-core", "lib", "logstash", "build.rb") metadata_file_path_in_zip = File.join("logstash-core", "lib", "logstash", "build.rb")
path_in_zip = File.join("logstash-#{LOGSTASH_VERSION}#{PACKAGE_SUFFIX}", metadata_file_path_in_zip) path_in_zip = File.join("logstash-#{LOGSTASH_VERSION}#{PACKAGE_SUFFIX}", metadata_file_path_in_zip)
@ -315,7 +322,9 @@ namespace "artifact" do
puts "Complete: #{zippath}" puts "Complete: #{zippath}"
end end
def package(platform, version, suffix=nil, excluder=nil) def package(platform, version, variant=:standard)
oss = variant == :oss
require "stud/temporary" require "stud/temporary"
require "fpm/errors" # TODO(sissel): fix this in fpm require "fpm/errors" # TODO(sissel): fix this in fpm
require "fpm/package/dir" require "fpm/package/dir"
@ -333,6 +342,14 @@ namespace "artifact" do
metadata_source_file_path = BUILD_METADATA_FILE.path metadata_source_file_path = BUILD_METADATA_FILE.path
dir.input("#{metadata_source_file_path}=/usr/share/logstash/#{metadata_file_path}") dir.input("#{metadata_source_file_path}=/usr/share/logstash/#{metadata_file_path}")
suffix = ""
excluder = nil
if oss
suffix= "-oss"
excluder = oss_excluder
end
files(excluder).each do |path| files(excluder).each do |path|
next if File.directory?(path) next if File.directory?(path)
# Omit any config dir from /usr/share/logstash for packages, since we're # Omit any config dir from /usr/share/logstash for packages, since we're
@ -341,6 +358,16 @@ namespace "artifact" do
dir.input("#{path}=/usr/share/logstash/#{path}") dir.input("#{path}=/usr/share/logstash/#{path}")
end end
if oss
# Artifacts whose sources are exclusively licensed under the Apache License and
# Apache-compatible licenses are distributed under the Apache License 2.0
dir.input("licenses/APACHE-LICENSE-2.0.txt=/usr/share/logstash/LICENSE.txt")
else
# Artifacts whose sources include Elastic Commercial Software are distributed
# under the Elastic License.
dir.input("licenses/ELASTIC-LICENSE.txt=/usr/share/logstash/LICENSE.txt")
end
# Create an empty /var/log/logstash/ directory in the package # Create an empty /var/log/logstash/ directory in the package
# This is a bit obtuse, I suppose, but it is necessary until # This is a bit obtuse, I suppose, but it is necessary until
# we find a better way to do this with fpm. # we find a better way to do this with fpm.
@ -374,8 +401,12 @@ namespace "artifact" do
case platform case platform
when "redhat", "centos" when "redhat", "centos"
require "fpm/package/rpm" require "fpm/package/rpm"
# Red Hat calls 'Apache Software License' == ASL
license = oss ? "ASL 2.0" : "Elastic License"
out = dir.convert(FPM::Package::RPM) out = dir.convert(FPM::Package::RPM)
out.license = "ASL 2.0" # Red Hat calls 'Apache Software License' == ASL out.license = license
out.attributes[:rpm_use_file_permissions] = true out.attributes[:rpm_use_file_permissions] = true
out.attributes[:rpm_user] = "root" out.attributes[:rpm_user] = "root"
out.attributes[:rpm_group] = "root" out.attributes[:rpm_group] = "root"
@ -387,8 +418,11 @@ namespace "artifact" do
out.config_files << "/etc/logstash/pipelines.yml" out.config_files << "/etc/logstash/pipelines.yml"
when "debian", "ubuntu" when "debian", "ubuntu"
require "fpm/package/deb" require "fpm/package/deb"
license = oss ? "ASL-2.0" : "Elastic-License"
out = dir.convert(FPM::Package::Deb) out = dir.convert(FPM::Package::Deb)
out.license = "Apache 2.0" out.license = license
out.attributes[:deb_user] = "root" out.attributes[:deb_user] = "root"
out.attributes[:deb_group] = "root" out.attributes[:deb_group] = "root"
out.attributes[:deb_suggests] = "java8-runtime-headless" out.attributes[:deb_suggests] = "java8-runtime-headless"

3
x-pack/CHANGELOG.md Normal file
View file

@ -0,0 +1,3 @@
## 0.1.0
- First version of Logstash monitoring includes agent that ships Logstash monitoring data
periodically to Elasticsearch instance.

34
x-pack/README.md Normal file
View file

@ -0,0 +1,34 @@
# Elastic License Functionality
This directory tree contains files subject to the Elastic License. The files subject to the Elastic License are grouped in this directory to clearly separate them from files licensed under the Apache License 2.0.
# Logstash X-Pack
A set of plugins that make up the Logstash X-Pack features.
# Setup
Logstash X-Pack features are a default part of Logstash; as X-Pack features provide integrations with the rest of the Elastic Stack, they will need to be configured to point to an Elasticsearch instance in order to work.
## Opting Out of X-Pack
If you are unable or unwilling to run the Elastic-Licensed X-Pack Features and Functions, OSS-only distributions are available from the [downloads page][]; to run Logstash from source without X-Pack, ensure that your environment variable `OSS` is exported with a value of `true`:
~~~ sh
export OSS=true
~~~
[downloads page]: https://www.elastic.co/downloads/logstash
# Configuration
To configure x-pack settings, you can edit config/logstash.yml and add `xpack.*` configs from the [logstash x-pack settings][].
[logstash x-pack settings]: https://www.elastic.co/guide/en/logstash/current/settings-xpack.html
# Building documentation
This repo contains information that is used in the Logstash Reference.
To build the Logstash Reference on your local machine, use the docbldls or docbldlsx build commands defined in https://github.com/elastic/docs/blob/master/doc_build_aliases.sh

View file

@ -0,0 +1,11 @@
#!/bin/bash
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.

# Forward an Elasticsearch snapshot URL into the test container, when set,
# so the integration tests can run against a specific Elasticsearch build.
if [ -n "${ELASTICSEARCH_SNAPSHOT_URL}" ]; then
  export DOCKER_ENV_OPTS="${DOCKER_ENV_OPTS} --env ELASTICSEARCH_SNAPSHOT_URL=${ELASTICSEARCH_SNAPSHOT_URL}"
fi

# Run the x-pack integration test suite inside the CI docker wrapper.
# "$@" (quoted) forwards all arguments intact, even those containing spaces;
# the original unquoted $@ would have word-split them.
ci/docker_run.sh logstash-xpack-integration-tests x-pack/ci/integration_tests.sh "$@"

7
x-pack/ci/docker_unit_tests.sh Executable file
View file

@ -0,0 +1,7 @@
#!/bin/bash
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.

# Run the x-pack unit test suite inside the CI docker wrapper.
# "$@" (quoted) forwards all arguments intact, even those containing spaces;
# the original unquoted $@ would have word-split them.
ci/docker_run.sh logstash-xpack-unit-tests x-pack/ci/unit_tests.sh "$@"

15
x-pack/ci/integration_tests.sh Executable file
View file

@ -0,0 +1,15 @@
#!/bin/bash -ie
# Note: the -e flag above must stay set so that $? correctly reflects the
# failure of any command in this script.
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.

# Runs the x-pack integration test suite via Gradle on CI.

# Since we are using the system JRuby, we need to make sure our JVM process
# uses at least 1g of memory; if we don't do this we can get OOM issues when
# installing gems. See https://github.com/elastic/logstash/issues/5179
export JRUBY_OPTS="-J-Xmx1g"
# Cap Gradle memory and disable the daemon/verbose logging for one-shot CI runs.
export GRADLE_OPTS="-Xmx2g -Dorg.gradle.daemon=false -Dorg.gradle.logging.level=info"
# Signal CI mode to the build scripts.
export CI=true
./gradlew runXPackIntegrationTests

15
x-pack/ci/unit_tests.sh Executable file
View file

@ -0,0 +1,15 @@
#!/bin/bash -ie
# Note: the -e flag above must stay set so that $? correctly reflects the
# failure of any command in this script.
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.

# Runs the x-pack unit test suite via Gradle on CI.

# Since we are using the system JRuby, we need to make sure our JVM process
# uses at least 1g of memory; if we don't do this we can get OOM issues when
# installing gems. See https://github.com/elastic/logstash/issues/5179
export JRUBY_OPTS="-J-Xmx1g"
# Cap Gradle memory and disable the daemon/verbose logging for one-shot CI runs.
export GRADLE_OPTS="-Xmx2g -Dorg.gradle.daemon=false -Dorg.gradle.logging.level=info"
# Signal CI mode to the build scripts.
export CI=true
./gradlew runXPackUnitTests

View file

@ -0,0 +1,17 @@
include::{log-repo-dir}/index-shared1.asciidoc[]
:edit_url!:
include::setup/setting-up-xpack.asciidoc[]
:edit_url:
include::{log-repo-dir}/index-shared2.asciidoc[]
:edit_url!:
include::release-notes/breaking-changes-xpack.asciidoc[]
:edit_url:
include::{log-repo-dir}/index-shared3.asciidoc[]
:edit_url!:
include::release-notes/xpack.asciidoc[]

View file

@ -0,0 +1,90 @@
[role="xpack"]
[[logstash-centralized-pipeline-management]]
=== Centralized Pipeline Management
The pipeline management feature centralizes the creation and
management of Logstash configuration pipelines.
NOTE: Centralized pipeline management is an {xpack} feature that is not included
with the basic license. If you want to try all of the features, you can start a
30-day trial. At the end of the trial period, you can purchase a subscription to
keep using the full functionality of the {xpack} components. For more
information, see https://www.elastic.co/subscriptions and
https://www.elastic.co/guide/en/x-pack/master/license-management.html[License
Management].
From within the pipeline
management UI in {kib}, you can control multiple Logstash instances. You can
add, edit, and delete pipeline configurations. On the Logstash side, you simply
need to enable configuration management and register Logstash to use the
centrally managed pipeline configurations.
The pipeline configurations, along with some metadata, are stored in
Elasticsearch. Any changes that you make to a pipeline definition in the UI are
picked up and loaded automatically by all Logstash instances registered to use
the pipeline. The changes are applied immediately; you do not have to restart
Logstash to pick up the changes, as long as Logstash is already registered to
use the pipeline.
NOTE: Centralized management is disabled until you configure and enable
{security}.
==== Managing Pipelines
Before using the pipeline management UI, you must:
* <<configuring-centralized-pipelines, Configure centralized pipeline management>>.
* If {kib} is protected with basic authentication, make sure your {kib} user has
the `logstash_admin` role as well as the `logstash_writer` role that you created
when you <<ls-security,configured Logstash to use basic authentication>>.
To centrally manage Logstash pipelines:
. Open {kib} in your browser and go to the Management tab. If you've set up
configuration management correctly, you'll see an area for managing Logstash.
Click the *Pipelines* link.
+
image::management/images/centralized_config.png[]
. To add a new pipeline, click the *Add* button and specify values for the
following fields:
+
--
[horizontal]
Pipeline ID::
A name that uniquely identifies the pipeline. This is the ID that you used when
you
<<configuring-centralized-pipelines,configured centralized pipeline management>>
and specified a list of pipeline IDs in the `xpack.management.pipeline.id`
setting.
Description::
A description of the pipeline configuration. This information is for your use.
Pipeline::
The pipeline configuration. You can treat the editor in the pipeline management
UI like any other editor. You don't have to worry about whitespace or indentation.
image::management/images/new_pipeline.png[]
--
. Click *Save*.
The pipeline runs on all Logstash instances that are registered to use the
pipeline. There is no validation done at the UI level. The UI will save the new
configuration, and Logstash will attempt to load it. You need to check the local
Logstash logs for configuration errors. If you're using the Logstash monitoring
feature in {xpack}, you can also navigate to the Monitoring tab to check the
status of your Logstash nodes.
You can specify multiple pipeline configurations that run in parallel on the
same Logstash node.
If you edit a pipeline configuration and save the changes, Logstash reloads
the configuration in the background and continues processing events.
If you delete a pipeline (for example, `apache`) from the UI, Logstash will
attempt to stop the pipeline if it's running. Logstash will wait until all
events have been fully processed by the pipeline. Before deleting a pipeline,
make sure you understand your data sources because stopping a pipeline may
lead to data loss.

View file

@ -0,0 +1,42 @@
[role="xpack"]
[[configuring-centralized-pipelines]]
=== Configuring Centralized Pipeline Management
To configure
{logstash-ref}/logstash-centralized-pipeline-management.html[centralized pipeline management]:
. Verify that you are using a license that includes the pipeline management
feature.
+
--
For more information, see https://www.elastic.co/subscriptions and
{xpack-ref}/license-management.html[License Management].
--
. Specify
<<configuration-management-settings,configuration management settings>> in the
`logstash.yml` file. At a
minimum, set:
+
* `xpack.management.enabled: true` to enable centralized configuration
management.
* `xpack.management.elasticsearch.url` to specify the Elasticsearch
instance that will store the Logstash pipeline configurations and metadata.
* `xpack.management.pipeline.id` to register the pipelines that you want to
centrally manage.
. Restart Logstash.
. If your Elasticsearch cluster is protected with basic authentication, assign
the `logstash_admin` role to any users who will use centralized pipeline
management. See <<ls-security>>.
NOTE: Centralized management is disabled until you configure and enable
{security}.
IMPORTANT: After you've configured Logstash to use centralized pipeline
management, you can no longer specify local pipeline configurations. This means
that the `pipelines.yml` file and settings like `path.config` and
`config.string` are inactive when this feature is enabled.
include::{xls-repo-dir}/settings/configuration-management-settings.asciidoc[]

Binary file not shown.

After

Width:  |  Height:  |  Size: 62 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 154 KiB

View file

@ -0,0 +1,48 @@
[role="xpack"]
[[logstash-monitoring-collectors]]
==== Collectors
Collectors, as their name implies, collect things. In {monitoring} for Logstash,
collectors are just <<pipeline,Inputs>> in the same way that ordinary Logstash
configurations provide inputs.
Like {monitoring} for {es}, each collector can create zero or more monitoring
documents. As it is currently implemented, each Logstash node runs two types of
collectors: one for node stats and one for pipeline stats.
[options="header"]
|=======================
| Collector | Data Types | Description
| Node Stats | `logstash_stats`
| Gathers details about the running node, such as memory utilization and CPU
usage (for example, `GET /_stats`).
+
This runs on every Logstash node with {monitoring} enabled. One common
failure is that Logstash directories are copied with their `path.data` directory
included (`./data` by default), which copies the persistent UUID of the Logstash
node along with it. As a result, it generally appears that one or more Logstash
nodes are failing to collect monitoring data, when in fact they are all really
misreporting as the _same_ Logstash node. Re-use `path.data` directories only
when upgrading Logstash, such that upgraded nodes replace the previous versions.
| Pipeline Stats | `logstash_state`
| Gathers details about the node's running pipelines, which powers the
Monitoring Pipeline UI.
|=======================
Per collection interval, which defaults to 10 seconds (`10s`), each collector is
run. The failure of an individual collector does not impact any other collector.
Each collector, as an ordinary Logstash input, creates a separate Logstash event
in its isolated monitoring pipeline. The Logstash output then sends the data.
The collection interval can be configured dynamically and you can also disable
data collection. For more information about the configuration options for the
collectors, see <<monitoring-settings>>.
WARNING: Unlike {monitoring} for {es} and {kib}, there is no
`xpack.monitoring.collection.enabled` setting on Logstash. You must use the
`xpack.monitoring.enabled` setting to enable and disable data collection.
If gaps exist in the monitoring charts in {kib}, it is typically because either
a collector failed or the monitoring cluster did not receive the data (for
example, it was being restarted). In the event that a collector fails, a logged
error should exist on the node that attempted to perform the collection.

View file

@ -0,0 +1,106 @@
[role="xpack"]
[[configuring-logstash]]
=== Configuring Monitoring for Logstash Nodes
++++
<titleabbrev>Configuring Monitoring</titleabbrev>
++++
To monitor Logstash nodes:
. Identify where to send monitoring data. This cluster is often referred to as
the _production cluster_. For examples of typical monitoring architectures, see
{xpack-ref}/how-monitoring-works.html[How Monitoring Works].
+
--
IMPORTANT: To visualize Logstash as part of the Elastic Stack (as shown in Step
6), send metrics to your _production_ cluster. Sending metrics to a dedicated
monitoring cluster will show the Logstash metrics under the _monitoring_ cluster.
--
. Verify that the `xpack.monitoring.collection.enabled` setting is `true` on the
production cluster. If that setting is `false`, the collection of monitoring data
is disabled in {es} and data is ignored from all other sources.
. Configure your Logstash nodes to send metrics by setting the
`xpack.monitoring.elasticsearch.url` in `logstash.yml`. If {security} is enabled,
you also need to specify the credentials for the
{xpack-ref}/setting-up-authentication.html#built-in-users[built-in `logstash_system` user]. For more information about these settings, see <<monitoring-settings>>.
+
--
[source,yaml]
--------------------------------------------------
xpack.monitoring.elasticsearch.url: ["http://es-prod-node-1:9200", "http://es-prod-node-2:9200"] <1>
xpack.monitoring.elasticsearch.username: "logstash_system" <2>
xpack.monitoring.elasticsearch.password: "changeme"
--------------------------------------------------
<1> If SSL/TLS is enabled on the production cluster, you must
connect through HTTPS. As of v5.2.1, you can specify multiple
Elasticsearch hosts as an array, in addition to specifying a single
host as a string. If multiple URLs are specified, Logstash
can round-robin requests to these production nodes.
<2> If {security} is disabled on the production cluster, you can omit these
`username` and `password` settings.
--
. If SSL/TLS is enabled on the production {es} cluster, specify the trusted
CA certificates that will be used to verify the identity of the nodes
in the cluster.
+
--
To add a CA certificate to a Logstash node's trusted certificates, you
can specify the location of the PEM encoded certificate with the
`ca` setting:
[source,yaml]
--------------------------------------------------
xpack.monitoring.elasticsearch.ssl.ca: /path/to/ca.crt
--------------------------------------------------
Alternatively, you can configure trusted certificates using a truststore
(a Java Keystore file that contains the certificates):
[source,yaml]
--------------------------------------------------
xpack.monitoring.elasticsearch.ssl.truststore.path: /path/to/file
xpack.monitoring.elasticsearch.ssl.truststore.password: password
--------------------------------------------------
Also, optionally, you can set up client certificate using a keystore
(a Java Keystore file that contains the certificate):
[source,yaml]
--------------------------------------------------
xpack.monitoring.elasticsearch.ssl.keystore.path: /path/to/file
xpack.monitoring.elasticsearch.ssl.keystore.password: password
--------------------------------------------------
Set sniffing to `true` to enable discovery of other nodes of the {es} cluster.
It defaults to `false`, as shown in the following example:
[source,yaml]
--------------------------------------------------
xpack.monitoring.elasticsearch.sniffing: false
--------------------------------------------------
--
. Restart your Logstash nodes.
. To verify your {monitoring} configuration, point your web browser at your {kib}
host, and select **Monitoring** from the side navigation. Metrics reported from
your Logstash nodes should be visible in the Logstash section. When security is
enabled, to view the monitoring dashboards you must log in to {kib} as a user
who has the `kibana_user` and `monitoring_user` roles.
+
image:monitoring/images/monitoring-ui.png["Monitoring",link="monitoring/images/monitoring-ui.png"]
[float]
[[monitoring-upgraded-logstash]]
==== Re-enabling Logstash Monitoring After Upgrading
When upgrading from older versions of {xpack}, the built-in `logstash_system`
user is disabled for security reasons. To resume monitoring,
{xpack-ref}/monitoring-troubleshooting.html[change the password and re-enable the logstash_system user].
include::{xls-repo-dir}/settings/monitoring-settings.asciidoc[]

Binary file not shown.

After

Width:  |  Height:  |  Size: 384 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 186 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 101 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 279 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 9.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.9 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.8 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 365 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 155 KiB

View file

@ -0,0 +1,5 @@
include::intro.asciidoc[]
include::monitoring-overview.asciidoc[]
include::monitoring-ui.asciidoc[]
include::pipeline-viewer.asciidoc[]
include::troubleshooting.asciidoc[]

View file

@ -0,0 +1,19 @@
Alternatively, you can <<configuring-logstash,configure {monitoring}>> to send
data to a monitoring cluster.
NOTE: Monitoring is an {xpack} feature under the Basic License and is therefore
*free to use*.
You can use the <<logstash-monitoring-ui,monitoring UI>> in {xpack} to view the
metrics and gain insight into how your Logstash deployment is running.
The <<logstash-pipeline-viewer,pipeline viewer>> in {xpack} offers additional
visibility into the behavior and performance of complex pipeline configurations.
It shows a graph representation of the overall pipeline topology, data flow, and
branching logic, overlaid with important metrics, like events per second, for
each plugin in the view.
This documentation focuses on the {monitoring} infrastructure and setup in
Logstash. For an introduction to monitoring your Elastic stack, including {es}
and {kib}, see {xpack-ref}/xpack-monitoring.html[Monitoring the Elastic Stack].

View file

@ -0,0 +1,45 @@
[role="xpack"]
[[logstash-monitoring-output]]
==== Output
Like all Logstash pipelines, the purpose of the dedicated monitoring pipeline is
to send events to outputs. In the case of {monitoring} for Logstash, the output
is always an `elasticsearch` output. However, unlike ordinary Logstash pipelines,
the output is configured within the `logstash.yml` settings file via the
`xpack.monitoring.elasticsearch.*` settings.
Other than its unique manner of configuration, this `elasticsearch` output
behaves like all `elasticsearch` outputs, including its ability to pause data
collection when issues exist with the output.
IMPORTANT: It is critical that all Logstash nodes share the same setup.
Otherwise, monitoring data might be routed in different ways or to different places.
[[logstash-monitoring-default]]
===== Default Configuration
If a Logstash node does not explicitly define an {monitoring} output setting,
the following default configuration is used:
[source,yaml]
---------------------------------------------------
xpack.monitoring.elasticsearch.url: [ "http://localhost:9200" ]
---------------------------------------------------
All data produced by {monitoring} for Logstash is indexed in the monitoring
cluster by using the `.monitoring-logstash` template, which is managed by the
{ref}/es-monitoring-exporters.html[exporters] within {es}.
If you are working with a cluster that has {security} enabled, extra steps are
necessary to properly configure Logstash. For more information, see
<<configuring-logstash>>.
IMPORTANT: When discussing security relative to the `elasticsearch` output, it
is critical to remember that all users are managed on the production cluster,
which is identified in the `xpack.monitoring.elasticsearch.url` setting.
This is particularly important to remember when you move from development
environments to production environments, where you often have dedicated
monitoring clusters.
For more information about the configuration options for the output, see
<<monitoring-settings>>.

View file

@ -0,0 +1,35 @@
[role="xpack"]
[[logstash-monitoring-overview]]
=== {monitoring} Overview
++++
<titleabbrev>Overview</titleabbrev>
++++
This section deals with Logstash, including an explanation of its internal parts
at a high level. {monitoring} for Logstash represents a total of two pieces:
* <<logstash-monitoring-collectors,Collectors>>
* <<logstash-monitoring-output,Output>>
These pieces are created when {monitoring} for Logstash is enabled, and they
live outside of the default Logstash pipeline in a dedicated monitoring
pipeline. This configuration means that all data and processing has a minimal
impact on ordinary Logstash processing. As a secondary benefit of existing in a
separate pipeline, existing Logstash features, such as the
<<plugins-outputs-elasticsearch,`elasticsearch` output>>, can be reused to
benefit from its retry policies.
NOTE: The `elasticsearch` output that is used by {monitoring} for Logstash is
configured exclusively via settings found in `logstash.yml`. It is not
configured by using anything from the Logstash configurations that might also be
using their own separate `elasticsearch` outputs.
The {es} cluster that is configured for use with {monitoring} for Logstash is
expected to be the production cluster. This configuration enables the production
{es} cluster to add metadata (for example, its cluster UUID) to the Logstash
monitoring data then route it to the monitoring clusters. For more information
about typical monitoring architectures, see
{xpack-ref}/how-monitoring-works.html[How Monitoring Works].
include::collectors.asciidoc[]
include::monitoring-output.asciidoc[]

View file

@ -0,0 +1,25 @@
[role="xpack"]
[[logstash-monitoring-ui]]
=== Monitoring UI
When running Logstash 5.2 or greater, you can use the
https://www.elastic.co/products/x-pack/monitoring[monitoring feature in X-Pack]
to gain deep visibility into metrics about your Logstash deployment. In the
overview dashboard, you can see all events received and sent by Logstash, plus
info about memory usage and uptime:
image::monitoring/images/overviewstats.png[Logstash monitoring overview dashboard in Kibana]
Then you can drill down to see stats about a specific node:
image::monitoring/images/nodestats.png[Logstash monitoring node stats dashboard in Kibana]
NOTE: A Logstash node is considered unique based on its persistent UUID, which
is written to the <<logstash-settings-file,`path.data`>> directory when the node
starts.
Before you can use the monitoring UI,
<<configuring-logstash, configure Logstash monitoring>>.
For information about using the Monitoring UI, see
{kibana-ref}/xpack-monitoring.html[{monitoring} in {kib}].

View file

@ -0,0 +1,142 @@
[role="xpack"]
[[logstash-pipeline-viewer]]
=== Pipeline Viewer UI
The pipeline viewer in {xpack} provides a simple way for you to visualize and
monitor the behavior of complex Logstash pipeline configurations. Within the
pipeline viewer, you can explore a directed acyclic graph (DAG) representation
of the overall pipeline topology, data flow, and branching logic. The diagram
is overlayed with important metrics, like events per second and time spent in
milliseconds, for each plugin in the view.
The diagram includes visual indicators to draw your attention to potential
bottlenecks in the pipeline, making it easy for you to diagnose and fix
problems.
[IMPORTANT]
==========================================================================
When you configure the stages in your Logstash pipeline, make sure you specify
semantic IDs. If you don't specify IDs, Logstash generates them for you.
Using semantic IDs makes it easier to identify the configurations that are
causing bottlenecks. For example, you may have several grok filters running
in your pipeline. If you haven't specified semantic IDs, you won't be able
to tell at a glance which filters are slow. If you specify semantic IDs,
such as `apacheParsingGrok` and `cloudwatchGrok`, you'll know exactly which
grok filters are causing bottlenecks.
==========================================================================
Before using the pipeline viewer, you need to <<setup-xpack,set up {xpack}>> and
<<monitoring-logstash,configure Logstash monitoring>>.
[float]
==== What types of problems does the pipeline viewer show?
The pipeline viewer highlights CPU% and event latency in cases where the values
are anomalous. The purpose of these highlights is to enable users to quickly
identify processing that is disproportionately slow. This may not necessarily
mean that anything is wrong with a given plugin, since some plugins are slower
than others due to the nature of the work they do. For instance, you may find
that a grok filter that uses a complicated regexp runs a lot slower than a
mutate filter that simply adds a field. The grok filter might be highlighted in
this case, though it may not be possible to further optimize its work.
The exact formula used is a heuristic, and thus is subject to change.
[float]
==== View the pipeline diagram
To view the pipeline diagram:
. In Logstash, start the Logstash pipeline that you want to monitor.
+
Assuming that you've set up Logstash monitoring, Logstash will begin shipping
metrics to the monitoring cluster.
. Navigate to the Monitoring tab in Kibana.
+
You should see a Logstash section.
+
[role="screenshot"]
image::monitoring/images/monitoring-ui.png[Monitoring UI]
. Click the *Pipelines* link under Logstash to see all the pipelines that are
being monitored.
+
Each pipeline is identified by a pipeline ID (`main` by default). For each
pipeline, you'll see charts showing the pipeline's throughput and the number
of nodes on which the pipeline is running during the selected time range.
+
[role="screenshot"]
image::monitoring/images/pipeline-viewer-overview.png[Pipeline Overview]
+
// To update the screenshot above, see pipelines/tweets_about_rain.conf
+
. Click a pipeline in the list to drill down and explore the pipeline
diagram. The diagram shows the latest version of the pipeline. To view an
older version of the pipeline, select a version from the drop-down list next
to the pipeline ID at the top of the page.
+
NOTE: Each time you modify a pipeline, Logstash generates a new version. Viewing
different versions of the pipeline stats allows you to see how changes to the pipeline
over time affect throughput and other metrics. Note that Logstash stores multiple
versions of the pipeline stats; it does not store multiple versions of the pipeline
configurations themselves.
+
The diagram shows all the stages feeding data through the pipeline. It also shows
conditional logic.
+
[role="screenshot"]
image::monitoring/images/pipeline-diagram.png[Pipeline Diagram]
+
// To update the screenshot above, see pipelines/tweets_about_rain.conf
+
The information displayed on each vertex varies depending on the plugin type.
+
Here's an example of an *input* vertex:
+
[role="screenshot"]
image::monitoring/images/pipeline-input-detail.png[Input vertex]
+
The *I* badge indicates that this is an input stage. The vertex shows:
+
--
* input type - *stdin*
* user-supplied ID - *logfileRead*
* throughput expressed in events per second - *0.7 e/s*
Here's an example of a *filter* vertex.
[role="screenshot"]
image::monitoring/images/pipeline-filter-detail.png[Filter vertex]
The filter icon indicates that this is a filter stage. The vertex shows:
* filter type - *sleep*
* user-supplied ID - *caSleep*
* worker usage expressed as the percentage of total execution time - *0%*
* performance - the number of milliseconds spent processing each event - *20.00 ms/e*
* throughput - the number of events sent per second - *0.0 e/s*
Stats that are anomalously slow appear highlighted in the pipeline viewer.
This doesn't necessarily indicate a problem, but it highlights potential
bottlenecks so that you can find them quickly.
An *output* vertex shows the same information as a filter vertex, but it has an
*O* badge to indicate that it is an output stage:
[role="screenshot"]
image::monitoring/images/pipeline-output-detail.png[Output vertex]
--
. Hover over a vertex in the diagram, and you'll see only the related nodes that
are ancestors or descendants of the current vertex.
. Explore the diagram and look for performance anomalies.
. Click on a vertex to see details about it.
+
[role="screenshot"]
image::monitoring/images/pipeline-viewer-detail-drawer.png[Vertex detail]

View file

@ -0,0 +1,43 @@
### This is the sample pipeline whose screenshots are used in
### the Pipeline Viewer documentation (../pipeline-viewer.asciidoc)
###
### Whenever the Pipeline Viewer UI changes, run this pipeline and
### open in the new UI to take updated screenshots.
###
input {
  ## Note: you will have to setup the environment variables used
  ## below. Refer to the Twitter Logstash Input plugin documentation
  ## for their expected values
  twitter {
    # Semantic ID so this stage is easy to identify in the pipeline viewer.
    id => "tweet harvester"
    consumer_key => "${TWITTER_API_CONSUMER_KEY}"
    consumer_secret => "${TWITTER_API_CONSUMER_SECRET}"
    # Stream only tweets mentioning any of these rain-related keywords.
    keywords => [ "rain", "monsoon", "shower", "drizzle" ]
    oauth_token => "${TWITTER_API_OAUTH_TOKEN}"
    oauth_token_secret => "${TWITTER_API_OAUTH_TOKEN_SECRET}"
  }
}
filter {
  # Capture the first word of the tweet text into the `is_rt` field;
  # retweets start with the literal token "RT".
  grok {
    match => { "message" => "%{WORD:is_rt}" }
  }
  # Discard retweets so only original tweets are indexed; the semantic ID
  # makes this conditional branch visible in the pipeline diagram.
  if [is_rt] == "RT" {
    drop {
      id => "drop_all_RTs"
    }
  }
}
output {
  # Print one dot per event as a lightweight throughput indicator.
  stdout {
    codec => dots
  }
  # NOTE(review): sample credentials for a default local cluster; no `hosts`
  # is set, so the output defaults to localhost — adjust for real deployments.
  elasticsearch {
    user => "elastic"
    password => "changeme"
    index => "tweets"
  }
}

View file

@ -0,0 +1,36 @@
[role="xpack"]
[[monitoring-troubleshooting]]
=== Troubleshooting {monitoring} in Logstash
++++
<titleabbrev>Troubleshooting</titleabbrev>
++++
[float]
==== Logstash Monitoring Not Working After Upgrade
When upgrading from older versions, the built-in `logstash_system` user is
disabled for security reasons. To resume monitoring:
. Change the `logstash_system` password:
+
--
[source, sh]
---------------------------------------------------------------
PUT _xpack/security/user/logstash_system/_password
{
"password": "newpassword"
}
---------------------------------------------------------------
//CONSOLE
--
. Re-enable the `logstash_system` user:
+
--
[source, sh]
---------------------------------------------------------------
PUT _xpack/security/user/logstash_system/_enable
---------------------------------------------------------------
//CONSOLE
--

View file

@ -0,0 +1,29 @@
[role="xpack"]
[[xls-7.0.0-alpha1]]
=== Logstash {xpack} 7.0.0-alpha1 Release Notes
[float]
[[xls-breaking-7.0.0-alpha1]]
==== Breaking Changes
No breaking changes have been made yet.
////
[float]
[[features-7.0.0-alpha1]]
===== New Features
[float]
[[enhancements-7.0.0-alpha1]]
===== Enhancements
[float]
[[bugs-7.0.0-alpha1]]
===== Bug Fixes
////
See also:
* <<logstash-7-0-0-alpha1>>
* {ref}/xes-7.0.0-alpha1.html[{es} {xpack} 7.0.0-alpha1 Release Notes]
* {kibana-ref}/xkb-7.0.0-alpha1.html[{kib} {xpack} 7.0.0-alpha1 Release Notes]

View file

@ -0,0 +1,14 @@
[role="xpack"]
[[breaking-changes-xls]]
== {xpack} Breaking Changes
This section summarizes the changes that you need to be aware of when migrating
your application from one version of {xpack} to another.
See also:
* <<breaking-changes,Breaking Changes in Logstash>>
* {ref}/breaking-changes-xes.html[{xpack} Breaking Changes in {es}]
* {kibana-ref}/breaking-changes-xkb.html[{xpack} Breaking Changes in {kib}]
There are no breaking changes in Logstash {xpack} 7.0 features.

View file

@ -0,0 +1,15 @@
[role="xpack"]
[[release-notes-xls]]
== {xpack} Release Notes
This section summarizes the changes in each release for all of the {xpack} components in Logstash.
* <<xls-7.0.0-alpha1>>
See also:
* <<releasenotes,Logstash Release Notes>>
* {ref}/release-notes-xes.html[{es} {xpack} Release Notes]
* {kibana-ref}/release-notes-xkb.html[{kib} {xpack} Release Notes]
include::7.0.0-alpha1.asciidoc[]

View file

@ -0,0 +1,255 @@
[role="xpack"]
[[ls-security]]
=== Configuring Security in Logstash
++++
<titleabbrev>Configuring Security</titleabbrev>
++++
The Logstash {es} plugins (
{logstash-ref}/plugins-outputs-elasticsearch.html[output],
{logstash-ref}/plugins-inputs-elasticsearch.html[input],
{logstash-ref}/plugins-filters-elasticsearch.html[filter]
and {logstash-ref}/monitoring-logstash.html[monitoring])
support authentication and encryption over HTTP.
To use Logstash with a secured cluster, you need to configure authentication
credentials for Logstash. Logstash throws an exception and the processing
pipeline is halted if authentication fails.
If encryption is enabled on the cluster, you also need to enable TLS/SSL in the
Logstash configuration.
If you want to monitor your Logstash instance with {monitoring}, and store the
monitoring data in a secured {es} cluster, you must configure Logstash
with a username and password for a user with the appropriate permissions.
In addition to configuring authentication credentials for Logstash, you need
to grant authorized users permission to access the Logstash indices.
[float]
[[ls-http-auth-basic]]
==== Configuring Logstash to use Basic Authentication
Logstash needs to be able to manage index templates, create indices,
and write and delete documents in the indices it creates.
To set up authentication credentials for Logstash:
. Create a `logstash_writer` role that has the `manage_index_templates` and
`monitor` cluster privileges, and the `write`, `delete`, and `create_index`
privileges for the Logstash indices. You can create roles from the **Management >
Roles** UI in {kib} or through the `role` API:
+
[source, sh]
---------------------------------------------------------------
POST _xpack/security/role/logstash_writer
{
"cluster": ["manage_index_templates", "monitor"],
"indices": [
{
"names": [ "logstash-*" ], <1>
"privileges": ["write","delete","create_index"]
}
]
}
---------------------------------------------------------------
<1> If you use a custom Logstash index pattern, specify that pattern
instead of the default `logstash-*` pattern.
. Create a `logstash_internal` user and assign it the `logstash_writer` role.
You can create users from the **Management > Users** UI in {kib} or through
the `user` API:
+
[source, sh]
---------------------------------------------------------------
POST _xpack/security/user/logstash_internal
{
"password" : "x-pack-test-password",
"roles" : [ "logstash_writer"],
"full_name" : "Internal Logstash User"
}
---------------------------------------------------------------
. Configure Logstash to authenticate as the `logstash_internal` user you just
created. You configure credentials separately for each of the {es} plugins in
your Logstash `.conf` file. For example:
+
[source,js]
--------------------------------------------------
input {
elasticsearch {
...
user => logstash_internal
password => x-pack-test-password
}
}
filter {
elasticsearch {
...
user => logstash_internal
password => x-pack-test-password
}
}
output {
elasticsearch {
...
user => logstash_internal
password => x-pack-test-password
}
}
--------------------------------------------------
[float]
[[ls-user-access]]
==== Granting Users Access to the Logstash Indices
To access the indices Logstash creates, users need the `read` and
`view_index_metadata` privileges:
. Create a `logstash_reader` role that has the `read` and `view_index_metadata`
privileges for the Logstash indices. You can create roles from the
**Management > Roles** UI in {kib} or through the `role` API:
+
[source, sh]
---------------------------------------------------------------
POST _xpack/security/role/logstash_reader
{
"indices": [
{
"names": [ "logstash-*" ], <1>
"privileges": ["read","view_index_metadata"]
}
]
}
---------------------------------------------------------------
<1> If you use a custom Logstash index pattern, specify that pattern
instead of the default `logstash-*` pattern.
. Assign your Logstash users the `logstash_reader` role. If the Logstash user
will be using
{logstash-ref}/logstash-centralized-pipeline-management.html[centralized pipeline management],
also assign the `logstash_admin` role. You can create and manage users from the
**Management > Users** UI in {kib} or through the `user` API:
+
[source, sh]
---------------------------------------------------------------
POST _xpack/security/user/logstash_user
{
"password" : "x-pack-test-password",
"roles" : [ "logstash_reader", "logstash_admin"], <1>
"full_name" : "Kibana User for Logstash"
}
---------------------------------------------------------------
<1> `logstash_admin` is a built-in role that provides access to `.logstash-*`
indices for managing configurations.
[float]
[[ls-http-auth-pki]]
==== Configuring the {es} Output to use PKI Authentication
The `elasticsearch` output supports PKI authentication. To use an X.509
client-certificate for authentication, you configure the `keystore` and
`keystore_password` options in your Logstash `.conf` file:
[source,js]
--------------------------------------------------
output {
elasticsearch {
...
keystore => /path/to/keystore.jks
keystore_password => realpassword
truststore => /path/to/truststore.jks <1>
truststore_password => realpassword
}
}
--------------------------------------------------
<1> If you use a separate truststore, the truststore path and password are
also required.
[float]
[[ls-http-ssl]]
==== Configuring Logstash to use TLS Encryption
If TLS encryption is enabled on the {es} cluster, you need to
configure the `ssl` and `cacert` options in your Logstash `.conf` file:
[source,js]
--------------------------------------------------
output {
elasticsearch {
...
ssl => true
cacert => '/path/to/cert.pem' <1>
}
}
--------------------------------------------------
<1> The path to the local `.pem` file that contains the Certificate
Authority's certificate.
[float]
[[ls-monitoring-user]]
==== Configuring Credentials for Logstash Monitoring
If you plan to ship Logstash {logstash-ref}/monitoring-logstash.html[monitoring]
data to a secure cluster, you need to configure the username and password that
Logstash uses to authenticate for shipping monitoring data.
{security} comes preconfigured with a
{xpack-ref}/setting-up-authentication.html#built-in-users[`logstash_system` built-in user]
for this purpose. This user has the minimum permissions necessary for the
monitoring function, and _should not_ be used for any other purpose - it is
specifically _not intended_ for use within a Logstash pipeline.
By default, the `logstash_system` user does not have a password. The user will
not be enabled until you set a password. Set the password through the change
password API:
[source,js]
---------------------------------------------------------------------
PUT _xpack/security/user/logstash_system/_password
{
"password": "t0p.s3cr3t"
}
---------------------------------------------------------------------
// CONSOLE
Then configure the user and password in the `logstash.yml` configuration file:
[source,yaml]
----------------------------------------------------------
xpack.monitoring.elasticsearch.username: logstash_system
xpack.monitoring.elasticsearch.password: t0p.s3cr3t
----------------------------------------------------------
If you initially installed an older version of {xpack}, and then upgraded, the
`logstash_system` user may have defaulted to `disabled` for security reasons.
You can enable the user through the `user` API:
[source,js]
---------------------------------------------------------------------
PUT _xpack/security/user/logstash_system/_enable
---------------------------------------------------------------------
// CONSOLE
[float]
[[ls-pipeline-management-user]]
==== Configuring Credentials for Centralized Pipeline Management
If you plan to use Logstash
{logstash-ref}/logstash-centralized-pipeline-management.html[centralized pipeline management],
you need to configure the username and password that Logstash uses for managing
configurations.
You configure the user and password in the `logstash.yml` configuration file:
[source,yaml]
----------------------------------------------------------
xpack.management.elasticsearch.username: logstash_admin_user <1>
xpack.management.elasticsearch.password: t0p.s3cr3t
----------------------------------------------------------
<1> The user you specify here must have the built-in `logstash_admin` role as
well as the `logstash_writer` role that you created earlier.

View file

@ -0,0 +1,80 @@
[role="xpack"]
[[configuration-management-settings]]
==== Configuration Management Settings in Logstash
++++
<titleabbrev>Configuration Management Settings</titleabbrev>
++++
You can set the following `xpack.management` settings in `logstash.yml` to
enable
<<logstash-centralized-pipeline-management,centralized pipeline management>>.
For more information about configuring Logstash, see <<logstash-settings-file>>.
The following example shows basic settings that assume {es} and {kib} are
installed on the localhost with basic AUTH enabled, but no SSL. If you're using
SSL, you need to specify additional SSL settings.
[source,shell]
-----
xpack.management.enabled: true
xpack.management.elasticsearch.url: "http://localhost:9200/"
xpack.management.elasticsearch.username: logstash_admin_user
xpack.management.elasticsearch.password: t0p.s3cr3t
xpack.management.logstash.poll_interval: 5s
xpack.management.pipeline.id: ["apache", "cloudwatch_logs"]
-----
`xpack.management.enabled`::
Set to `true` to enable {xpack} centralized configuration management for
Logstash.
`xpack.management.logstash.poll_interval`::
How often the Logstash instance polls for pipeline changes from Elasticsearch.
The default is 5s.
`xpack.management.pipeline.id`::
Specify a comma-separated list of pipeline IDs to register for centralized
pipeline management. After changing this setting, you need to restart Logstash
to pick up changes.
`xpack.management.elasticsearch.url`::
The {es} instance that will store the Logstash pipeline configurations and
metadata. This might be the same {es} instance specified in the `outputs`
section in your Logstash configuration, or a different one. Defaults to
`http://localhost:9200`.
`xpack.management.elasticsearch.username` and `xpack.management.elasticsearch.password`::
If your {es} cluster is protected with basic authentication, these settings
provide the username and password that the Logstash instance uses to
authenticate for accessing the configuration data. The username you specify here
should have the `logstash_admin` role, which provides access to `.logstash-*`
indices for managing configurations.
`xpack.management.elasticsearch.ssl.ca`::
Optional setting that enables you to specify a path to the `.pem` file for the
certificate authority for your {es} instance.
`xpack.management.elasticsearch.ssl.truststore.path`::
Optional setting that provides the path to the Java keystore (JKS) to validate
the server's certificate.
`xpack.management.elasticsearch.ssl.truststore.password`::
Optional setting that provides the password to the truststore.
`xpack.management.elasticsearch.ssl.keystore.path`::
Optional setting that provides the path to the Java keystore (JKS) to validate
the client's certificate.
`xpack.management.elasticsearch.ssl.keystore.password`::
Optional setting that provides the password to the keystore.

View file

@ -0,0 +1,77 @@
[role="xpack"]
[[monitoring-settings]]
==== Monitoring Settings in Logstash
++++
<titleabbrev>Monitoring Settings</titleabbrev>
++++
You can set the following `xpack.monitoring` settings in `logstash.yml` to
control how monitoring data is collected from your Logstash nodes. However, the
defaults work best in most circumstances. For more information about configuring
Logstash, see <<logstash-settings-file>>.
[float]
[[monitoring-general-settings]]
===== General Monitoring Settings
`xpack.monitoring.enabled`::
Monitoring is disabled by default. Set to `true` to enable {xpack} monitoring.
`xpack.monitoring.elasticsearch.url`::
The {es} instances that you want to ship your Logstash metrics to. This might be
the same {es} instance specified in the `outputs` section in your Logstash
configuration, or a different one. This is *not* the URL of your dedicated
monitoring cluster. Even if you are using a dedicated monitoring cluster, the
Logstash metrics must be routed through your production cluster. You can specify
a single host as a string, or specify multiple hosts as an array. Defaults to
`http://localhost:9200`.
`xpack.monitoring.elasticsearch.username` and `xpack.monitoring.elasticsearch.password`::
If your {es} is protected with basic authentication, these settings provide the
username and password that the Logstash instance uses to authenticate for
shipping monitoring data.
[float]
[[monitoring-collection-settings]]
===== Monitoring Collection Settings
`xpack.monitoring.collection.interval`::
Controls how often data samples are collected and shipped on the Logstash side.
Defaults to `10s`. If you modify the collection interval, set the
`xpack.monitoring.min_interval_seconds` option in `kibana.yml` to the same value.
[float]
[[monitoring-ssl-settings]]
===== {monitoring} TLS/SSL Settings
You can configure the following Transport Layer Security (TLS) or
Secure Sockets Layer (SSL) settings. For more information, see
<<ls-monitoring-user>>.
`xpack.monitoring.elasticsearch.ssl.ca`::
Optional setting that enables you to specify a path to the `.pem` file for the
certificate authority for your {es} instance.
`xpack.monitoring.elasticsearch.ssl.truststore.path`::
Optional setting that provides the path to the Java keystore (JKS) to validate
the server's certificate.
`xpack.monitoring.elasticsearch.ssl.truststore.password`::
Optional setting that provides the password to the truststore.
`xpack.monitoring.elasticsearch.ssl.keystore.path`::
Optional setting that provides the path to the Java keystore (JKS) to validate
the client's certificate.
`xpack.monitoring.elasticsearch.ssl.keystore.password`::
Optional setting that provides the password to the keystore.

View file

@ -0,0 +1,10 @@
[role="xpack"]
[[settings-xpack]]
=== {xpack} Settings in Logstash
++++
<titleabbrev>{xpack} Settings</titleabbrev>
++++
include::{asciidoc-dir}/../../shared/settings.asciidoc[]
For more Logstash configuration settings, see <<logstash-settings-file>>.

Binary file not shown.

After

Width:  |  Height:  |  Size: 36 KiB

View file

@ -0,0 +1,17 @@
[role="xpack"]
[[setup-xpack]]
== Setting Up X-Pack
{xpack} is an Elastic Stack extension that provides security, alerting,
monitoring, machine learning, pipeline management, and many other capabilities.
By default, when you install Logstash, {xpack} is installed.
If you want to try all of the {xpack} features, you can
{xpack-ref}/license-management.html[start a 30-day trial]. At the end of the
trial period, you can purchase a subscription to keep using the full
functionality of the {xpack} components. For more information, see https://www.elastic.co/subscriptions.
include::configuring-xls.asciidoc[]
include::{xls-repo-dir}/management/configuring-centralized-pipelines.asciidoc[]
include::{xls-repo-dir}/monitoring/configuring-logstash.asciidoc[]
include::{xls-repo-dir}/security/logstash.asciidoc[]

View file

@ -0,0 +1,96 @@
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
require "logstash/bootstrap_check/default_config"
require "logstash/logging/logger"
require "logstash/errors"
java_import java.util.concurrent.TimeUnit
module LogStash
  module ConfigManagement
    # Override the default Logstash's bootstrap check
    # instead of making the `-e` and the `-f` mandatory we rely
    # on the elasticsearch source.
    #
    # If we don't use config management we checks for CLI flags/logstash.yml options
    class BootstrapCheck
      include LogStash::Util::Loggable

      # Validates startup settings when Elasticsearch is the config store.
      #
      # Raises LogStash::BootstrapCheckError when a conflicting config source
      # is supplied (`-e`, `-t`, `--modules`, or files under `path.config`),
      # when no pipeline IDs are configured, or when configured pipeline IDs
      # are duplicated. As a side effect, forces `config.reload.automatic` on
      # and sets `config.reload.interval` so the agent polls for changes.
      def self.check(settings)
        check_path_config(settings)

        if settings.get("config.string")
          raise LogStash::BootstrapCheckError, "You cannot use -e since Elasticsearch is configured as the config store."
        end

        if settings.get("config.test_and_exit")
          raise LogStash::BootstrapCheckError, "You cannot use -t since Elasticsearch is configured as the config store"
        end

        if !settings.get("modules.cli").empty? || !settings.get("modules").empty?
          raise LogStash::BootstrapCheckError, "You cannot use --modules since Elasticsearch is configured as the config store"
        end

        interval = settings.get("xpack.management.logstash.poll_interval")

        # override core settings, so the agent will trigger the auto reload
        settings.set("config.reload.automatic", true)
        settings.set("config.reload.interval", interval)

        pipeline_ids = settings.get("xpack.management.pipeline.id")

        if pipeline_ids.reject { |id| id.strip.empty? }.empty?
          raise LogStash::BootstrapCheckError, "You need to specify the ID of the pipelines with the `xpack.management.pipeline.id` options in your logstash.yml"
        end

        duplicate_ids = find_duplicate_ids(pipeline_ids)
        if duplicate_ids.size > 0
          raise LogStash::BootstrapCheckError, "Duplicate pipeline ids found in `xpack.management.pipeline.id`, defined IDs must be unique, Duplicated ids: #{duplicate_ids.join(', ')}"
        end

        # NOTE(review): the interval is logged with an "ns" suffix — presumably
        # the setting value is in nanoseconds; confirm against the settings class.
        logger.info("Using Elasticsearch as config store", :pipeline_id => pipeline_ids, :poll_interval => "#{interval}ns")
      end

      # Raises when config files match `path.config`: file-based configs cannot
      # be combined with Elasticsearch as the config store.
      def self.check_path_config(settings)
        path_config = settings.get("path.config")
        return if (path_config.nil? || path_config.empty?)

        configs_count = Dir.glob(path_config).size
        return if configs_count.zero?

        msg = sprintf("There are config files (%i) in the '%s' folder.", configs_count, path_config)
        msg.concat(" Elasticsearch is configured as the config store so configs cannot be sourced")
        msg.concat(" via the command line with -f or via logstash.yml with path.config")
        logger.error(msg)
        raise LogStash::BootstrapCheckError, msg
      end

      # Returns the IDs that collide after case-insensitive, whitespace-trimmed
      # normalization, keeping each duplicate's original capitalization.
      def self.find_duplicate_ids(ids)
        normalized_ids = ids.dup
          .map(&:to_s)
          .map(&:strip)
          .map(&:downcase)
          .group_by { |id| id }

        duplicate_ids = []
        ids.each do |id|
          # Bug fix: look up with the same normalization (to_s + strip + downcase)
          # used to build the groups. The original `id.downcase` raised KeyError
          # for IDs with surrounding whitespace (stripped group keys never match)
          # and NoMethodError for non-string IDs.
          if normalized_ids.fetch(id.to_s.strip.downcase).size > 1
            duplicate_ids << id
          end
        end

        # We normalize the pipeline id into lowercase string,
        # this allow us to detect weird capitalized ids and all lowercase ids.
        # But when reporting the ids, it's more useful to the user
        # report the `uniq` with the appropriate capitalization.
        #
        # Example:
        # pipeline1, pipeline1 => ["pipeline1"]
        # pipeline1, PIPELINE1 => ["pipeline1", "PIPELINE1"]
        duplicate_ids.uniq
      end
    end
  end
end

View file

@ -0,0 +1,197 @@
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
require "logstash/config/pipeline_config"
require "logstash/config/source/base"
require "logstash/config/source_loader"
require "logstash/logging/logger"
require "logstash/outputs/elasticsearch"
require "logstash/json"
require 'helpers/elasticsearch_options'
require "license_checker/licensed"
module LogStash
module ConfigManagement
# Remote configuration source: when centralized config management
# (`xpack.management.enabled`) is on, pipeline configurations are fetched
# from the `.logstash` index in Elasticsearch instead of local files.
class ElasticsearchSource < LogStash::Config::Source::Base
  include LogStash::Util::Loggable, LogStash::LicenseChecker::Licensed,
    LogStash::Helpers::ElasticsearchOptions

  class RemoteConfigError < LogStash::Error; end

  PIPELINE_INDEX = ".logstash"
  PIPELINE_TYPE = "doc"
  VALID_LICENSES = %w(trial standard gold platinum)
  FEATURE_INTERNAL = 'management'
  FEATURE_EXTERNAL = 'logstash'
  # Pipeline-level settings that may be overridden per pipeline from the
  # remote document; anything else is ignored with a warning.
  SUPPORTED_PIPELINE_SETTINGS = %w(
    pipeline.workers
    pipeline.batch.size
    pipeline.batch.delay
    queue.type
    queue.max_bytes
    queue.checkpoint.writes
  )

  # @raise [ArgumentError] when management is enabled but no password is set
  # @raise [LogStash::LicenseChecker::LicenseError] when the license is invalid
  def initialize(settings)
    super(settings)

    if @settings.get("xpack.management.enabled") && !@settings.get_setting("xpack.management.elasticsearch.password").set?
      raise ArgumentError.new("You must set the password using the \"xpack.management.elasticsearch.password\" in logstash.yml")
    end

    @es_options = es_options_from_settings('management', settings)

    if enabled?
      setup_license_checker(FEATURE_INTERNAL)
      license_check(true)
    end
  end

  # This source is active only when config management is enabled.
  def match?
    @settings.get("xpack.management.enabled")
  end

  def config_conflict?
    false
  end

  # Fetches the pipeline configurations from Elasticsearch.
  # On a license error, serves the previously fetched configurations (if
  # any) so a temporary license hiccup does not kill running pipelines.
  # @return [Array<LogStash::Config::PipelineConfig>]
  # @raise [RemoteConfigError] when the server returns an error or a
  #   malformed document structure
  def pipeline_configs
    logger.trace("Fetch remote config pipeline", :pipeline_ids => pipeline_ids)

    begin
      license_check(true)
    rescue LogStash::LicenseChecker::LicenseError => e
      if @cached_pipelines.nil?
        raise e
      else
        return @cached_pipelines
      end
    end

    response = fetch_config(pipeline_ids)

    if response["error"]
      # Fixed: error message previously read "Cannot find find configuration".
      raise RemoteConfigError, "Cannot find configuration for pipeline_id: #{pipeline_ids}, server returned status: `#{response["status"]}`, message: `#{response["error"]}`"
    end

    if response["docs"].nil?
      logger.debug("Server returned an unknown or malformed document structure", :response => response)
      raise RemoteConfigError, "Elasticsearch returned an unknown or malformed document structure"
    end

    # Cache pipelines to handle the case where a remote configuration error
    # would otherwise render a pipeline unusable: it is not reloadable.
    @cached_pipelines = response["docs"].collect do |document|
      get_pipeline(document)
    end.compact
  end

  # Builds a PipelineConfig from a single `_mget` document, or nil when
  # the document was not found.
  # @raise [RemoteConfigError] when the stored configuration is empty
  def get_pipeline(response)
    pipeline_id = response["_id"]

    if response["found"] == false
      logger.debug("Could not find a remote configuration for a specific `pipeline_id`", :pipeline_id => pipeline_id)
      return nil
    end

    config_string = response.fetch("_source", {})["pipeline"]

    raise RemoteConfigError, "Empty configuration for pipeline_id: #{pipeline_id}" if config_string.nil? || config_string.empty?

    config_part = org.logstash.common.SourceWithMetadata.new("x-pack-config-management", pipeline_id.to_s, config_string)

    # We don't support multiple pipelines, so use the global settings from the logstash.yml file
    settings = @settings.clone
    settings.set("pipeline.id", pipeline_id)

    # override global settings with pipeline settings from ES, if any
    pipeline_settings = response["_source"]["pipeline_settings"]
    unless pipeline_settings.nil?
      pipeline_settings.each do |setting, value|
        if SUPPORTED_PIPELINE_SETTINGS.include? setting
          settings.set(setting, value) if value
        else
          logger.warn("Ignoring unsupported or unknown pipeline settings '#{setting}'")
        end
      end
    end

    LogStash::Config::PipelineConfig.new(self.class.name, pipeline_id.to_sym, config_part, settings)
  end

  # This is a bit of a hack until we refactor the ElasticSearch plugins
  # and extract correctly the http client, right now I am using the plugins
  # to deal with the certificates and the other SSL options
  #
  # But we have to silence the logger from the plugin, to make sure the
  # log originate from the `ElasticsearchSource`
  def build_client
    es = LogStash::Outputs::ElasticSearch.new(@es_options)
    new_logger = logger
    es.instance_eval { @logger = new_logger }
    es.build_client
  end

  # Issues a single `_mget` for all managed pipeline ids.
  def fetch_config(pipeline_ids)
    request_body_string = LogStash::Json.dump({ "docs" => pipeline_ids.collect { |pipeline_id| { "_id" => pipeline_id } } })
    client.post(config_path, {}, request_body_string)
  end

  def config_path
    "#{PIPELINE_INDEX}/#{PIPELINE_TYPE}/_mget"
  end

  # Maps the current x-pack/license information to the license state
  # consumed by the Licensed mixin.
  def populate_license_state(xpack_info)
    if !xpack_info.installed?
      {
        :state => :error,
        :log_level => :error,
        # NOTE(review): message says "the monitoring feature" although this is
        # the config management feature — looks copied from monitoring; verify.
        :log_message => "X-Pack is installed on Logstash but not on Elasticsearch. Please install X-Pack on Elasticsearch to use the monitoring feature. Other features may be available."
      }
    elsif !xpack_info.feature_enabled?("security")
      {
        :state => :error,
        :log_level => :error,
        :log_message => "X-Pack Security needs to be enabled in Elasticsearch. Please set xpack.security.enabled: true in elasticsearch.yml."
      }
    elsif !xpack_info.license_available?
      {
        :state => :error,
        :log_level => :error,
        # NOTE(review): refers to xpack.monitoring.elasticsearch settings in the
        # config-management feature — confirm whether it should say xpack.management.
        :log_message => 'Configuration Management is not available: License information is currently unavailable. Please make sure you have added your production elasticsearch connection info in the xpack.monitoring.elasticsearch settings.'
      }
    elsif !xpack_info.license_one_of?(VALID_LICENSES)
      {
        :state => :error,
        :log_level => :error,
        :log_message => "Configuration Management is not available: #{xpack_info.license_type} is not a valid license for this feature."
      }
    elsif !xpack_info.license_active?
      {
        :state => :ok,
        :log_level => :warn,
        :log_message => 'Configuration Management feature requires a valid license. You can continue to monitor Logstash, but please contact your administrator to update your license'
      }
    else
      unless xpack_info.feature_enabled?(FEATURE_EXTERNAL)
        logger.warn('Monitoring installed and enabled in Logstash, but not enabled in Elasticsearch')
      end

      { :state => :ok, :log_level => :info, :log_message => 'Configuration Management License OK' }
    end
  end

  alias_method :enabled?, :match?

  private

  def pipeline_ids
    @settings.get("xpack.management.pipeline.id")
  end

  def client
    @client ||= build_client
  end
end
end
end

View file

@ -0,0 +1,43 @@
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
require "logstash/environment"
require "logstash/universal_plugin"
require "logstash/logging/logger"
require "logstash/runner"
require "config_management/hooks"
require "config_management/elasticsearch_source"
require "config_management/bootstrap_check"
module LogStash
module ConfigManagement
# Universal plugin that wires the centralized config-management feature
# into the Logstash runner and declares its settings.
class Extension < LogStash::UniversalPlugin
  include LogStash::Util::Loggable

  # Attach the config-management hooks to the Logstash runner lifecycle.
  def register_hooks(hooks)
    hooks.register_hooks(LogStash::Runner, Hooks.new)
  end

  # Declares the `xpack.management.*` settings (feature disabled by default).
  # NOTE: the method name (`additionals_settings`) is part of the plugin
  # extension API and cannot be renamed here.
  def additionals_settings(settings)
    logger.trace("Registering additionals settings")
    [
      LogStash::Setting::Boolean.new("xpack.management.enabled", false),
      LogStash::Setting::TimeValue.new("xpack.management.logstash.poll_interval", "5s"),
      LogStash::Setting::ArrayCoercible.new("xpack.management.pipeline.id", String, ["main"]),
      LogStash::Setting::NullableString.new("xpack.management.elasticsearch.username", "logstash_system"),
      LogStash::Setting::NullableString.new("xpack.management.elasticsearch.password"),
      LogStash::Setting::ArrayCoercible.new("xpack.management.elasticsearch.url", String, ["https://localhost:9200"]),
      LogStash::Setting::NullableString.new("xpack.management.elasticsearch.ssl.ca"),
      LogStash::Setting::NullableString.new("xpack.management.elasticsearch.ssl.truststore.path"),
      LogStash::Setting::NullableString.new("xpack.management.elasticsearch.ssl.truststore.password"),
      LogStash::Setting::NullableString.new("xpack.management.elasticsearch.ssl.keystore.path"),
      LogStash::Setting::NullableString.new("xpack.management.elasticsearch.ssl.keystore.password"),
      LogStash::Setting::Boolean.new("xpack.management.elasticsearch.sniffing", false)
    ].each { |setting| settings.register(setting) }
  rescue => e
    logger.error("Cannot register new settings", :message => e.message, :backtrace => e.backtrace)
    raise e
  end
end
end
end

View file

@ -0,0 +1,53 @@
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
require "logstash/runner"
require "logstash/logging/logger"
require "config_management/bootstrap_check"
require "config_management/elasticsearch_source"
require "logstash/config/source_loader"
require "logstash/config/source/local"
require "logstash/config/source/multi_local"
require "logstash/config/source/modules"
module LogStash
module ConfigManagement
# Runner lifecycle hooks that swap Logstash's local configuration
# handling for the Elasticsearch-backed config store when
# `xpack.management.enabled` is set.
class Hooks
  include LogStash::Util::Loggable

  # Replace the default "you must provide a config" bootstrap check with
  # the config-management one, so Logstash may start without -e/-f while
  # the ES-related parameters still get validated.
  def before_bootstrap_checks(runner)
    return unless management?(runner)

    checks = LogStash::Runner::DEFAULT_BOOTSTRAP_CHECKS.dup
    checks.delete(LogStash::BootstrapCheck::DefaultConfig)
    checks << LogStash::ConfigManagement::BootstrapCheck
    runner.bootstrap_checks = checks
  end

  # Once the bootstrap checks have passed, drop every local config source
  # and use Elasticsearch as the single source of truth.
  def after_bootstrap_checks(runner)
    return unless management?(runner)

    logger.debug("Removing the `Logstash::Config::Source::Local` and replacing it with `ElasticsearchSource`")
    [
      LogStash::Config::Source::Local,
      LogStash::Config::Source::MultiLocal,
      LogStash::Config::Source::Modules
    ].each { |source_class| runner.source_loader.remove_source(source_class) }
    runner.source_loader.add_source(LogStash::ConfigManagement::ElasticsearchSource.new(runner.settings))
  end

  private

  def management?(runner)
    runner.setting("xpack.management.enabled")
  end
end
end
end

View file

@ -0,0 +1,115 @@
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
module LogStash
  module Helpers
    # Shared helpers to build the option hash handed to the internal
    # LogStash::Outputs::ElasticSearch client used by the x-pack features
    # (monitoring, config management, module license checks).
    module ElasticsearchOptions
      extend self

      # Setting suffixes that, when present under
      # `xpack.<feature>.elasticsearch.`, mean the feature was explicitly
      # configured in logstash.yml.
      ES_SETTINGS = %w(ssl.ca ssl.truststore.path ssl.keystore.path url username password)

      # Retrieve elasticsearch options from either the feature-specific
      # settings, or from the modules settings when the feature itself was
      # not configured in logstash.yml.
      def es_options_from_settings_or_modules(feature, settings)
        if only_modules_configured?(feature, settings)
          es_options_from_modules(settings)
        else
          es_options_from_settings(feature, settings)
        end
      end

      # Builds the client options from the `xpack.<feature>.elasticsearch.*`
      # settings of logstash.yml.
      def es_options_from_settings(feature, settings)
        prefix = "xpack.#{feature}.elasticsearch"

        options = {
          'hosts' => settings.get("#{prefix}.url"),
          'user' => settings.get("#{prefix}.username"),
          'password' => settings.get("#{prefix}.password"),
          'sniffing' => settings.get("#{prefix}.sniffing")
        }

        certificate_authority = settings.get("#{prefix}.ssl.ca")
        if certificate_authority
          options['cacert'] = certificate_authority
          options['ssl'] = true
        end

        truststore_path = settings.get("#{prefix}.ssl.truststore.path")
        if truststore_path
          options['truststore'] = truststore_path
          options['truststore_password'] = settings.get("#{prefix}.ssl.truststore.password")
          options['ssl'] = true
        end

        keystore_path = settings.get("#{prefix}.ssl.keystore.path")
        if keystore_path
          options['keystore'] = keystore_path
          options['keystore_password'] = settings.get("#{prefix}.ssl.keystore.password")
          options['ssl'] = true
        end

        options
      end

      # Elasticsearch settings can also be extracted from the modules
      # settings inside the configuration. Only a subset of options is
      # supported, since the modules security configuration differs.
      def es_options_from_modules(settings)
        module_settings = extract_module_settings(settings)
        return nil if module_settings.empty?

        options = {}

        hosts_setting = LogStash::Setting::SplittableStringArray.new("var.elasticsearch.hosts", String, ["localhost:9200"])
        raw_hosts = module_settings[hosts_setting.name]
        hosts_setting.set(raw_hosts) unless raw_hosts.nil?
        options['hosts'] = hosts_setting.value

        options['user'] = module_settings['var.elasticsearch.username']
        password = module_settings['var.elasticsearch.password']
        options['password'] = password.value unless password.nil?

        # Sniffing is not supported for modules.
        options['sniffing'] = false

        certificate_authority = module_settings["var.elasticsearch.ssl.certificate_authority"]
        if certificate_authority
          options['cacert'] = certificate_authority
          options['ssl'] = true
        end

        options
      end

      # True when modules are configured but the feature itself was not
      # explicitly configured in logstash.yml.
      # @param feature [String] feature to be checked
      # @param settings [LogStash::Settings] Logstash settings
      def only_modules_configured?(feature, settings)
        modules_configured?(settings) && !feature_configured?(feature, settings)
      end

      # If no settings are configured, assume the feature has not been set
      # up. The assumption is that with security in place at least one
      # setting (password or certificates) must be configured; if security
      # is not set up and defaults 'just work' for monitoring, this will
      # need to be reconsidered.
      def feature_configured?(feature, settings)
        ES_SETTINGS.any? { |option| settings.set?("xpack.#{feature}.elasticsearch.#{option}") }
      end

      def modules_configured?(settings)
        !extract_module_settings(settings).nil?
      end

      # Read module settings from the yaml file. This should be refactored
      # in Logstash proper to allow for DRY-ing up these settings.
      def extract_module_settings(settings)
        cli_settings = settings.get("modules.cli")
        yml_settings = settings.get("modules")
        modules_array =
          if cli_settings.empty? && yml_settings.empty?
            yml_settings
          else
            LogStash::Modules::SettingsMerger.merge(cli_settings, yml_settings)
          end
        LogStash::Modules::SettingsMerger.merge_cloud_settings(modules_array.first, settings) unless modules_array.empty?
        # Only one module is supported in the initial rollout: use the first one found.
        modules_array.first
      end
    end
  end
end

View file

@ -0,0 +1,67 @@
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
require "logstash/json"
require 'license_checker/license_reader'
require 'license_checker/x_pack_info'
java_import java.util.concurrent.Executors
java_import java.util.concurrent.TimeUnit
module LogStash
module LicenseChecker
class LicenseError < StandardError; end
# Polls the license reader on a fixed schedule and notifies registered
# observers (via Ruby's Observable) whenever the x-pack info changes.
class LicenseManager
  include LogStash::Util::Loggable, Observable

  attr_reader :last_updated

  # NOTE(review): the parenthesis-less multiple assignment builds an Array
  # constant; it appears unused within this class.
  LICENSE_TYPES = :trial, :basic, :standard, :gold, :platinum

  # @param reader [LicenseReader] source of x-pack/license information
  # @param feature [String] feature name, used to label the refresh thread
  # @param refresh_period [Integer] how often to re-fetch license info
  # @param refresh_unit [java.util.concurrent.TimeUnit] unit of refresh_period
  def initialize (reader, feature, refresh_period=30, refresh_unit=TimeUnit::SECONDS)
    @license_reader = reader
    @feature = feature
    # Fetch once synchronously so callers see a populated state immediately,
    # then schedule the periodic background refresh.
    fetch_xpack_info
    if @executor.nil?
      @executor = Executors.new_single_thread_scheduled_executor{ |runnable| create_daemon_thread (runnable)}
      @executor.schedule_at_fixed_rate(Proc.new{fetch_xpack_info}, refresh_period, refresh_period, refresh_unit)
    end
  end

  # Latest known x-pack info (never nil after construction).
  def current_xpack_info
    @xpack_info
  end

  # Re-reads license information; a nil answer from the reader is treated
  # as "x-pack not installed".
  def fetch_xpack_info
    xpack_info = @license_reader.fetch_xpack_info
    # TODO: we should be more lenient when we're having issues
    xpack_info ||= XPackInfo.xpack_not_installed
    update_xpack_info(xpack_info)
  end

  private

  # Stores the new info and notifies observers, but only when it actually
  # changed (XPackInfo implements value equality).
  def update_xpack_info(xpack_info)
    return if xpack_info == @xpack_info
    @xpack_info = xpack_info
    logger.debug('updating observers of xpack info change') if logger.debug?
    changed
    notify_observers(current_xpack_info)
  end

  # Create a daemon thread for the license checker to stop this thread from keeping logstash running in the
  # event of shutdown
  def create_daemon_thread(runnable)
    thread = java.lang.Thread.new(runnable, "#{@feature}-license-manager")
    thread.set_daemon(true)
    thread
  end
end
end
end

View file

@ -0,0 +1,72 @@
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
require 'logstash/logging/logger'
require 'logstash/outputs/elasticsearch'
require 'logstash/json'
require 'json'
module LogStash
module LicenseChecker
# Thin wrapper around the Elasticsearch output plugin's HTTP client,
# used to query the `_xpack` endpoint for license/feature information.
class LicenseReader
  include LogStash::Util::Loggable

  # Status codes Elasticsearch returns for `_xpack` when x-pack is not
  # installed: 5.x answers 400, 6.x answers 404.
  XPACK_MISSING_STATUS_CODES = [400, 404]

  def initialize(settings, feature, options)
    @namespace = "xpack.#{feature}"
    @settings = settings
    @es_options = options
  end

  ##
  # Fetches an XPackInfo, or logs and returns nil when the license server
  # cannot be reached.
  # @return [XPackInfo, nil]
  def fetch_xpack_info
    begin
      response = client.get('_xpack')
      # TODO: do we need both this AND the exception-based control flow??
      return XPackInfo.xpack_not_installed if xpack_missing_response?(response)
      XPackInfo.from_es_response(response)
    rescue ::LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError => e
      # Unexpected response codes are re-raised so the method-level rescue
      # below logs them and returns nil.
      raise unless XPACK_MISSING_STATUS_CODES.include?(e.response_code)
      XPackInfo.xpack_not_installed
    end
  rescue => e
    logger.error('Unable to retrieve license information from license server', :message => e.message, :class => e.class.name, :backtrace => e.backtrace)
    nil
  end

  ##
  # @api private
  def client
    @client ||= build_client
  end

  private

  # This is a bit of a hack until we refactor the ElasticSearch plugins
  # and extract correctly the http client; we reuse the plugin so that it
  # handles certificates and the other SSL options, but silence its
  # logger so log entries appear to originate from this reader.
  def build_client
    plugin = LogStash::Outputs::ElasticSearch.new(@es_options)
    reader_logger = logger
    plugin.instance_eval { @logger = reader_logger }
    plugin.build_client
  end

  # True when the response body itself carries an "x-pack missing" status
  # (400 on 5.x, 404 on 6.x).
  def xpack_missing_response?(response)
    status = response['status']
    status ? XPACK_MISSING_STATUS_CODES.include?(status) : false
  end
end
end
end

View file

@ -0,0 +1,90 @@
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
require "logstash/json"
require 'license_checker/license_manager'
require 'license_checker/x_pack_info'
require 'license_checker/license_reader'
java_import java.util.concurrent.TimeUnit
# Mixin to add License Checking functionality to a feature:
# To add license checking:
# - Include the Licensed mixin to the class under license
# - Call setup_license_checker to configure the license checker functionality - this will load up the license
# and setup the regular poll to check
# - Any features that require a license check by passing it as a block to 'with_license_check', the block will be
# executed if the license check is ok, but will either be ignored (and logged), or an error raised if the license
# state is invalid.
# - To do a license check without passing a block, use 'license_check' which returns true if the check is ok, and either
# returns false or raises, depending on the parameter passed in.
# Classes that include this mixin should implement 'populate_license_state', and fill in the license_state object as
# :state - :ok or :error. If the :state is ok then license checks will succeed, if :error, then they will not
# :log_level - When the license state changes, a log entry is emitted - set this to the appropriate level for the license state
# (this is not used to set the state, so if, for example the license functionality allows expired licenses
# to function as is, set the state to ok, and the log_level to :warn)
# :log_message - Message to log when the license state changes
module LogStash
module LicenseChecker
module Licensed
  include LogStash::Util::Loggable

  # Wires up license checking for the including feature: loads the current
  # license state once, then registers this object as an observer so
  # `update_license_state` runs whenever the license info changes.
  # @param feature [String] feature name used for logging/labeling
  # @param refresh_period [Integer] polling period for license refresh
  # @param refresh_unit [java.util.concurrent.TimeUnit] unit of refresh_period
  def setup_license_checker(feature, refresh_period=30, refresh_unit=TimeUnit::SECONDS)
    @feature = feature
    license_manager = LogStash::LicenseChecker::LicenseManager.new(license_reader, feature, refresh_period, refresh_unit)
    xpack_info = license_manager.current_xpack_info
    # Populate @license_state before registering as observer, so the state
    # is valid even if no change notification ever arrives.
    update_license_state(xpack_info)
    license_manager.add_observer(self, :update_license_state)
  end

  # Classes that include Licensed mixin should override this method, populating the values of state, log_level and log_message
  # appropriately for how the license is to be enforced for that feature.
  # @param [LogStash::LicenseChecker::XPackInfo] License Info object
  # @return [Hash] The overriding class should construct an hash populated as follows:
  #         :state       - State of the license, should a license check succeed or fail. :ok or :error
  #         :log_message - Message to be logged when the license changes for this feature
  #         :log_level   - Level to log the license change message at - symbolized version of method names
  #                        for [LogStash::Logging::Logger] - eg :info, :warn, :error, etc
  def populate_license_state(xpack_info)
    { :state => :error, :log_level => :error, :log_message => "Licensing is not currently setup for #{@feature}, please contact support"}
  end

  # Runs the given block subject to the current license state.
  # Return value semantics (preserved exactly — callers rely on them):
  #   - state :ok, block given    -> the block's result
  #   - state :ok, no block       -> true
  #   - state not :ok, no block   -> false (or raises when raise_on_error)
  #   - state not :ok, block given -> nil (or raises when raise_on_error)
  def with_license_check(raise_on_error=false)
    current_license_state = get_current_license_state
    message = current_license_state[:log_message]
    # This logs the call, as long as the last logged call wasn't the same
    logger.send current_license_state[:log_level], message if message != @last_message
    @last_message = message
    if current_license_state[:state] == :ok
      block_given? ? yield : true
    else
      raise LogStash::LicenseChecker::LicenseError.new(message) if raise_on_error
      false unless block_given?
    end
  end

  alias_method :license_check, :with_license_check

  # Builds the reader used to talk to the license endpoint; assumes the
  # including class has set @settings and @es_options before this is called.
  def license_reader
    LogStash::LicenseChecker::LicenseReader.new(@settings, @feature, @es_options)
  end

  # Observer callback: recompute this feature's license state from the
  # freshly fetched x-pack info.
  def update_license_state(xpack_info)
    logger.debug("updating licensing state #{xpack_info}")
    @license_state = populate_license_state(xpack_info)
  end

  private

  # Returns a copy so callers cannot mutate the shared state hash.
  def get_current_license_state
    @license_state.dup
  end
end
end
end

View file

@ -0,0 +1,93 @@
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
require "logstash/json"
require 'license_checker/license_reader'
java_import java.util.concurrent.Executors
java_import java.util.concurrent.TimeUnit
module LogStash
module LicenseChecker
# Immutable value object describing the x-pack installation state,
# license, and feature flags as reported by Elasticsearch's `_xpack` API.
class XPackInfo
  include LogStash::Util::Loggable

  LICENSE_TYPES = :trial, :basic, :standard, :gold, :platinum

  # @param license [Hash, nil] the `license` hash from the _xpack response
  # @param features [Hash, nil] the `features` hash from the _xpack response
  # @param installed [Boolean] whether x-pack is installed on Elasticsearch
  def initialize(license, features = nil, installed=true)
    @license = license
    @installed = installed
    @features = features

    freeze
  end

  # Exposes dynamic `license_<field>` readers (e.g. `license_type`,
  # `license_status`) backed by the license hash; they return nil when no
  # license is loaded. Accepts args/block so an accidental call with
  # arguments reaches super instead of raising the wrong ArgumentError.
  def method_missing(meth, *args, &block)
    if meth.to_s.match(/license_(.+)/)
      return nil if @license.nil?
      @license[$1]
    else
      super
    end
  end

  # Keep respond_to? consistent with the dynamic `license_*` readers.
  def respond_to_missing?(meth, include_private = false)
    !!(meth.to_s =~ /license_(.+)/) || super
  end

  def installed?
    @installed
  end

  def license_available?
    !@license.nil?
  end

  def license_active?
    return false if @license.nil?
    license_status == 'active'
  end

  # @param types [Array] license types considered valid
  def license_one_of?(types)
    return false if @license.nil?
    types.include?(license_type)
  end

  # True when the feature is both available and enabled. Safe to call when
  # no feature information was loaded (e.g. x-pack not installed).
  def feature_enabled?(feature)
    return false if @features.nil?
    return false unless @features.include?(feature)
    return false unless @features[feature].fetch('available', false)

    @features[feature].fetch('enabled', false)
  end

  def to_s
    "installed:#{installed?},
license:#{@license.nil? ? '<no license loaded>' : @license.to_s},
features:#{@features.nil? ? '<no features loaded>' : @features.to_s},
last_updated:#{@last_updated}}"
  end

  # Value equality over installed/license/features, used by the license
  # manager to suppress redundant observer notifications.
  def ==(other)
    return false if other.nil?
    return false unless other.instance_variable_get(:@installed) == @installed
    return false unless other.instance_variable_get(:@license) == @license
    return false unless other.instance_variable_get(:@features) == @features
    true
  end

  # Builds an XPackInfo from the raw `_xpack` response hash; a nil or
  # license-less response is logged and treated as "no license loaded".
  def self.from_es_response(es_response)
    if es_response.nil? || es_response['license'].nil?
      logger.warn("Nil response from License Server")
      XPackInfo.new(nil)
    else
      license = es_response.fetch('license', {}).dup.freeze
      features = es_response.fetch('features', {}).dup.freeze
      XPackInfo.new(license, features)
    end
  end

  def self.xpack_not_installed
    XPackInfo.new(nil, nil,false)
  end
end
end
end

View file

@ -0,0 +1,19 @@
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
#
require "logstash/plugins/registry"
require "logstash/modules/util"
require "monitoring/monitoring"
require "monitoring/inputs/metrics"
require "config_management/extension"
require "modules/xpack_scaffold"
# Register the x-pack provided plugins into Logstash's global registry:
# the internal monitoring metrics input and the two universal extensions.
LogStash::PLUGIN_REGISTRY.add(:input, "metrics", LogStash::Inputs::Metrics)
LogStash::PLUGIN_REGISTRY.add(:universal, "monitoring", LogStash::MonitoringExtension)
LogStash::PLUGIN_REGISTRY.add(:universal, "config_management", LogStash::ConfigManagement::Extension)
# The arcsight module ships with x-pack; its availability is gated by a
# license check (see XpackScaffold#is_enabled?).
LogStash::PLUGIN_REGISTRY.add(:modules, "arcsight",
LogStash::Modules::XpackScaffold.new("arcsight",
File.join(File.dirname(__FILE__), "..", "modules", "arcsight", "configuration"),
["basic", "trial", "standard", "gold", "platinum"]
))

View file

@ -0,0 +1,70 @@
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
require 'license_checker/licensed'
require 'helpers/elasticsearch_options'
java_import java.util.concurrent.TimeUnit
module LogStash
module LicenseChecker
# Per-module license gate: verifies that the current x-pack license
# allows a given bundled module (e.g. arcsight) to be enabled.
class ModuleLicenseChecker
  include LogStash::LicenseChecker::Licensed
  include LogStash::Helpers::ElasticsearchOptions
  include LogStash::Util::Loggable

  # @param module_name [String] name of the module being gated
  # @param valid_licenses [Array<String>] license types allowing the module
  def initialize(module_name, valid_licenses)
    @module_name = module_name
    @feature = "#{@module_name}_module"
    @valid_licenses = valid_licenses
    @setup = false
  end

  # Returns true if the license is valid for this module, false otherwise.
  # Lazily performs the license-checker setup on first call.
  def check(settings)
    setup(settings) unless @setup
    license_check
  end

  private

  def setup(settings)
    @es_options = es_options_from_modules(settings) || {}
    # TODO: reduce the refresh period and handle if a license expires while running
    setup_license_checker(@feature, 3650, TimeUnit::DAYS)
    @setup = true
  end

  # Maps the current x-pack/license information to the license state
  # consumed by the Licensed mixin.
  def populate_license_state(xpack_info)
    if !xpack_info.installed?
      error_state("X-Pack is installed on Logstash but not on Elasticsearch. Please install X-Pack on Elasticsearch to use the #{@module_name} module.")
    elsif !xpack_info.license_available?
      error_state("The #{@module_name} module is not available: License information is currently unavailable. Please make sure you have added your production elasticsearch connection information.")
    elsif !xpack_info.license_one_of?(@valid_licenses)
      error_state("The #{@module_name} module is not available: #{xpack_info.license_type} is not a valid license for this feature.")
    elsif !xpack_info.license_active?
      { :state => :ok, :log_level => :warn, :log_message => "The #{@module_name} module requires an active license." }
    else
      { :state => :ok, :log_level => :info, :log_message => "The #{@module_name} module License OK" }
    end
  end

  # Builds the standard error-state hash for the given log message.
  def error_state(message)
    { :state => :error, :log_level => :error, :log_message => message }
  end
end
end
end

View file

@ -0,0 +1,22 @@
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
require "logstash/modules/scaffold"
require "modules/module_license_checker"
module LogStash
module Modules
# Scaffold subclass whose availability is gated by an x-pack license
# check; used to register the x-pack provided modules (e.g. arcsight).
class XpackScaffold < LogStash::Modules::Scaffold
  # @param name [String] module name
  # @param directory [String] path to the module configuration directory
  # @param valid_licenses [Array<String>] license types allowing this module
  def initialize(name, directory, valid_licenses)
    super(name, directory)
    @license_checker = LogStash::LicenseChecker::ModuleLicenseChecker.new(name, valid_licenses)
  end

  # The module is only enabled when the current license is valid for it.
  def is_enabled?(settings)
    @license_checker.check(settings)
  end
end
end
end

View file

@ -0,0 +1,225 @@
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
require "logstash/event"
require "logstash/inputs/base"
require "logstash/instrument/collector"
require 'license_checker/licensed'
require 'helpers/elasticsearch_options'
require "concurrent"
require "thread"
module LogStash module Inputs
# The Metrics input receives periodic metric data snapshots from Logstash core.
# This input is responsible for registering itself to the collector.
# The collector class will periodically emits new snapshot of the system, JVM and other metric data.
# This input further transform it into a `Logstash::Event`, which can be consumed by the shipper and
# shipped to Elasticsearch
class Metrics < LogStash::Inputs::Base
include LogStash::LicenseChecker::Licensed, LogStash::Helpers::ElasticsearchOptions
require "monitoring/inputs/metrics/state_event_factory"
require "monitoring/inputs/metrics/stats_event_factory"
# NOTE(review): these class-level ivars are not referenced anywhere in this
# file — confirm whether external code still uses them before removing.
@pipelines_mutex = Mutex.new
@pipelines = {}
# License types that permit the monitoring feature to run.
VALID_LICENSES = %w(basic trial standard gold platinum)
FEATURE = 'monitoring'
require "monitoring/inputs/timer_task_logger"
attr_reader :queue, :agent
config_name "metrics"
# Polling frequency in seconds on the metric store
config :collection_interval, :type => :integer, :default => 10
# Maximum time in seconds a polling iteration of the metric store can take before it dies
# When it dies, the snapshot will wait the `collection_interval` before doing another snapshot.
config :collection_timeout_interval, :type => :integer, :default => 10 * 60
# Collect per-plugin / queue / other component stats
config :extended_performance_collection, :type => :boolean, :default => true
# Serialize and store the logstash config into logstash-states
config :config_collection, :type => :boolean, :default => true
# Plugin lifecycle hook: captures node-level stats, resolves the Elasticsearch
# options for the monitoring feature, sets up license checking, and prepares
# (but does not yet start) the periodic snapshot poller.
def register
@global_stats = fetch_global_stats
@agent = nil
@settings = LogStash::SETTINGS.clone
@last_updated_pipeline_hashes = []
@es_options = es_options_from_settings_or_modules(FEATURE, @settings)
setup_license_checker(FEATURE)
configure_snapshot_poller
end
# Agent hook (registered in #run): emits a state event for the newly started
# pipeline, subject to the license check.
def pipeline_started(agent, pipeline)
@agent = agent
with_license_check do
update_pipeline_state(pipeline)
end
end
# Builds the Concurrent::TimerTask that periodically snapshots the metric
# store and feeds it to #update; started later by #exec_timer_task.
def configure_snapshot_poller
@timer_task = Concurrent::TimerTask.new({
:execution_interval => @collection_interval,
:timeout_interval => @collection_timeout_interval
}) do
update(metric.collector.snapshot_metric)
end
@timer_task.add_observer(TimerTaskLogger.new)
end
# Plugin entry point: stores the output queue, registers this input as an
# agent hook, starts the poller and blocks until the plugin is stopped.
def run(arg_queue)
@logger.debug("Metric: input started")
@queue = arg_queue
# This must be invoked here because we need a queue to store the data
LogStash::PLUGIN_REGISTRY.hooks.register_hooks(LogStash::Agent, self)
exec_timer_task
sleep_till_stop
end
def exec_timer_task
@timer_task.execute
end
def sleep_till_stop
# Keep this plugin thread alive,
# until we shutdown the metric pipeline
sleep(1) while !stop?
end
def stop
@logger.debug("Metrics input: stopped")
@timer_task.shutdown if @timer_task
end
# Timer-task callback: ships both stats and state documents, license permitting.
def update(snapshot)
with_license_check do
update_stats(snapshot)
update_states
end
end
# Converts a metric-store snapshot into a stats event and emits it. Errors
# during event creation are logged and swallowed so one bad snapshot does
# not kill the poller; backtraces are only logged at debug verbosity.
def update_stats(snapshot)
@logger.debug("Metrics input: received a new snapshot", :created_at => snapshot.created_at, :snapshot => snapshot) if @logger.debug?
begin
event = StatsEventFactory.new(@global_stats, snapshot).make(agent, @extended_performance_collection)
rescue => e
if @logger.debug?
@logger.error("Failed to create monitoring event", :message => e.message, :error => e.class.name, :backtrace => e.backtrace)
else
@logger.error("Failed to create monitoring event", :message => e.message, :error => e.class.name)
end
return
end
remove_reserved_fields(event)
# The back pressure is handled in the collector's
# scheduled task (running into his own thread) if something append to one of the listener it will
# will timeout. In a sane pipeline, with a low traffic of events it shouldn't be a problems.
emit_event(event)
end
# Emits state events for pipelines whose hash changed since the last poll,
# plus a full refresh once the 10-minute window elapses.
def update_states
return unless @agent
# Update once every 10m
time_for_update = @last_states_update.nil? || @last_states_update < (Time.now - 60*10)
pipeline_hashes = []
agent.pipelines.each do |pipeline_id, pipeline|
if time_for_update || !@last_updated_pipeline_hashes.include?(pipeline.hash)
update_pipeline_state(pipeline)
end
pipeline_hashes << pipeline.hash
end
@last_updated_pipeline_hashes = pipeline_hashes
# NOTE(review): `||=` assigns only once, so after the first 10 minutes
# `time_for_update` is true on every poll forever — confirm whether a
# plain assignment was intended here.
@last_states_update ||= Time.now
end
def update_pipeline_state(pipeline)
# System pipelines (including this monitoring pipeline) are never reported.
return if pipeline.system?
if @config_collection
emit_event(state_event_for(pipeline))
end
end
def state_event_for(pipeline)
StateEventFactory.new(pipeline).make()
end
def emit_event(event)
queue << event
end
# License-checker callback: maps the fetched x-pack/license info to a
# feature state (:ok or :error) plus a message to log at the given level.
def populate_license_state(xpack_info)
if !xpack_info.installed?
{
:state => :error,
:log_level => :error,
:log_message => "X-Pack is installed on Logstash but not on Elasticsearch. Please install X-Pack on Elasticsearch to use the monitoring feature. Other features may be available."
}
elsif !xpack_info.license_available?
{
:state => :error,
:log_level => :error,
:log_message => 'Monitoring is not available: License information is currently unavailable. Please make sure you have added your production elasticsearch connection info in the xpack.monitoring.elasticsearch settings.'
}
elsif !xpack_info.license_one_of?(VALID_LICENSES)
{
:state => :error,
:log_level => :error,
:log_message => "Monitoring is not available: #{xpack_info.license_type} is not a valid license for this feature."
}
elsif !xpack_info.license_active?
# An inactive (e.g. expired) license only degrades to a warning;
# monitoring keeps running.
{
:state => :ok,
:log_level => :warn,
:log_message => 'Monitoring requires a valid license. You can continue to monitor Logstash, but please contact your administrator to update your license'
}
else
unless xpack_info.feature_enabled?(FEATURE)
logger.warn('Monitoring installed and enabled in Logstash, but not enabled in Elasticsearch')
end
{ :state => :ok, :log_level => :info, :log_message => 'Monitoring License OK' }
end
end
private
# @timestamp/@version are reserved in monitoring documents and are managed
# downstream, so strip them from outgoing events.
def remove_reserved_fields(event)
event.remove("@timestamp")
event.remove("@version")
end
# Node-level identity/stats that do not change between snapshots;
# http_address and ephemeral_id are filled in later from live stats.
def fetch_global_stats
{
"uuid" => LogStash::SETTINGS.get("node.uuid"),
"name" => LogStash::SETTINGS.get("node.name"),
"host" => Socket.gethostname,
"http_address" => nil,
"ephemeral_id" => nil,
"version" => ::LOGSTASH_VERSION,
"snapshot" => ::BUILD_INFO["build_snapshot"],
"status" => "green",
"pipeline" => {
"workers" => LogStash::SETTINGS.get("pipeline.workers"),
"batch_size" => LogStash::SETTINGS.get("pipeline.batch.size"),
}
}
end
end
end; end

View file

@ -0,0 +1,135 @@
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
#
require 'monitoring/inputs/metrics'
require 'logstash-core'
require 'logstash/compiler'
module LogStash; module Inputs; class Metrics; class StateEvent;
# Serializes a compiled LIR (Logstash intermediate representation) pipeline
# into a plain Hash suitable for embedding in monitoring state events.
class LIRSerializer
  attr_reader :lir_pipeline

  # One-shot convenience entry point.
  def self.serialize(lir_pipeline)
    new(lir_pipeline).serialize
  end

  def initialize(lir_pipeline)
    @lir_pipeline = lir_pipeline
  end

  # Top-level document: pipeline hash, installed plugin inventory, and the
  # vertex/edge graph.
  def serialize
    {
      "hash" => lir_pipeline.unique_hash,
      "type" => "lir",
      "version" => "0.0.0",
      "plugins" => plugins,
      "graph" => {
        "vertices" => vertices,
        "edges" => edges
      }
    }
  end

  def vertices
    graph.getVertices.map { |graph_vertex| vertex(graph_vertex) }
  end

  def edges
    graph.getEdges.map { |graph_edge| edge(graph_edge) }
  end

  def graph
    lir_pipeline.graph
  end

  # Hashifies one vertex according to its concrete type, then adds the
  # fields common to every vertex.
  def vertex(v)
    base = case vertex_type(v)
           when :plugin then plugin_vertex(v)
           when :if     then if_vertex(v)
           when :queue  then queue_vertex(v)
           end
    decorate_vertex(v, base)
  end

  def vertex_type(v)
    return :plugin if v.java_kind_of?(org.logstash.config.ir.graph.PluginVertex)
    return :if if v.java_kind_of?(org.logstash.config.ir.graph.IfVertex)
    return :queue if v.java_kind_of?(org.logstash.config.ir.graph.QueueVertex)
    raise "Unexpected vertex type! #{v}"
  end

  # Adds source location, id, and type metadata shared by all vertex kinds.
  def decorate_vertex(v, v_json)
    v_json.merge(
      "meta" => format_swm(v.source_with_metadata),
      "id" => v.id,
      "explicit_id" => !!v.explicit_id,
      "type" => vertex_type(v).to_s
    )
  end

  def plugin_vertex(v)
    definition = v.plugin_definition
    {
      "config_name" => definition.name,
      "plugin_type" => definition.getType.to_s.downcase
    }
  end

  def if_vertex(v)
    { "condition" => v.humanReadableExpression }
  end

  def queue_vertex(v)
    {}
  end

  # Hashifies one edge; boolean edges additionally record which branch
  # ("when") they represent.
  def edge(e)
    descriptor = {
      "from" => e.from.id,
      "to" => e.to.id,
      "id" => e.id
    }
    if e.java_kind_of?(org.logstash.config.ir.graph.BooleanEdge)
      descriptor["when"] = e.edge_type
      descriptor["type"] = "boolean"
    else
      descriptor["type"] = "plain"
    end
    descriptor
  end

  # Formats a SourceWithMetadata into a location-only document, or nil.
  def format_swm(source_with_metadata)
    return nil if source_with_metadata.nil?
    {
      "source" => {
        "protocol" => source_with_metadata.protocol,
        "id" => source_with_metadata.id,
        "line" => source_with_metadata.line,
        "column" => source_with_metadata.column
        # We omit the text of the source code for security reasons
        # raw text may contain passwords
      }
    }
  end

  # Lists every installed gem flagged as a logstash plugin.
  def plugins
    all_specs = ::Gem::Specification.find_all
    plugin_specs = all_specs.select do |spec|
      spec.metadata && spec.metadata["logstash_plugin"] == "true"
    end
    plugin_specs.map { |spec| { :name => spec.name, :version => spec.version.to_s } }
  end
end
end; end; end; end

View file

@ -0,0 +1,40 @@
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
#
module LogStash; module Inputs; class Metrics;
# Builds a "logstash_state" monitoring event describing a single pipeline
# (its id, hashes, worker settings and serialized LIR representation).
class StateEventFactory
  require "monitoring/inputs/metrics/state_event/lir_serializer"

  # pipeline must be a LogStash::Pipeline or LogStash::JavaBasePipeline.
  def initialize(pipeline)
    unless pipeline.is_a?(LogStash::Pipeline) || pipeline.is_a?(LogStash::JavaBasePipeline)
      raise ArgumentError, "No pipeline passed in!"
    end
    @event = LogStash::Event.new
    @event.set("[@metadata]", {
      "document_type" => "logstash_state",
      "timestamp" => Time.now
    })
    @event.set("[pipeline]", pipeline_data(pipeline))
    # Reserved fields are managed downstream by the monitoring pipeline.
    @event.remove("@timestamp")
    @event.remove("@version")
    @event
  end

  # Hash describing the pipeline for the [pipeline] field of the event.
  def pipeline_data(pipeline)
    {
      "id" => pipeline.pipeline_id,
      "hash" => pipeline.lir.unique_hash,
      "ephemeral_id" => pipeline.ephemeral_id,
      "workers" => pipeline.settings.get("pipeline.workers"),
      "batch_size" => pipeline.settings.get("pipeline.batch.size"),
      "representation" => ::LogStash::Inputs::Metrics::StateEvent::LIRSerializer.serialize(pipeline.lir)
    }
  end

  # Returns the fully-populated state event.
  def make
    @event
  end
end
end; end; end

View file

@ -0,0 +1,147 @@
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
#
module LogStash; module Inputs; class Metrics; module StatsEvent;
# Formats per-pipeline statistics from the agent and the metric store into
# the structures shipped inside "logstash_stats" monitoring documents.
class PipelinesInfo
  # Maps flattened metric keys to their top-level field name in a vertex
  # document; :discard marks metrics that are intentionally dropped.
  ROOT_METRIC_MAPPINGS = {
    'events.in' => 'events_in',
    'events.out' => 'events_out',
    'events.queue_push_duration_in_millis' => 'queue_push_duration_in_millis',
    'events.duration_in_millis' => 'duration_in_millis',
    'name' => :discard # we don't need this, pipeline_state has this already
  }

  # Returns an array of per-pipeline stat documents (system pipelines excluded).
  def self.format_pipelines_info(agent, metric_store, extended_performance_collection)
    # It is important that we iterate via the agent's pipelines vs. the
    # metrics pipelines. This prevents race conditions as pipeline stats may be
    # populated before the agent has it in its own pipelines state
    stats = metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines]
    agent.pipelines.map do |pipeline_id, pipeline|
      p_stats = stats[pipeline_id]
      # Don't record stats for system pipelines
      next nil if pipeline.system?
      res = {
        "id" => pipeline_id.to_s,
        "hash" => pipeline.lir.unique_hash,
        "ephemeral_id" => pipeline.ephemeral_id,
        "events" => format_pipeline_events(p_stats[:events]),
        "queue" => format_queue_stats(pipeline_id, metric_store),
        "reloads" => {
          "successes" => p_stats[:reloads][:successes].value,
          "failures" => p_stats[:reloads][:failures].value
        }
      }
      if extended_performance_collection
        res["vertices"] = format_pipeline_vertex_stats(p_stats[:plugins], pipeline)
      end
      res
    end.compact
  end

  # Unwraps { stage => counter } into { "stage" => value }.
  def self.format_pipeline_events(stats)
    (stats || {}).each_with_object({}) do |(stage, counter), result|
      result[stage.to_s] = counter.value
    end
  end

  # Flattens the inputs/filters/outputs sections into one vertex-stats list.
  def self.format_pipeline_vertex_stats(stats, pipeline)
    return nil unless stats
    [:inputs, :filters, :outputs].flat_map do |section|
      format_pipeline_vertex_section_stats(stats[section], pipeline)
    end.select {|vertex_stats| !vertex_stats.nil?} # Ignore empty sections
  end

  # Builds one stats document per plugin in a section.
  def self.format_pipeline_vertex_section_stats(stats, pipeline)
    return nil unless stats
    stats.reduce([]) do |acc, (plugin_id, plugin_stats)|
      # BUG FIX: the original `next unless plugin_stats` returned nil as the
      # block value, nil-ing the accumulator and crashing on the next plugin.
      # `next acc` skips the entry while preserving the accumulator. (Also
      # removed an unused `props` local from the original.)
      next acc unless plugin_stats
      segmented = segment_plugin_metrics(flatten_metrics(plugin_stats))
      acc << {
        :id => plugin_id,
        :pipeline_ephemeral_id => pipeline.ephemeral_id
      }.merge(segmented)
      acc
    end
  end

  # Splits flattened plugin metrics into root fields (per
  # ROOT_METRIC_MAPPINGS) and typed nested metric lists.
  def self.segment_plugin_metrics(flattened)
    flattened.reduce(Hash.new {|h,k| h[k] = []}) do |acc, (k, v)|
      metric_value = v.value
      root_metric_field = ROOT_METRIC_MAPPINGS[k]
      if root_metric_field
        acc[root_metric_field] = metric_value unless root_metric_field == :discard
      else
        nested_type = case v.type.to_sym
                      when :"counter/long"  then :long_counters
                      when :"gauge/numeric" then :double_gauges
                      end
        acc[nested_type] << { :name => k, :value => metric_value } if nested_type
      end
      acc
    end
  end

  # Recursively flattens a nested metrics hash into { "a.b.c" => leaf }.
  def self.flatten_metrics(hash_or_value, namespaces = [])
    return { namespaces.join('.') => hash_or_value } unless hash_or_value.is_a?(Hash)
    hash_or_value.reduce({}) do |acc, (k, v)|
      # Build a new namespace array (no mutation) so sibling metrics in the
      # same namespace don't interfere with each other.
      acc.merge(flatten_metrics(v, namespaces + [k]))
    end
  end

  # Queue stats for one pipeline; sizes are only tracked for persisted queues.
  def self.format_queue_stats(pipeline_id, metric_store)
    path = [:stats, :pipelines, pipeline_id, :queue, :type]
    if metric_store.has_metric?(*path)
      queue_type = metric_store.get_shallow(*path).value
    else
      queue_type = 'memory'
    end
    events = 0
    queue_size_in_bytes = 0
    max_queue_size_in_bytes = 0
    if queue_type == "persisted"
      queue_path = [:stats, :pipelines, pipeline_id, :queue]
      events = metric_store.get_shallow(*queue_path, :events).value
      queue_size_in_bytes = metric_store.get_shallow(*queue_path, :capacity, :queue_size_in_bytes).value
      max_queue_size_in_bytes = metric_store.get_shallow(*queue_path, :capacity, :max_queue_size_in_bytes).value
    end
    {
      :type => queue_type,
      :events_count => events,
      :queue_size_in_bytes => queue_size_in_bytes,
      :max_queue_size_in_bytes => max_queue_size_in_bytes,
    }
  end
end
end; end; end; end

View file

@ -0,0 +1,119 @@
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
#
module LogStash; module Inputs; class Metrics;
# Builds the periodic "logstash_stats" monitoring event from a metric-store
# snapshot plus the node-level global stats captured at register time.
class StatsEventFactory
  include ::LogStash::Util::Loggable
  require 'monitoring/inputs/metrics/stats_event/pipelines_info'

  def initialize(global_stats, snapshot)
    @global_stats = global_stats
    @snapshot = snapshot
    @metric_store = @snapshot.metric_store
  end

  # Assembles the full stats document for this snapshot.
  def make(agent, extended_performance_collection=true)
    LogStash::Event.new(
      "timestamp" => @snapshot.created_at,
      "logstash" => fetch_node_stats(agent, @metric_store),
      "events" => format_global_event_count(@metric_store),
      "process" => format_process_stats(@metric_store),
      "pipelines" => StatsEvent::PipelinesInfo.format_pipelines_info(agent, @metric_store, extended_performance_collection),
      "reloads" => format_reloads(@metric_store),
      "jvm" => format_jvm_stats(@metric_store),
      "os" => format_os_stats(@metric_store),
      "queue" => format_queue_stats(agent, @metric_store),
      "@metadata" => {
        "document_type" => "logstash_stats",
        "timestamp" => Time.now
      }
    )
  end

  def format_process_stats(stats)
    stats.extract_metrics([:jvm, :process],
      [:cpu, :percent],
      :open_file_descriptors,
      :max_file_descriptors
    )
  end

  # Uptime, heap usage, and old/young GC collector counters.
  def format_jvm_stats(stats)
    result = stats.extract_metrics([:jvm], :uptime_in_millis)
    heap_stats = stats.extract_metrics([:jvm, :memory, :heap],
      :used_in_bytes, :used_percent, :max_in_bytes)
    result["mem"] = {
      "heap_used_in_bytes" => heap_stats[:used_in_bytes],
      "heap_used_percent" => heap_stats[:used_percent],
      "heap_max_in_bytes" => heap_stats[:max_in_bytes],
    }
    result["gc"] = {
      "collectors" => {
        "old" => stats.extract_metrics([:jvm, :gc, :collectors, :old],
          :collection_time_in_millis, :collection_count),
        "young" => stats.extract_metrics([:jvm, :gc, :collectors, :young],
          :collection_time_in_millis, :collection_count)
      }
    }
    result
  end

  # CPU load average, plus cgroup stats when the platform exposes them.
  def format_os_stats(stats)
    load_average = stats.extract_metrics([:jvm, :process, :cpu], :load_average)
    if os_stats?(stats)
      cpuacct = stats.extract_metrics([:os, :cgroup, :cpuacct], :control_group, :usage_nanos)
      cgroups_stats = stats.extract_metrics([:os, :cgroup, :cpu, :stat], :number_of_elapsed_periods, :number_of_times_throttled, :time_throttled_nanos)
      control_group = stats.get_shallow(:os, :cgroup, :cpu, :control_group).value
      {:cpu => load_average, :cgroup => {:cpuacct => cpuacct, :cpu => {:control_group => control_group, :stat => cgroups_stats}}}
    else
      {:cpu => load_average}
    end
  end

  # OS stats are not available on all platforms
  # TODO: replace exception logic with has_keys? when it is implemented in MetricStore
  def os_stats?(stats)
    stats.get_shallow(:os)
    true
  rescue LogStash::Instrument::MetricStore::MetricNotFound
    false
  end

  def format_reloads(stats)
    stats.extract_metrics([:stats, :reloads], :successes, :failures)
  end

  def format_global_event_count(stats)
    stats.extract_metrics([:stats, :events], :in, :filtered, :out, :duration_in_millis)
  end

  # Sums queued event counts across all persisted-queue, non-system pipelines.
  def format_queue_stats(agent, stats)
    pipelines_stats = stats.get_shallow(:stats, :pipelines)
    total_queued_events = 0
    pipelines_stats.each do |pipeline_id, p_stats|
      type = p_stats[:queue] && p_stats[:queue][:type].value
      pipeline = agent.get_pipeline(pipeline_id)
      # Check if pipeline is nil to avoid race condition where metrics system refers pipeline that has been stopped already
      next if pipeline.nil? || pipeline.system? || type != 'persisted'
      # BUG FIX: accumulate with += — the original plain assignment kept only
      # the LAST persisted pipeline's count despite the "total" name. Also
      # dropped an unused `events = 0` local.
      total_queued_events += p_stats[:queue][:events].value
    end
    {:events_count => total_queued_events}
  end

  # Merges per-snapshot node identity (http address, ephemeral id) into the
  # static global stats captured at register time.
  def fetch_node_stats(agent, stats)
    @global_stats.merge({
      "http_address" => stats.get_shallow(:http_address).value,
      "ephemeral_id" => agent.ephemeral_id
    })
  end
end
end; end; end

View file

@ -0,0 +1,23 @@
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
require "logstash/util/loggable"
require "concurrent"
module LogStash module Inputs
# Observer attached to the monitoring Concurrent::TimerTask; its only job is
# to log exceptions raised by the scheduled metrics-collection runs.
class TimerTaskLogger
  include LogStash::Util::Loggable

  # Concurrent::TimerTask observer callback: invoked after every run with
  # the run time, the result, and any exception raised.
  def update(run_at, result, exception)
    return if exception.nil?
    # This can happen if the pipeline is blocked for too long
    if exception.is_a?(Concurrent::TimeoutError)
      logger.debug("metric shipper took too much time to complete", :exception => exception.class, :message => exception.message)
    else
      logger.error("metric shipper exception", :exception => exception.class, :message => exception.message)
    end
  end
end
end end

View file

@ -0,0 +1,22 @@
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
require "logstash/config/source/base"
module LogStash module Monitoring
# Config source that feeds the pre-built internal monitoring pipeline config
# into the agent's source loader.
class InternalPipelineSource < LogStash::Config::Source::Base
  def initialize(pipeline_config)
    super(pipeline_config.settings)
    @pipeline_config = pipeline_config
  end

  # The single, already-compiled monitoring pipeline config.
  def pipeline_configs
    @pipeline_config
  end

  # This source always applies; there is nothing to match against.
  def match?
    true
  end
end
end end

View file

@ -0,0 +1,194 @@
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
require "logstash/agent"
require "logstash/universal_plugin"
require "monitoring/internal_pipeline_source"
require "logstash/config/pipeline_config"
require 'helpers/elasticsearch_options'
java_import java.util.concurrent.TimeUnit
module LogStash
class MonitoringExtension < LogStash::UniversalPlugin
include LogStash::Util::Loggable
# Value object handed to the ERB template (template.cfg.erb) that renders
# the internal monitoring pipeline configuration.
class TemplateData
def initialize(node_uuid,
system_api_version,
es_settings,
collection_interval,
collection_timeout_interval,
extended_performance_collection,
config_collection)
@system_api_version = system_api_version
@node_uuid = node_uuid
@collection_interval = collection_interval
@collection_timeout_interval = collection_timeout_interval
@extended_performance_collection = extended_performance_collection
@config_collection = config_collection
@es_hosts = es_settings['hosts']
@user = es_settings['user']
@password = es_settings['password']
@ca_path = es_settings['cacert']
@truststore_path = es_settings['truststore']
@truststore_password = es_settings['truststore_password']
@keystore_path = es_settings['keystore']
@keystore_password = es_settings['keystore_password']
@sniffing = es_settings['sniffing']
end
attr_accessor :system_api_version, :es_hosts, :user, :password, :node_uuid
attr_accessor :ca_path, :truststore_path, :truststore_password
attr_accessor :keystore_path, :keystore_password, :sniffing, :ssl_certificate_verification
# Intervals are passed in from TimeValue settings; this converts the raw
# nanosecond value to whole seconds for the `metrics` input options.
def collection_interval
TimeUnit::SECONDS.convert(@collection_interval, TimeUnit::NANOSECONDS)
end
def collection_timeout_interval
TimeUnit::SECONDS.convert(@collection_timeout_interval, TimeUnit::NANOSECONDS)
end
# True when both username and password were configured.
def auth?
user && password
end
# True when any SSL material (CA cert, truststore, or keystore) is configured.
def ssl?
ca_path || (truststore_path && truststore_password) || (keystore_path && keystore_password)
end
def truststore?
truststore_path && truststore_password
end
def keystore?
keystore_path && keystore_password
end
def extended_performance_collection?
@extended_performance_collection
end
def config_collection?
@config_collection
end
# Exposes this object's binding so ERB can evaluate the template against it.
def get_binding
binding
end
end
# Runner hook that builds and registers the internal monitoring pipeline
# after the agent has been created.
class PipelineRegisterHook
include LogStash::Util::Loggable, LogStash::Helpers::ElasticsearchOptions
# Leading dot marks this as a system pipeline (hidden from user-facing stats).
PIPELINE_ID = ".monitoring-logstash"
API_VERSION = 2
def initialize
# nothing to do here
end
# Called once the agent exists: records the node uuid, renders the metrics
# pipeline config, and adds it as a config source. Failures are logged and
# re-raised so startup does not silently lose monitoring.
def after_agent(runner)
return unless monitoring_enabled?(runner.settings)
logger.trace("registering the metrics pipeline")
LogStash::SETTINGS.set("node.uuid", runner.agent.id)
internal_pipeline_source = LogStash::Monitoring::InternalPipelineSource.new(setup_metrics_pipeline)
runner.source_loader.add_source(internal_pipeline_source)
rescue => e
logger.error("Failed to set up the metrics pipeline", :message => e.message, :backtrace => e.backtrace)
raise e
end
# For versions prior to 6.3 the default value of "xpack.monitoring.enabled" was true
# For versions 6.3+ the default of "xpack.monitoring.enabled" is false.
# To help keep passivity, assume that if "xpack.monitoring.elasticsearch.url" has been set that monitoring should be enabled.
# return true if xpack.monitoring.enabled=true (explicitly) or xpack.monitoring.elasticsearch.url is configured
def monitoring_enabled?(settings)
return settings.get_value("xpack.monitoring.enabled") if settings.set?("xpack.monitoring.enabled")
if settings.set?("xpack.monitoring.elasticsearch.url")
logger.warn("xpack.monitoring.enabled has not been defined, but found elasticsearch configuration. Please explicitly set `xpack.monitoring.enabled: true` in logstash.yml")
true
else
default = settings.get_default("xpack.monitoring.enabled")
logger.trace("xpack.monitoring.enabled has not been defined, defaulting to default value: " + default.to_s)
default # false as of 6.3
end
end
# Clones the global settings, overrides them for a small in-memory system
# pipeline, and wraps the rendered config into a PipelineConfig.
def setup_metrics_pipeline
settings = LogStash::SETTINGS.clone
# reset settings for the metrics pipeline
settings.get_setting("path.config").reset
settings.set("pipeline.id", PIPELINE_ID)
settings.set("config.reload.automatic", false)
settings.set("metric.collect", false)
settings.set("queue.type", "memory")
settings.set("pipeline.workers", 1) # this is a low throughput pipeline
settings.set("pipeline.batch.size", 2)
settings.set("pipeline.system", true)
config = generate_pipeline_config(settings)
logger.debug("compiled metrics pipeline config: ", :config => config)
config_part = org.logstash.common.SourceWithMetadata.new("x-pack-metrics", "internal_pipeline_source", config)
LogStash::Config::PipelineConfig.new(self, PIPELINE_ID.to_sym, config_part, settings)
end
# Renders template.cfg.erb against a TemplateData built from the
# xpack.monitoring.* settings.
def generate_pipeline_config(settings)
collection_interval = settings.get("xpack.monitoring.collection.interval")
collection_timeout_interval = settings.get("xpack.monitoring.collection.timeout_interval")
extended_performance_collection = settings.get("xpack.monitoring.collection.pipeline.details.enabled")
config_collection = settings.get("xpack.monitoring.collection.config.enabled")
es_settings = es_options_from_settings_or_modules('monitoring', settings)
data = TemplateData.new(LogStash::SETTINGS.get("node.uuid"), API_VERSION,
es_settings,
collection_interval, collection_timeout_interval,
extended_performance_collection, config_collection)
template_path = ::File.join(::File.dirname(__FILE__), "..", "template.cfg.erb")
template = ::File.read(template_path)
# NOTE(review): ERB's second positional argument (safe_level) is deprecated
# in newer Ruby versions and removed in 3.x — confirm intent of `3` here.
ERB.new(template, 3).result(data.get_binding)
end
end
def initialize
# nothing to do here
end
# UniversalPlugin hook: attach the pipeline-register hook to the Runner.
def register_hooks(hooks)
logger.trace("registering hook")
hooks.register_hooks(LogStash::Runner, PipelineRegisterHook.new)
end
# UniversalPlugin hook: declare every xpack.monitoring.* setting this
# extension reads. Registration failures are logged and re-raised.
def additionals_settings(settings)
logger.trace("registering additionals_settings")
settings.register(LogStash::Setting::Boolean.new("xpack.monitoring.enabled", false))
settings.register(LogStash::Setting::ArrayCoercible.new("xpack.monitoring.elasticsearch.url", String, [ "http://localhost:9200" ] ))
settings.register(LogStash::Setting::TimeValue.new("xpack.monitoring.collection.interval", "10s"))
settings.register(LogStash::Setting::TimeValue.new("xpack.monitoring.collection.timeout_interval", "10m"))
settings.register(LogStash::Setting::NullableString.new("xpack.monitoring.elasticsearch.username", "logstash_system"))
settings.register(LogStash::Setting::NullableString.new("xpack.monitoring.elasticsearch.password"))
settings.register(LogStash::Setting::NullableString.new("xpack.monitoring.elasticsearch.ssl.ca"))
settings.register(LogStash::Setting::NullableString.new("xpack.monitoring.elasticsearch.ssl.truststore.path"))
settings.register(LogStash::Setting::NullableString.new("xpack.monitoring.elasticsearch.ssl.truststore.password"))
settings.register(LogStash::Setting::NullableString.new("xpack.monitoring.elasticsearch.ssl.keystore.path"))
settings.register(LogStash::Setting::NullableString.new("xpack.monitoring.elasticsearch.ssl.keystore.password"))
settings.register(LogStash::Setting::String.new("xpack.monitoring.elasticsearch.ssl.verification_mode", "certificate", true, ["none", "certificate"]))
settings.register(LogStash::Setting::Boolean.new("xpack.monitoring.elasticsearch.sniffing", false))
settings.register(LogStash::Setting::Boolean.new("xpack.monitoring.collection.pipeline.details.enabled", true))
settings.register(LogStash::Setting::Boolean.new("xpack.monitoring.collection.config.enabled", true))
settings.register(LogStash::Setting::String.new("node.uuid", ""))
rescue => e
logger.error e.message
logger.error e.backtrace
raise e
end
end
end

View file

@ -0,0 +1,44 @@
# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.
# Internal monitoring pipeline. Rendered via ERB against
# MonitoringExtension::TemplateData; every <%= %> value comes from that binding.
input {
metrics {
collection_interval => <%= collection_interval %>
collection_timeout_interval => <%= collection_timeout_interval %>
extended_performance_collection => <%= extended_performance_collection? %>
config_collection => <%= config_collection? %>
}
}
output {
elasticsearch {
hosts => <%= es_hosts %>
# Documents go through the x-pack monitoring bulk endpoint; `index` is left
# empty — presumably the endpoint assigns the destination index (confirm).
bulk_path => "/_xpack/monitoring/_bulk?system_id=logstash&system_api_version=<%= system_api_version %>&interval=1s"
manage_template => false
document_type => "%{[@metadata][document_type]}"
index => ""
sniffing => <%= sniffing %>
<% if auth? %>
user => "<%= user %>"
password => "<%= password %>"
<% end %>
<% if ssl? %>
ssl => true
<% if ca_path %>
cacert => "<%= ca_path %>"
<% end %>
<% if truststore? %>
truststore => "<%= truststore_path %>"
truststore_password => "<%= truststore_password %>"
<% end %>
<% if keystore? %>
keystore => "<%= keystore_path %>"
keystore_password => "<%= keystore_password %>"
<% end %>
<% else %>
# In the case where the user does not want SSL we don't set ssl => false
# the reason being that the user can still turn ssl on by using https in their URL
# This causes the ES output to throw an error due to conflicting messages
<% end %>
}
}

99
x-pack/modules/README.md Normal file
View file

@ -0,0 +1,99 @@
# Module settings and structure
## settings
### logstash.yml
```
modules:
- name: netflow
var.elasticsearch.hosts: "es.mycloud.com"
var.elasticsearch.username: "foo"
var.elasticsearch.password: "password"
var.kibana.host: "kb.mycloud.com"
var.kibana.username: "foo"
var.kibana.password: "password"
var.input.tcp.port: 5606
```
### command-line
```
bin/logstash \
--modules netflow \
-M "netflow.var.output.elasticsearch.host=es.mycloud.com" \
-M "netflow.var.output.elasticsearch.user=foo" \
-M "netflow.var.output.elasticsearch.password=password" \
-M "netflow.var.input.tcp.port=5606"
```
## Current Gem structure
```
GEM File structure
logstash-module-netflow
├── configuration
│ ├── elasticsearch
│ │ └── netflow.json
│ ├── kibana
│ │ ├── dashboard
│ │ │ └── netflow.json (contains '["dash1", "dash2"]')
│ │ │ └── dash1.json ("panelJSON" contains refs to visualization panels 1,2 and search 1)
│ │ │ └── dash2.json ("panelJSON" contains refs to visualization panel 3 and search 2)
│ │ ├── index-pattern
| | | └── netflow.json
│ │ ├── search
| | | └── search1.json
| | | └── search2.json
│ │ └── vizualization
| | | └── panel1.json
| | | └── panel2.json
| | | └── panel3.json
│ └── logstash
│ └── netflow.conf.erb
├── lib
│ └── logstash_registry.rb
└── logstash-module-netflow.gemspec
```
## Proposed multi-version Gem structure
```
GEM File structure
logstash-module-netflow
├── configuration
│ ├── elasticsearch
│ │ └── netflow.json
│ ├── kibana
│ │ ├── dashboard
│ │ │ └── netflow.json (contains '{"v5.5.0": ["dash1", "dash2"], "v6.0.4": ["dash1", "dash2"]')
│ │ │ └── v5.5.0
│ │ │ | └── dash1.json ("panelJSON" contains refs to visualization panels 1,2 and search 1)
│ │ │ | └── dash2.json ("panelJSON" contains refs to visualization panel 3 and search 2)
│ │ │ └── v6.0.4
│ │ │ └── dash1.json ("panelJSON" contains refs to visualization panels 1,2 and search 1)
│ │ │ └── dash2.json ("panelJSON" contains refs to visualization panel 3 and search 2)
│ │ ├── index-pattern
│ │ │ └── v5
| | | | └── netflow.json
│ │ │ └── v6
| | | └── netflow.json
│ │ ├── search
│ │ │ └── v5
| | | | └── search1.json
| | | | └── search2.json
│ │ │ └── v6
| | | └── search1.json
| | | └── search2.json
│ │ └── vizualization
│ │ │ └── v5
| | | | └── panel1.json
| | | | └── panel2.json
| | | | └── panel3.json
│ │ │ └── v6
| | | └── panel1.json
| | | └── panel2.json
| | | └── panel3.json
│ └── logstash
│ └── netflow.conf.erb
├── lib
│ └── logstash_registry.rb
└── logstash-module-netflow.gemspec
```

View file

@ -0,0 +1,222 @@
{
"order": 0,
"template": "arcsight-*",
"mappings": {
"_default_": {
"_meta": {
"version": "7.0.0"
},
"dynamic": true,
"dynamic_templates": [
{
"string_fields": {
"mapping": {
"type": "keyword"
},
"match_mapping_type": "string",
"match": "*"
}
}
],
"properties": {
"destinationPort": {
"type": "integer"
},
"flexDate1": {
"format": "epoch_millis||epoch_second||date_time||MMM dd YYYY HH:mm:ss z||MMM dd yyyy HH:mm:ss",
"type": "date"
},
"sourcePort": {
"type": "integer"
},
"baseEventCount": {
"type": "integer"
},
"destinationAddress": {
"type": "ip"
},
"destinationProcessId": {
"type": "integer"
},
"oldFileSize": {
"type": "integer"
},
"destination": {
"dynamic": false,
"type": "object",
"properties": {
"city_name": {
"type": "keyword"
},
"country_name": {
"type": "keyword"
},
"country_code2": {
"type": "keyword"
},
"location": {
"type": "geo_point"
},
"region_name": {
"type": "keyword"
}
}
},
"source": {
"dynamic": false,
"type": "object",
"properties": {
"city_name": {
"type": "keyword"
},
"country_name": {
"type": "keyword"
},
"country_code2": {
"type": "keyword"
},
"location": {
"type": "geo_point"
},
"region_name": {
"type": "keyword"
}
}
},
"deviceReceiptTime": {
"format": "epoch_millis||epoch_second||date_time||MMM dd YYYY HH:mm:ss z||MMM dd yyyy HH:mm:ss",
"type": "date"
},
"destinationTranslatedPort": {
"type": "integer"
},
"deviceTranslatedAddress": {
"type": "ip"
},
"deviceAddress": {
"type": "ip"
},
"agentReceiptTime": {
"format": "epoch_millis||epoch_second||date_time||MMM dd YYYY HH:mm:ss z||MMM dd yyyy HH:mm:ss",
"type": "date"
},
"startTime": {
"format": "epoch_millis||epoch_second||date_time||MMM dd YYYY HH:mm:ss z||MMM dd yyyy HH:mm:ss",
"type": "date"
},
"sourceProcessId": {
"type": "integer"
},
"bytesIn": {
"type": "integer"
},
"bytesOut": {
"type": "integer"
},
"severity": {
"type": "keyword"
},
"deviceProcessId": {
"type": "integer"
},
"agentAddress": {
"type": "ip"
},
"sourceAddress": {
"type": "ip"
},
"sourceTranslatedPort": {
"type": "integer"
},
"deviceCustomDate2": {
"format": "epoch_millis||epoch_second||date_time||MMM dd YYYY HH:mm:ss z||MMM dd yyyy HH:mm:ss",
"type": "date"
},
"deviceCustomDate1": {
"format": "epoch_millis||epoch_second||date_time||MMM dd YYYY HH:mm:ss z||MMM dd yyyy HH:mm:ss",
"type": "date"
},
"flexNumber1": {
"type": "long"
},
"deviceCustomFloatingPoint1": {
"type": "float"
},
"oldFileModificationTime": {
"format": "epoch_millis||epoch_second||date_time||MMM dd YYYY HH:mm:ss z||MMM dd yyyy HH:mm:ss",
"type": "date"
},
"deviceCustomFloatingPoint2": {
"type": "float"
},
"oldFileCreateTime": {
"format": "epoch_millis||epoch_second||date_time||MMM dd YYYY HH:mm:ss z||MMM dd yyyy HH:mm:ss",
"type": "date"
},
"deviceCustomFloatingPoint3": {
"type": "float"
},
"sourceTranslatedAddress": {
"type": "ip"
},
"deviceCustomFloatingPoint4": {
"type": "float"
},
"flexNumber2": {
"type": "long"
},
"fileCreateTime": {
"format": "epoch_millis||epoch_second||date_time||MMM dd YYYY HH:mm:ss z||MMM dd yyyy HH:mm:ss",
"type": "date"
},
"fileModificationTime": {
"format": "epoch_millis||epoch_second||date_time||MMM dd YYYY HH:mm:ss z||MMM dd yyyy HH:mm:ss",
"type": "date"
},
"fileSize": {
"type": "integer"
},
"destinationTranslatedAddress": {
"type": "ip"
},
"endTime": {
"format": "epoch_millis||epoch_second||date_time||MMM dd YYYY HH:mm:ss z||MMM dd yyyy HH:mm:ss",
"type": "date"
},
"deviceCustomNumber1": {
"type": "long"
},
"deviceDirection": {
"type": "integer"
},
"device": {
"dynamic": false,
"type": "object",
"properties": {
"city_name": {
"type": "keyword"
},
"country_name": {
"type": "keyword"
},
"country_code2": {
"type": "keyword"
},
"location": {
"type": "geo_point"
},
"region_name": {
"type": "keyword"
}
}
},
"deviceCustomNumber3": {
"type": "long"
},
"deviceCustomNumber2": {
"type": "long"
}
}
}
}
}

View file

@ -0,0 +1,20 @@
{
"title": "[ArcSight] Microsoft DNS Overview",
"hits": 0,
"description": "",
"panelsJSON": "[{\"col\":11,\"id\":\"4ee62420-7523-11e7-871d-5f0fb978413c\",\"panelIndex\":1,\"row\":2,\"size_x\":2,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"1de45d60-7523-11e7-9445-91c40765092f\",\"panelIndex\":3,\"row\":2,\"size_x\":10,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"d72d7940-7529-11e7-9445-91c40765092f\",\"panelIndex\":5,\"row\":9,\"size_x\":6,\"size_y\":4,\"type\":\"visualization\"},{\"col\":1,\"id\":\"1c54cda0-752c-11e7-9445-91c40765092f\",\"panelIndex\":6,\"row\":13,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":7,\"id\":\"8cda1c30-752a-11e7-9445-91c40765092f\",\"panelIndex\":7,\"row\":9,\"size_x\":6,\"size_y\":4,\"type\":\"visualization\"},{\"col\":1,\"id\":\"aa57b050-7526-11e7-b440-f1d91dc5774d\",\"panelIndex\":9,\"row\":4,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":7,\"id\":\"4303de60-752b-11e7-9445-91c40765092f\",\"panelIndex\":11,\"row\":15,\"size_x\":6,\"size_y\":4,\"type\":\"visualization\"},{\"col\":1,\"id\":\"31b85570-454a-11e7-86b6-95298e9da6dc\",\"panelIndex\":12,\"row\":1,\"size_x\":12,\"size_y\":1,\"type\":\"visualization\"},{\"col\":1,\"id\":\"8f0161a0-752d-11e7-b440-f1d91dc5774d\",\"panelIndex\":13,\"row\":15,\"size_x\":6,\"size_y\":4,\"type\":\"visualization\"},{\"col\":1,\"id\":\"ebfd45a0-75a4-11e7-b440-f1d91dc5774d\",\"panelIndex\":14,\"row\":6,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":7,\"id\":\"bd1c82c0-75a7-11e7-871d-5f0fb978413c\",\"panelIndex\":15,\"row\":6,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"size_x\":6,\"size_y\":3,\"panelIndex\":16,\"type\":\"visualization\",\"id\":\"c658b300-7745-11e7-8fb2-417804dc0ec8\",\"col\":7,\"row\":19},{\"size_x\":6,\"size_y\":3,\"panelIndex\":17,\"type\":\"visualization\",\"id\":\"b1f98ce0-7745-11e7-8fb2-417804dc0ec8\",\"col\":1,\"row\":19}]",
"optionsJSON": "{\"darkTheme\":false}",
"uiStateJSON": "{\"P-11\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"P-13\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"P-3\":{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}},\"P-5\":{\"vis\":{\"defaultColors\":{\"0 - 18k\":\"rgb(247,251,255)\",\"18k - 36k\":\"rgb(227,238,249)\",\"36k - 54k\":\"rgb(208,225,242)\",\"54k - 72k\":\"rgb(182,212,233)\",\"72k - 90k\":\"rgb(148,196,223)\",\"90k - 108k\":\"rgb(107,174,214)\",\"108k - 126k\":\"rgb(74,152,201)\",\"126k - 144k\":\"rgb(46,126,188)\",\"144k - 162k\":\"rgb(23,100,171)\",\"162k - 180k\":\"rgb(8,74,145)\"},\"legendOpen\":false}},\"P-16\":{\"mapZoom\":1,\"mapCenter\":[12.211180191503997,0]},\"P-17\":{\"mapZoom\":1,\"mapCenter\":[-0.17578097424708533,0]}}",
"version": 1,
"timeRestore": true,
"timeTo": "now",
"timeFrom": "now-24h",
"refreshInterval": {
"display": "Off",
"pause": false,
"value": 0
},
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}],\"highlightAll\":true,\"version\":true}"
}
}

View file

@ -0,0 +1,20 @@
{
"title": "[ArcSight] Network Overview Dashboard",
"hits": 0,
"description": "",
"panelsJSON": "[{\"col\":1,\"id\":\"77cb1470-3989-11e7-8b9d-ddc45b5f6d00\",\"panelIndex\":1,\"row\":12,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"801fff70-395a-11e7-ae19-21fb91585845\",\"panelIndex\":2,\"row\":18,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"d6d526f0-395b-11e7-ae19-21fb91585845\",\"panelIndex\":5,\"row\":4,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"f57ea930-395d-11e7-ae19-21fb91585845\",\"panelIndex\":6,\"row\":16,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"161e27e0-3988-11e7-8b9d-ddc45b5f6d00\",\"panelIndex\":7,\"row\":2,\"size_x\":10,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"75582a90-3987-11e7-8b9d-ddc45b5f6d00\",\"panelIndex\":9,\"row\":6,\"size_x\":4,\"size_y\":3,\"type\":\"visualization\"},{\"col\":5,\"id\":\"e9c3ee00-3978-11e7-ae19-21fb91585845\",\"panelIndex\":11,\"row\":6,\"size_x\":4,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"ad802c10-3973-11e7-ae19-21fb91585845\",\"panelIndex\":13,\"row\":9,\"size_x\":8,\"size_y\":3,\"type\":\"visualization\"},{\"col\":9,\"id\":\"ec926660-396f-11e7-ae19-21fb91585845\",\"panelIndex\":15,\"row\":9,\"size_x\":4,\"size_y\":3,\"type\":\"visualization\"},{\"col\":9,\"id\":\"154ff7e0-3987-11e7-8b9d-ddc45b5f6d00\",\"panelIndex\":16,\"row\":6,\"size_x\":4,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"5acb74d0-398b-11e7-ae19-21fb91585845\",\"panelIndex\":17,\"row\":14,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"7a043760-3990-11e7-8b9d-ddc45b5f6d00\",\"panelIndex\":18,\"row\":20,\"size_x\":6,\"size_y\":4,\"type\":\"visualization\"},{\"col\":7,\"id\":\"fd70bca0-398f-11e7-8b9d-ddc45b5f6d00\",\"panelIndex\":19,\"row\":20,\"size_x\":6,\"size_y\":4,\"type\":\"visualization\"},{\"col\":11,\"id\":\"ed2f5570-3d5b-11e7-8b9d-ddc45b5f6d00\",\"panelIndex\":20,\"row\":2,\"size_x\":2,\"size_y\":2,\"type\":\"visualization\"},{\"
col\":1,\"id\":\"31b85570-454a-11e7-86b6-95298e9da6dc\",\"panelIndex\":21,\"row\":1,\"size_x\":12,\"size_y\":1,\"type\":\"visualization\"},{\"size_x\":6,\"size_y\":6,\"panelIndex\":24,\"type\":\"visualization\",\"id\":\"45387480-3989-11e7-8b9d-ddc45b5f6d00\",\"col\":1,\"row\":24},{\"size_x\":6,\"size_y\":6,\"panelIndex\":25,\"type\":\"visualization\",\"id\":\"35ce1310-3989-11e7-8b9d-ddc45b5f6d00\",\"col\":7,\"row\":24}]",
"optionsJSON": "{\"darkTheme\":false}",
"uiStateJSON": "{\"P-11\":{\"vis\":{\"colors\":{\"/Attempt\":\"#0A50A1\",\"/Failure\":\"#BF1B00\",\"/Success\":\"#629E51\"}}},\"P-13\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"P-15\":{\"vis\":{\"defaultColors\":{\"0% - 17%\":\"rgb(255,255,204)\",\"17% - 34%\":\"rgb(255,230,146)\",\"34% - 50%\":\"rgb(254,191,90)\",\"50% - 67%\":\"rgb(253,141,60)\",\"67% - 84%\":\"rgb(244,61,37)\",\"84% - 100%\":\"rgb(202,8,35)\"},\"legendOpen\":false}},\"P-16\":{\"vis\":{\"colors\":{\"Anti-Virus\":\"#EF843C\",\"Content Security\":\"#7EB26D\",\"Firewall\":\"#E24D42\",\"Integrated Security\":\"#962D82\",\"Network-based IDS/IPS\":\"#1F78C1\",\"Operating System\":\"#1F78C1\",\"VPN\":\"#EAB839\"}}},\"P-18\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"P-7\":{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"},\"legendOpen\":false}},\"P-8\":{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}},\"P-9\":{\"vis\":{\"colors\":{\"/Attempt\":\"#0A50A1\",\"/Failure\":\"#BF1B00\",\"/Success\":\"#629E51\"}}},\"P-25\":{\"mapZoom\":1,\"mapCenter\":[-0.3515602939922709,0]},\"P-24\":{\"mapZoom\":1,\"mapCenter\":[-0.3515602939922709,0]}}",
"version": 1,
"timeRestore": true,
"timeTo": "now",
"timeFrom": "now-24h",
"refreshInterval": {
"display": "Off",
"pause": false,
"value": 0
},
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}],\"highlightAll\":true,\"version\":true}"
}
}

View file

@ -0,0 +1,20 @@
{
"title": "[ArcSight] Endpoint OS Activity Dashboard",
"hits": 0,
"description": "",
"panelsJSON": "[{\"col\":1,\"id\":\"c9e333a0-4550-11e7-86b6-95298e9da6dc\",\"panelIndex\":3,\"row\":8,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"463fc740-454e-11e7-86b6-95298e9da6dc\",\"panelIndex\":4,\"row\":2,\"size_x\":10,\"size_y\":2,\"type\":\"visualization\"},{\"col\":7,\"id\":\"8f8d6230-454f-11e7-86b6-95298e9da6dc\",\"panelIndex\":5,\"row\":8,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"f0664070-4551-11e7-86b6-95298e9da6dc\",\"panelIndex\":7,\"row\":6,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":7,\"id\":\"d8314510-454f-11e7-86b6-95298e9da6dc\",\"panelIndex\":8,\"row\":14,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"2b369910-4553-11e7-83ea-67cb6920446d\",\"panelIndex\":9,\"row\":11,\"size_x\":6,\"size_y\":6,\"type\":\"visualization\"},{\"col\":7,\"id\":\"9141cc20-4553-11e7-83ea-67cb6920446d\",\"panelIndex\":10,\"row\":11,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"31b85570-454a-11e7-86b6-95298e9da6dc\",\"panelIndex\":11,\"row\":1,\"size_x\":12,\"size_y\":1,\"type\":\"visualization\"},{\"col\":11,\"id\":\"0e4558b0-4552-11e7-86b6-95298e9da6dc\",\"panelIndex\":12,\"row\":2,\"size_x\":2,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"47c2a140-454f-11e7-86b6-95298e9da6dc\",\"panelIndex\":13,\"row\":4,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":9,\"id\":\"68180c80-4556-11e7-83ea-67cb6920446d\",\"panelIndex\":14,\"row\":17,\"size_x\":4,\"size_y\":5,\"type\":\"visualization\"},{\"col\":9,\"id\":\"08ee04d0-4556-11e7-83ea-67cb6920446d\",\"panelIndex\":15,\"row\":22,\"size_x\":4,\"size_y\":6,\"type\":\"visualization\"},{\"col\":1,\"id\":\"b897ce70-4556-11e7-83ea-67cb6920446d\",\"panelIndex\":16,\"row\":21,\"size_x\":8,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"93531890-4556-11e7-83ea-67cb6920446d\",\"panelIndex\":17,\"row\":26,\"size_x\":8,\"size_y\":2,\"type\":\"visualization\"},{\"c
ol\":1,\"id\":\"a8ce0ef0-4556-11e7-83ea-67cb6920446d\",\"panelIndex\":18,\"row\":17,\"size_x\":8,\"size_y\":4,\"type\":\"visualization\"},{\"col\":1,\"id\":\"82caeb10-4556-11e7-83ea-67cb6920446d\",\"panelIndex\":19,\"row\":24,\"size_x\":8,\"size_y\":2,\"type\":\"visualization\"}]",
"optionsJSON": "{\"darkTheme\":false}",
"uiStateJSON": "{\"P-13\":{\"vis\":{\"colors\":{\"Destination Users\":\"#E24D42\",\"Event Count\":\"#64B0C8\"}}},\"P-14\":{\"vis\":{\"legendOpen\":false}},\"P-15\":{\"vis\":{\"legendOpen\":false}},\"P-2\":{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}},\"P-3\":{\"vis\":{\"colors\":{\"Count\":\"#64B0C8\",\"Destination User Names\":\"#E24D42\",\"Event Types\":\"#EF843C\"},\"legendOpen\":true}},\"P-4\":{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}},\"P-5\":{\"vis\":{\"defaultColors\":{\"0 - 55k\":\"rgb(255,255,204)\",\"55k - 110k\":\"rgb(255,241,170)\",\"110k - 165k\":\"rgb(254,225,135)\",\"165k - 220k\":\"rgb(254,201,101)\",\"220k - 275k\":\"rgb(254,171,73)\",\"275k - 330k\":\"rgb(253,141,60)\",\"330k - 385k\":\"rgb(252,91,46)\",\"385k - 440k\":\"rgb(237,47,34)\",\"440k - 495k\":\"rgb(212,16,32)\",\"495k - 550k\":\"rgb(176,0,38)\"},\"legendOpen\":false}},\"P-8\":{\"vis\":{\"colors\":{\"/Attempt\":\"#447EBC\",\"/Failure\":\"#E24D42\",\"/Success\":\"#7EB26D\"}}},\"P-9\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}}",
"version": 1,
"timeRestore": true,
"timeTo": "now",
"timeFrom": "now-24h",
"refreshInterval": {
"display": "Off",
"pause": false,
"value": 0
},
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}],\"highlightAll\":true,\"version\":true}"
}
}

View file

@ -0,0 +1,20 @@
{
"title": "[ArcSight] Network Suspicious Activity Dashboard",
"hits": 0,
"description": "",
"panelsJSON": "[{\"col\":1,\"id\":\"aa2ff0a0-3e4a-11e7-96c4-0d3a291ec93a\",\"panelIndex\":1,\"row\":8,\"size_x\":8,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"992c7bd0-3e4e-11e7-96c4-0d3a291ec93a\",\"panelIndex\":2,\"row\":11,\"size_x\":4,\"size_y\":3,\"type\":\"visualization\"},{\"col\":5,\"id\":\"f99c22e0-3e4e-11e7-96c4-0d3a291ec93a\",\"panelIndex\":3,\"row\":11,\"size_x\":4,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"e3888410-3e50-11e7-96c4-0d3a291ec93a\",\"panelIndex\":5,\"row\":6,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":9,\"id\":\"75582a90-3987-11e7-8b9d-ddc45b5f6d00\",\"panelIndex\":9,\"row\":8,\"size_x\":4,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"0bdbb5a0-3e55-11e7-96c4-0d3a291ec93a\",\"panelIndex\":11,\"row\":4,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"afdba840-3e55-11e7-96c4-0d3a291ec93a\",\"panelIndex\":12,\"row\":14,\"size_x\":6,\"size_y\":4,\"type\":\"visualization\"},{\"col\":7,\"id\":\"bfa45650-3e55-11e7-96c4-0d3a291ec93a\",\"panelIndex\":13,\"row\":14,\"size_x\":6,\"size_y\":4,\"type\":\"visualization\"},{\"col\":9,\"id\":\"cd462cc0-3e55-11e7-96c4-0d3a291ec93a\",\"panelIndex\":14,\"row\":11,\"size_x\":4,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"31b85570-454a-11e7-86b6-95298e9da6dc\",\"panelIndex\":15,\"row\":1,\"size_x\":12,\"size_y\":1,\"type\":\"visualization\"},{\"col\":1,\"id\":\"161e27e0-3988-11e7-8b9d-ddc45b5f6d00\",\"panelIndex\":16,\"row\":2,\"size_x\":10,\"size_y\":2,\"type\":\"visualization\"},{\"col\":11,\"id\":\"ed2f5570-3d5b-11e7-8b9d-ddc45b5f6d00\",\"panelIndex\":17,\"row\":2,\"size_x\":2,\"size_y\":2,\"type\":\"visualization\"}]",
"optionsJSON": "{\"darkTheme\":false}",
"uiStateJSON": "{\"P-1\":{\"vis\":{\"colors\":{\"Destination Addresses\":\"#E0752D\",\"Destination Ports\":\"#E24D42\"},\"legendOpen\":false}},\"P-16\":{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}},\"P-17\":{\"vis\":{\"defaultColors\":{\"0 - 50\":\"rgb(255,255,204)\",\"100 - 200\":\"rgb(253,141,60)\",\"200 - 300\":\"rgb(227,27,28)\",\"300 - 400\":\"rgb(128,0,38)\",\"50 - 100\":\"rgb(254,217,118)\"}}},\"P-2\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"P-3\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"P-8\":{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}},\"P-9\":{\"vis\":{\"colors\":{\"/Attempt\":\"#0A50A1\",\"/Failure\":\"#BF1B00\",\"/Success\":\"#629E51\"}}}}",
"version": 1,
"timeRestore": true,
"timeTo": "now",
"timeFrom": "now-24h",
"refreshInterval": {
"display": "Off",
"pause": false,
"value": 0
},
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}],\"highlightAll\":true,\"version\":true}"
}
}

View file

@ -0,0 +1,7 @@
[
"153e0bf0-752f-11e7-ae68-d756b92f3a9c",
"37af0b40-398d-11e7-ae19-21fb91585845",
"64c92510-4555-11e7-83ea-67cb6920446d",
"82051450-3e56-11e7-96c4-0d3a291ec93a",
"d2fa5030-3e5d-11e7-b212-897f1496dc0e"
]

View file

@ -0,0 +1,20 @@
{
"title": "[ArcSight] Endpoint Overview Dashboard",
"hits": 0,
"description": "",
"panelsJSON": "[{\"col\":11,\"id\":\"c53825b0-3e4b-11e7-af78-9fc514b4e118\",\"panelIndex\":1,\"row\":2,\"size_x\":2,\"size_y\":2,\"type\":\"visualization\"},{\"col\":7,\"id\":\"e301a830-3e4d-11e7-af78-9fc514b4e118\",\"panelIndex\":2,\"row\":9,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"9de87d40-3e4e-11e7-af78-9fc514b4e118\",\"panelIndex\":3,\"row\":9,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"96af5bf0-3e50-11e7-af78-9fc514b4e118\",\"panelIndex\":5,\"row\":4,\"size_x\":12,\"size_y\":2,\"type\":\"visualization\"},{\"col\":7,\"id\":\"ff476320-3e4a-11e7-af78-9fc514b4e118\",\"panelIndex\":6,\"row\":12,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"b74e59b0-3e5f-11e7-899c-f940f646009b\",\"panelIndex\":7,\"row\":2,\"size_x\":10,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"7c6875e0-3e61-11e7-899c-f940f646009b\",\"panelIndex\":8,\"row\":12,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"columns\":[\"categoryDeviceGroup\",\"categoryTechnique\",\"categoryOutcome\",\"categorySignificance\",\"categoryObject\",\"categoryBehavior\",\"categoryDeviceType\"],\"id\":\"1d9ba830-3e47-11e7-af78-9fc514b4e118\",\"panelIndex\":9,\"row\":20,\"size_x\":12,\"size_y\":5,\"sort\":[\"deviceReceiptTime\",\"desc\"],\"type\":\"search\"},{\"col\":7,\"id\":\"cc8affd0-3e65-11e7-899c-f940f646009b\",\"panelIndex\":10,\"row\":15,\"size_x\":6,\"size_y\":2,\"type\":\"visualization\"},{\"col\":1,\"id\":\"1bde8be0-3e68-11e7-899c-f940f646009b\",\"panelIndex\":11,\"row\":6,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"7c414c90-3e66-11e7-899c-f940f646009b\",\"panelIndex\":12,\"row\":15,\"size_x\":6,\"size_y\":5,\"type\":\"visualization\"},{\"col\":7,\"id\":\"6fb90a30-3e6b-11e7-9d4a-89ea81333ea4\",\"panelIndex\":14,\"row\":6,\"size_x\":6,\"size_y\":3,\"type\":\"visualization\"},{\"col\":1,\"id\":\"31b85570-454a-11e7-86b6-95298e9da6dc\",\"panelIndex\":15,\"row\":1,\
"size_x\":12,\"size_y\":1,\"type\":\"visualization\"},{\"size_x\":6,\"size_y\":3,\"panelIndex\":17,\"type\":\"visualization\",\"id\":\"2a33c810-3e4d-11e7-af78-9fc514b4e118\",\"col\":7,\"row\":17}]",
"optionsJSON": "{\"darkTheme\":false}",
"uiStateJSON": "{\"P-11\":{\"vis\":{\"colors\":{\"Anti-Virus\":\"#EAB839\",\"Database\":\"#629E51\",\"Host-based IDS/IPS\":\"#E0752D\",\"Operating System\":\"#BF1B00\",\"Security Mangement\":\"#64B0C8\"}}},\"P-12\":{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}},\"P-14\":{\"vis\":{\"colors\":{\"/Attempt\":\"#0A50A1\",\"/Failure\":\"#BF1B00\",\"/Informational\":\"#7EB26D\",\"/Informational/Warning\":\"#EF843C\",\"/Success\":\"#629E51\",\"Anti-Virus\":\"#EAB839\",\"Database\":\"#629E51\",\"Host-based IDS/IPS\":\"#E0752D\",\"Log Consolidator\":\"#E0F9D7\",\"Operating System\":\"#BF1B00\",\"Recon\":\"#BF1B00\",\"Security Mangement\":\"#64B0C8\"}}},\"P-2\":{\"vis\":{\"colors\":{\"/Attempt\":\"#0A50A1\",\"/Failure\":\"#BF1B00\",\"/Success\":\"#629E51\"}}},\"P-3\":{\"vis\":{\"colors\":{\"/Attempt\":\"#0A50A1\",\"/Failure\":\"#BF1B00\",\"/Success\":\"#629E51\"}}},\"P-7\":{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}},\"P-8\":{\"vis\":{\"colors\":{\"/Attempt\":\"#0A50A1\",\"/Failure\":\"#BF1B00\",\"/Success\":\"#629E51\"}}},\"P-17\":{\"mapZoom\":1,\"mapCenter\":[12.897489183755892,0]}}",
"version": 1,
"timeRestore": true,
"timeTo": "now",
"timeFrom": "now-24h",
"refreshInterval": {
"display": "Off",
"pause": false,
"value": 0
},
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[{\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"*\"}}}],\"highlightAll\":true,\"version\":true}"
}
}

File diff suppressed because one or more lines are too long

View file

@ -0,0 +1,35 @@
{
"title": "DNS Events",
"description": "",
"hits": 0,
"columns": [
"deviceVendor",
"deviceProduct",
"applicationProtocol",
"categoryBehavior",
"categoryOutcome",
"destinationAddress",
"destinationDnsDomain",
"destinationPort",
"deviceCustomString1Label",
"deviceCustomString1",
"deviceCustomString3Label",
"deviceCustomString3",
"deviceCustomString4Label",
"deviceCustomString4",
"deviceEventCategory",
"deviceHostName",
"deviceSeverity",
"sourceAddress",
"sourcePort",
"transportProtocol"
],
"sort": [
"deviceReceiptTime",
"desc"
],
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"arcsight-*\",\"highlightAll\":true,\"version\":true,\"query\":{\"query_string\":{\"query\":\"deviceEventCategory:\\\"dns\\\"\",\"analyze_wildcard\":true}},\"filter\":[]}"
}
}

View file

@ -0,0 +1,22 @@
{
"title": "Endpoint Event Explorer [ArcSight]",
"description": "",
"hits": 0,
"columns": [
"categoryDeviceGroup",
"categoryTechnique",
"categoryOutcome",
"categorySignificance",
"categoryObject",
"categoryBehavior",
"categoryDeviceType"
],
"sort": [
"deviceReceiptTime",
"desc"
],
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"arcsight-*\",\"highlightAll\":true,\"version\":true,\"filter\":[],\"query\":{\"query_string\":{\"query\":\"categoryDeviceGroup:\\\"/Operating System\\\" OR categoryDeviceGroup:\\\"/IDS/Host\\\" OR categoryDeviceGroup:\\\"/Application\\\"\",\"analyze_wildcard\":true}}}"
}
}

View file

@ -0,0 +1,30 @@
{
"title": "Network Events [ArcSight]",
"description": "",
"hits": 0,
"columns": [
"priority",
"name",
"sourceAddress",
"sourcePort",
"destinationAddress",
"destinationPort",
"applicationProtocol",
"message",
"categoryBehavior",
"categoryOutcome",
"deviceAddress",
"deviceProduct",
"deviceVendor",
"categoryDeviceGroup",
"categoryDeviceType"
],
"sort": [
"deviceReceiptTime",
"desc"
],
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"arcsight-*\",\"highlightAll\":true,\"version\":true,\"query\":{\"query_string\":{\"query\":\"categoryDeviceGroup:\\\"/Firewall\\\" OR categoryDeviceGroup:\\\"/IDS/Network\\\" OR categoryDeviceGroup:\\\"/VPN\\\"\",\"analyze_wildcard\":true}},\"filter\":[]}"
}
}

View file

@ -0,0 +1,27 @@
{
"title": "Endpoint - OS Events [ArcSight]",
"description": "",
"hits": 0,
"columns": [
"deviceVendor",
"deviceProduct",
"name",
"deviceEventClassId",
"deviceEventCategory",
"sourceUserName",
"destinationUserName",
"destinationHostName",
"categoryBehavior",
"categoryOutcome",
"sourceNtDomain",
"destinationNTDomain"
],
"sort": [
"deviceReceiptTime",
"desc"
],
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"arcsight-*\",\"highlightAll\":true,\"version\":true,\"filter\":[],\"query\":{\"query_string\":{\"query\":\"categoryDeviceGroup:\\\"/Operating System\\\"\",\"analyze_wildcard\":true}}}"
}
}

View file

@ -0,0 +1,38 @@
{
"title": "Microsoft DNS Events [ArcSight]",
"description": "",
"hits": 0,
"columns": [
"deviceVendor",
"deviceProduct",
"categoryBehavior",
"categoryOutcome",
"destinationAddress",
"destinationPort",
"destinationHostName",
"deviceEventClassId",
"deviceCustomString1Label",
"deviceCustomString1",
"deviceCustomString2Label",
"deviceCustomString2",
"deviceCustomString3Label",
"deviceCustomString3",
"deviceCustomString4Label",
"deviceCustomString4",
"deviceEventCategory",
"deviceSeverity",
"sourceAddress",
"sourcePort",
"transportProtocol",
"bytesIn",
"requestUrl"
],
"sort": [
"deviceReceiptTime",
"desc"
],
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"index\":\"arcsight-*\",\"highlightAll\":true,\"version\":true,\"query\":{\"query_string\":{\"analyze_wildcard\":true,\"query\":\"deviceProduct:\\\"DNS Trace Log\\\"\"}},\"filter\":[]}"
}
}

View file

@ -0,0 +1,11 @@
{
"title": "Top Destination Domains by Source Address",
"visState": "{\"aggs\":[{\"enabled\":true,\"id\":\"1\",\"params\":{},\"schema\":\"metric\",\"type\":\"count\"},{\"enabled\":true,\"id\":\"2\",\"params\":{\"customLabel\":\"Source Address(es)\",\"field\":\"sourceAddress\",\"order\":\"desc\",\"orderBy\":\"1\",\"size\":10},\"schema\":\"segment\",\"type\":\"terms\"},{\"enabled\":true,\"id\":\"3\",\"params\":{\"customLabel\":\"Destination Domain(s)\",\"field\":\"destinationDnsDomain\",\"order\":\"desc\",\"orderBy\":\"1\",\"size\":5},\"schema\":\"group\",\"type\":\"terms\"}],\"listeners\":{},\"params\":{\"addLegend\":true,\"addTimeMarker\":false,\"addTooltip\":true,\"categoryAxes\":[{\"id\":\"CategoryAxis-1\",\"labels\":{\"show\":true,\"truncate\":100},\"position\":\"bottom\",\"scale\":{\"type\":\"linear\"},\"show\":true,\"style\":{},\"title\":{\"text\":\"Source Address(es)\"},\"type\":\"category\"}],\"defaultYExtents\":false,\"drawLinesBetweenPoints\":true,\"grid\":{\"categoryLines\":false,\"style\":{\"color\":\"#eee\"}},\"interpolate\":\"linear\",\"legendPosition\":\"right\",\"radiusRatio\":9,\"scale\":\"linear\",\"seriesParams\":[{\"data\":{\"id\":\"1\",\"label\":\"Count\"},\"drawLinesBetweenPoints\":true,\"mode\":\"stacked\",\"show\":\"true\",\"showCircles\":true,\"type\":\"histogram\",\"valueAxis\":\"ValueAxis-1\"}],\"setYExtents\":false,\"showCircles\":true,\"times\":[],\"valueAxes\":[{\"id\":\"ValueAxis-1\",\"labels\":{\"filter\":false,\"rotate\":0,\"show\":true,\"truncate\":100},\"name\":\"LeftAxis-1\",\"position\":\"left\",\"scale\":{\"mode\":\"normal\",\"type\":\"linear\"},\"show\":true,\"style\":{},\"title\":{},\"type\":\"value\"}]},\"title\":\"Top Destination Domains by Source Address\",\"type\":\"histogram\"}",
"uiStateJSON": "{}",
"description": "",
"savedSearchId": "16a72e70-4543-11e7-9510-4b0b4978ab0e",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[]}"
}
}

View file

@ -0,0 +1,11 @@
{
"title": "Top 10 Source Users by Destination Users [ArcSight]",
"visState": "{\"title\":\"Top 10 Source Users by Destination Users [ArcSight]\",\"type\":\"pie\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"bottom\",\"isDonut\":true},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"sourceUserName\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Source Users\"}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"destinationUserName\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Destination Users\"}}],\"listeners\":{}}",
"uiStateJSON": "{}",
"description": "",
"savedSearchId": "7a2fc9c0-454d-11e7-86b6-95298e9da6dc",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[]}"
}
}

View file

@ -0,0 +1,10 @@
{
"title": "Firewall - Navigation",
  "visState": "{\"title\":\"Firewall - Navigation\",\"type\":\"markdown\",\"params\":{\"markdown\":\"### **Navigation Pane** ###\\n\\n[Firewall Devices Overview](#/dashboard/37af0b40-398d-11e7-ae19-21fb91585845)\\n\\n[Firewall Suspicious Activities](#/dashboard/82051450-3e56-11e7-96c4-0d3a291ec93a)\\n\\n[Endpoint Overview](#/dashboard/d2fa5030-3e5d-11e7-b212-897f1496dc0e)\"},\"aggs\":[],\"listeners\":{}}",
"uiStateJSON": "{}",
"description": "",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"query\":{\"query_string\":{\"query\":\"*\"}},\"filter\":[]}"
}
}

View file

@ -0,0 +1,10 @@
{
"title": "Events by Source Addresses [ArcSight]",
"visState": "{\"title\":\"Events by Source Addresses [ArcSight]\",\"type\":\"metrics\",\"params\":{\"id\":\"e1a58ab0-3957-11e7-ae19-21fb91585845\",\"type\":\"timeseries\",\"series\":[{\"id\":\"8f58a280-395a-11e7-ae19-21fb91585845\",\"color\":\"rgba(211,49,21,1)\",\"split_mode\":\"everything\",\"metrics\":[{\"id\":\"8f58a281-395a-11e7-ae19-21fb91585845\",\"type\":\"count\"},{\"settings\":\"\",\"minimize\":0,\"window\":\"\",\"model\":\"simple\",\"sigma\":\"\",\"id\":\"140cf490-395b-11e7-ae19-21fb91585845\",\"type\":\"moving_average\",\"field\":\"8f58a281-395a-11e7-ae19-21fb91585845\"}],\"seperate_axis\":1,\"axis_position\":\"right\",\"formatter\":\"number\",\"chart_type\":\"line\",\"line_width\":\"3\",\"point_size\":\"0\",\"fill\":\"0\",\"stacked\":\"none\",\"filter\":\"categoryDeviceGroup:\\\"/Firewall\\\" OR categoryDeviceGroup:\\\"/IDS/Network\\\" OR categoryDeviceGroup:\\\"/VPN\\\" \",\"terms_field\":\"deviceHostName\",\"terms_order_by\":null,\"label\":\"Events\",\"steps\":0,\"value_template\":\"{{value}}\"},{\"id\":\"3bb711b0-395b-11e7-ae19-21fb91585845\",\"color\":\"rgba(104,188,0,1)\",\"split_mode\":\"terms\",\"metrics\":[{\"id\":\"3bb711b1-395b-11e7-ae19-21fb91585845\",\"type\":\"count\"},{\"settings\":\"\",\"minimize\":0,\"window\":\"\",\"model\":\"simple\",\"id\":\"4b515cc0-395b-11e7-ae19-21fb91585845\",\"type\":\"moving_average\",\"field\":\"3bb711b1-395b-11e7-ae19-21fb91585845\"}],\"seperate_axis\":1,\"axis_position\":\"left\",\"formatter\":\"number\",\"chart_type\":\"bar\",\"line_width\":\"0\",\"point_size\":1,\"fill\":\"0.5\",\"stacked\":\"none\",\"terms_field\":\"sourceAddress\",\"terms_size\":\"10\",\"label\":\"Top Source Addresses\"}],\"time_field\":\"deviceReceiptTime\",\"index_pattern\":\"arcsight-*\",\"interval\":\"auto\",\"axis_position\":\"left\",\"axis_formatter\":\"number\",\"show_legend\":1,\"bar_color_rules\":[{\"id\":\"e4772140-3957-11e7-ae19-21fb91585845\"}],\"background_color\":null,\"filter\":\"categoryDeviceGroup:\\\"/Firewall\\\" OR 
categoryDeviceGroup:\\\"/IDS/Network\\\" OR categoryDeviceGroup:\\\"/VPN\\\" \",\"background_color_rules\":[{\"id\":\"837bfbb0-395a-11e7-ae19-21fb91585845\"}],\"gauge_color_rules\":[{\"id\":\"8970f250-395a-11e7-ae19-21fb91585845\"}],\"gauge_width\":10,\"gauge_inner_width\":10,\"gauge_style\":\"half\"},\"aggs\":[],\"listeners\":{}}",
"uiStateJSON": "{}",
"description": "",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"query\":{\"query_string\":{\"query\":\"*\"}},\"filter\":[]}"
}
}

View file

@ -0,0 +1,10 @@
{
"title": "Endpoint - OS Average EPS [ArcSight]",
"visState": "{\"title\":\"Endpoint - OS Average EPS [ArcSight]\",\"type\":\"metrics\",\"params\":{\"id\":\"3f2cf630-3e4b-11e7-af78-9fc514b4e118\",\"type\":\"gauge\",\"series\":[{\"id\":\"3f2cf631-3e4b-11e7-af78-9fc514b4e118\",\"color\":\"rgba(0,156,224,1)\",\"split_mode\":\"everything\",\"metrics\":[{\"id\":\"3f2cf632-3e4b-11e7-af78-9fc514b4e118\",\"type\":\"count\"},{\"id\":\"2f12f3d0-7dc5-11e7-95f6-690ab80d4e85\",\"type\":\"cumulative_sum\",\"field\":\"3f2cf632-3e4b-11e7-af78-9fc514b4e118\"},{\"unit\":\"1s\",\"id\":\"330d7a00-7dc5-11e7-95f6-690ab80d4e85\",\"type\":\"derivative\",\"field\":\"2f12f3d0-7dc5-11e7-95f6-690ab80d4e85\"},{\"settings\":\"\",\"minimize\":0,\"window\":\"\",\"model\":\"simple\",\"id\":\"373fd910-7dc5-11e7-95f6-690ab80d4e85\",\"type\":\"moving_average\",\"field\":\"330d7a00-7dc5-11e7-95f6-690ab80d4e85\"}],\"seperate_axis\":0,\"axis_position\":\"right\",\"formatter\":\"number\",\"chart_type\":\"line\",\"line_width\":1,\"point_size\":1,\"fill\":0.5,\"stacked\":\"none\",\"label\":\"Event Throughput\",\"offset_time\":\"1m\",\"value_template\":\"{{value}} / s\"}],\"time_field\":\"deviceReceiptTime\",\"index_pattern\":\"arcsight-*\",\"interval\":\"auto\",\"axis_position\":\"left\",\"axis_formatter\":\"number\",\"show_legend\":1,\"bar_color_rules\":[{\"id\":\"527ca820-3e4b-11e7-af78-9fc514b4e118\"}],\"gauge_color_rules\":[{\"id\":\"52cee6d0-3e4b-11e7-af78-9fc514b4e118\"}],\"gauge_width\":10,\"gauge_inner_width\":10,\"gauge_style\":\"half\",\"filter\":\"categoryDeviceGroup:\\\"/Operating System\\\"\"},\"aggs\":[],\"listeners\":{}}",
"uiStateJSON": "{}",
"description": "",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"query\":{\"query_string\":{\"query\":\"*\"}},\"filter\":[]}"
}
}

View file

@ -0,0 +1,10 @@
{
"title": "Endpoint - Navigation",
  "visState": "{\"title\":\"Endpoint - Navigation\",\"type\":\"markdown\",\"params\":{\"markdown\":\"### **Navigation Pane** ###\\n\\n[Endpoint Overview](#/dashboard/d2fa5030-3e5d-11e7-b212-897f1496dc0e)\"},\"aggs\":[],\"listeners\":{}}",
"uiStateJSON": "{}",
"description": "",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"query\":{\"query_string\":{\"query\":\"*\"}},\"filter\":[]}"
}
}

View file

@ -0,0 +1,11 @@
{
"title": "Device Type Breakdown [ArcSight]",
"visState": "{\"title\":\"Device Type Breakdown [ArcSight]\",\"type\":\"pie\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":false},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"categoryDeviceType\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\",\"customLabel\":\"Firewall Types\"}}],\"listeners\":{}}",
"uiStateJSON": "{}",
"description": "",
"savedSearchId": "6315e7a0-34be-11e7-95dc-4f6090d732f6",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[]}"
}
}

View file

@ -0,0 +1,11 @@
{
"title": "Device Metrics Overview [ArcSight]",
"visState": "{\"title\":\"Device Metrics Overview [ArcSight]\",\"type\":\"metric\",\"params\":{\"addLegend\":false,\"addTooltip\":true,\"fontSize\":\"30\",\"gauge\":{\"autoExtend\":false,\"backStyle\":\"Full\",\"colorSchema\":\"Green to Red\",\"colorsRange\":[{\"from\":0,\"to\":100}],\"gaugeColorMode\":\"None\",\"gaugeStyle\":\"Full\",\"gaugeType\":\"Metric\",\"invertColors\":false,\"labels\":{\"color\":\"black\",\"show\":true},\"orientation\":\"vertical\",\"percentageMode\":false,\"scale\":{\"color\":\"#333\",\"labels\":false,\"show\":false,\"width\":2},\"style\":{\"bgColor\":false,\"bgFill\":\"#000\",\"fontSize\":\"12\",\"labelColor\":false,\"subText\":\"\"},\"type\":\"simple\",\"useRange\":false,\"verticalSplit\":false},\"handleNoResults\":true,\"type\":\"gauge\"},\"aggs\":[{\"id\":\"8\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{\"customLabel\":\"Event Count\"}},{\"id\":\"4\",\"enabled\":true,\"type\":\"cardinality\",\"schema\":\"metric\",\"params\":{\"field\":\"deviceHostName\",\"customLabel\":\"Devices\"}},{\"id\":\"5\",\"enabled\":true,\"type\":\"cardinality\",\"schema\":\"metric\",\"params\":{\"field\":\"sourceAddress\",\"customLabel\":\"Sources\"}},{\"id\":\"6\",\"enabled\":true,\"type\":\"cardinality\",\"schema\":\"metric\",\"params\":{\"field\":\"destinationAddress\",\"customLabel\":\"Destinations\"}},{\"id\":\"7\",\"enabled\":true,\"type\":\"cardinality\",\"schema\":\"metric\",\"params\":{\"field\":\"destinationPort\",\"customLabel\":\"Ports\"}}],\"listeners\":{}}",
"uiStateJSON": "{\"vis\":{\"defaultColors\":{\"0 - 100\":\"rgb(0,104,55)\"}}}",
"description": "",
"savedSearchId": "6315e7a0-34be-11e7-95dc-4f6090d732f6",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[]}"
}
}

View file

@ -0,0 +1,11 @@
{
"title": "Device Types by Vendor [ArcSight]",
"visState": "{\"title\":\"Device Types by Vendor [ArcSight]\",\"type\":\"pie\",\"params\":{\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"isDonut\":false},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"categoryDeviceType\",\"exclude\":\"Network-based IDS/IPS\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"deviceVendor\",\"exclude\":\"\",\"size\":5,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}",
"uiStateJSON": "{}",
"description": "",
"savedSearchId": "1d9ba830-3e47-11e7-af78-9fc514b4e118",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[]}"
}
}

View file

@ -0,0 +1,11 @@
{
"title": "Top 10 Event Types [ArcSight]",
"visState": "{\"title\":\"Top 10 Event Types [ArcSight]\",\"type\":\"tagcloud\",\"params\":{\"scale\":\"square root\",\"orientation\":\"single\",\"minFontSize\":18,\"maxFontSize\":72},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"deviceEventClassId\",\"size\":10,\"order\":\"desc\",\"orderBy\":\"1\"}}],\"listeners\":{}}",
"uiStateJSON": "{}",
"description": "",
"savedSearchId": "bb1f4bc0-73fd-11e7-b4d0-0fc7dfb45744",
"version": 1,
"kibanaSavedObjectMeta": {
"searchSourceJSON": "{\"filter\":[]}"
}
}

Some files were not shown because too many files have changed in this diff Show more